Add ConfigMap for RIC service.
[ric-plt/ric-dep.git] / depRicKubernetesOperator / internal / controller / getConfigmap.go
1 package controller\r
2 \r
3 import (\r
4         corev1 "k8s.io/api/core/v1"\r
5         metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"\r
6 )\r
7 \r
8 func GetConfigMap() []*corev1.ConfigMap {\r
9 \r
10         configMap1 := &corev1.ConfigMap{\r
11                 ObjectMeta: metav1.ObjectMeta{\r
12                         Name: "configmap-ricplt-a1mediator-a1conf",\r
13                 },\r
14                 TypeMeta: metav1.TypeMeta{\r
15                         APIVersion: "v1",\r
16                         Kind:       "ConfigMap",\r
17                 },\r
18                 Data: map[string]string{\r
19                         "local.rt": "newrt|start\n" +\r
20                                 "# Warning! this is not a functioning table because the subscription manager and route manager are now involved in a1 flows\n" +\r
21                                 "# the real routing table requires subscription ids as routing is now done over sub ids, but this isn't known until xapp deploy time, it's a dynamic process triggered by the xapp manager\n" +\r
22                                 "# there is a single message type for all messages a1 sends out now, subid is the other necessary piece of info\n" +\r
23                                 "# there are two message types a1 listens for; 20011 (instance response) and 20012 (query)\n" +\r
24                                 "# xapps likely use rts to reply with 20012 so the routing entry isn't needed for that in most cases\n" +\r
25                                 "mse|20010|SUBID|service-ricxapp-admctrl-rmr.ricxapp:4563\n" +\r
26                                 "rte|20011|service-ricplt-a1mediator-rmr.ricplt:4562\n" +\r
27                                 "rte|20012|service-ricplt-a1mediator-rmr.ricplt:4562\n" +\r
28                                 "newrt|end\n" +\r
29                                 "",\r
30                         "loglevel.txt": "log-level:",\r
31                 },\r
32         }\r
33 \r
34         configMap2 := &corev1.ConfigMap{\r
35                 Data: map[string]string{\r
36                         "CONFIG_MAP_NAME":             "/opt/route/loglevel.txt",\r
37                         "INSTANCE_DELETE_NO_RESP_TTL": "5",\r
38                         "INSTANCE_DELETE_RESP_TTL":    "10",\r
39                         "PYTHONUNBUFFERED":            "1",\r
40                         "RMR_RTG_SVC":                 "4561",\r
41                         "RMR_SRC_ID":                  "service-ricplt-a1mediator-rmr.ricplt",\r
42                         "A1_RMR_RETRY_TIMES":          "20",\r
43                 },\r
44                 ObjectMeta: metav1.ObjectMeta{\r
45                         Name: "configmap-ricplt-a1mediator-env",\r
46                 },\r
47                 TypeMeta: metav1.TypeMeta{\r
48                         APIVersion: "v1",\r
49                         Kind:       "ConfigMap",\r
50                 },\r
51         }\r
52 \r
53         configMap3 := &corev1.ConfigMap{\r
54                 Data: map[string]string{\r
55                         "ALARM_MGR_SERVICE_NAME": "service-ricplt-alarmmanager-rmr.ricplt",\r
56                         "ALARM_MGR_SERVICE_PORT": "4560",\r
57                 },\r
58                 ObjectMeta: metav1.ObjectMeta{\r
59                         Namespace: "ricplt",\r
60                         Name:      "configmap-ricplt-alarmmanager-appconfig",\r
61                 },\r
62                 TypeMeta: metav1.TypeMeta{\r
63                         APIVersion: "v1",\r
64                         Kind:       "ConfigMap",\r
65                 },\r
66         }\r
67 \r
68         configMap4 := &corev1.ConfigMap{\r
69                 ObjectMeta: metav1.ObjectMeta{\r
70                         Name:      "alarm-appconfig",\r
71                         Namespace: "ricxapp",\r
72                 },\r
73                 TypeMeta: metav1.TypeMeta{\r
74                         Kind:       "ConfigMap",\r
75                         APIVersion: "v1",\r
76                 },\r
77                 Data: map[string]string{\r
78                         "ALARM_MGR_SERVICE_NAME": "service-ricplt-alarmmanager-rmr.ricplt",\r
79                         "ALARM_MGR_SERVICE_PORT": "4560",\r
80                 },\r
81         }\r
82 \r
83         configMap5 := &corev1.ConfigMap{\r
84                 Data: map[string]string{\r
85                         "alarmmanagercfg": "{  \n" +\r
86                                 "  \"local\": {\n" +\r
87                                 "    \"host\": \":8080\"\n" +\r
88                                 "  },\n" +\r
89                                 "  \"logger\": {\n" +\r
90                                 "    \"level\": 4\n" +\r
91                                 "  },\n" +\r
92                                 "  \"db\": {\n" +\r
93                                 "    \"namespaces\": [\"sdl\", \"rnib\"]\n" +\r
94                                 "  },\n" +\r
95                                 "  \"rmr\": {\n" +\r
96                                 "    \"protPort\": \"tcp:4560\",\n" +\r
97                                 "    \"maxSize\": 1024,\n" +\r
98                                 "    \"numWorkers\": 1\n" +\r
99                                 "  },\n" +\r
100                                 "  \"controls\": {\n" +\r
101                                 "    \"promAlertManager\": {\n" +\r
102                                 "      \"address\": \"cpro-alertmanager:80\",\n" +\r
103                                 "      \"baseUrl\": \"api/v2\",\n" +\r
104                                 "      \"schemes\": \"http\",\n" +\r
105                                 "      \"alertInterval\": 30000\n" +\r
106                                 "    },\n" +\r
107                                 "    \"maxActiveAlarms\": 5000,\n" +\r
108                                 "    \"maxAlarmHistory\": 20000,\n" +\r
109                                 "    \"alarmInfoPvFile\": \"/mnt/pv-ricplt-alarmmanager/alarminfo.json\"\n" +\r
110                                 "  }\n" +\r
111                                 "}",\r
112                 },\r
113                 ObjectMeta: metav1.ObjectMeta{\r
114                         Name:      "configmap-ricplt-alarmmanager-alarmmanagercfg",\r
115                         Namespace: "ricplt",\r
116                 },\r
117                 TypeMeta: metav1.TypeMeta{\r
118                         APIVersion: "v1",\r
119                         Kind:       "ConfigMap",\r
120                 },\r
121         }\r
122 \r
123         configMap6 := &corev1.ConfigMap{\r
124                 Data: map[string]string{\r
125                         "RMR_SEED_RT": "/cfg/uta_rtg.rt",\r
126                         "RMR_SRC_ID":  "service-ricplt-alarmmanager-rmr.ricplt",\r
127                         "RMR_RTG_SVC": "service-ricplt-rtmgr-rmr:4561",\r
128                 },\r
129                 ObjectMeta: metav1.ObjectMeta{\r
130                         Name:      "configmap-ricplt-alarmmanager-env",\r
131                         Namespace: "ricplt",\r
132                 },\r
133                 TypeMeta: metav1.TypeMeta{\r
134                         APIVersion: "v1",\r
135                         Kind:       "ConfigMap",\r
136                 },\r
137         }\r
138 \r
139         configMap7 := &corev1.ConfigMap{\r
140                 Data: map[string]string{\r
141                         "appmgr.yaml": "\"local\":\n" +\r
142                                 "  # Port on which the xapp-manager REST services are provided\n" +\r
143                                 "  \"host\": \":8080\"\n" +\r
144                                 "\"helm\":\n" +\r
145                                 "  # Remote helm repo URL. UPDATE this as required.\n" +\r
146                                 "  \"repo\": \"\\\"http://service-ricplt-xapp-onboarder-http:8080\\\"\"\n" +\r
147                                 "\n" +\r
148                                 "  # Repo name referred within the xapp-manager\n" +\r
149                                 "  \"repo-name\": \"helm-repo\"\n" +\r
150                                 "\n" +\r
151                                 "  # Tiller service details in the cluster. UPDATE this as required.\n" +\r
152                                 "  \"tiller-service\": service-tiller-ricxapp\n" +\r
153                                 "  \"tiller-namespace\": ricinfra\n" +\r
154                                 "  \"tiller-port\": \"44134\"\n" +\r
155                                 "  # helm username and password files\n" +\r
156                                 "  \"helm-username-file\": \"/opt/ric/secret/helm_repo_username\"\n" +\r
157                                 "  \"helm-password-file\": \"/opt/ric/secret/helm_repo_password\"\n" +\r
158                                 "  \"retry\": 1\n" +\r
159                                 "\"xapp\":\n" +\r
160                                 "  #Namespace to install xAPPs\n" +\r
161                                 "  \"namespace\": \"ricxapp\"\n" +\r
162                                 "  \"tarDir\": \"/tmp\"\n" +\r
163                                 "  \"schema\": \"descriptors/schema.json\"\n" +\r
164                                 "  \"config\": \"config/config-file.json\"\n" +\r
165                                 "  \"tmpConfig\": \"/tmp/config-file.json\"\n" +\r
166                                 "",\r
167                 },\r
168                 ObjectMeta: metav1.ObjectMeta{\r
169                         Name: "configmap-ricplt-appmgr-appconfig",\r
170                 },\r
171                 TypeMeta: metav1.TypeMeta{\r
172                         APIVersion: "v1",\r
173                         Kind:       "ConfigMap",\r
174                 },\r
175         }\r
176 \r
177         configMap8 := &corev1.ConfigMap{\r
178                 Data: map[string]string{\r
179                         "appmgr-tiller-secret-copier.sh": "#!/bin/sh\n" +\r
180                                 "if [ -x /svcacct-to-kubeconfig.sh ] ; then\n" +\r
181                                 " /svcacct-to-kubeconfig.sh\n" +\r
182                                 "fi\n" +\r
183                                 "\n" +\r
184                                 "if [ ! -z \"${HELM_TLS_CA_CERT}\" ]; then\n" +\r
185                                 "  kubectl -n ${SECRET_NAMESPACE} get secret -o yaml ${SECRET_NAME} | \\\n" +\r
186                                 "   grep 'ca.crt:' | \\\n" +\r
187                                 "   awk '{print $2}' | \\\n" +\r
188                                 "   base64 -d > ${HELM_TLS_CA_CERT}\n" +\r
189                                 "fi\n" +\r
190                                 "\n" +\r
191                                 "if [ ! -z \"${HELM_TLS_CERT}\" ]; then\n" +\r
192                                 "  kubectl -n ${SECRET_NAMESPACE} get secret -o yaml ${SECRET_NAME} | \\\n" +\r
193                                 "   grep 'tls.crt:' | \\\n" +\r
194                                 "   awk '{print $2}' | \\\n" +\r
195                                 "   base64 -d > ${HELM_TLS_CERT}\n" +\r
196                                 "fi\n" +\r
197                                 "\n" +\r
198                                 "if [ ! -z \"${HELM_TLS_KEY}\" ]; then\n" +\r
199                                 "  kubectl -n ${SECRET_NAMESPACE} get secret -o yaml ${SECRET_NAME} | \\\n" +\r
200                                 "   grep 'tls.key:' | \\\n" +\r
201                                 "   awk '{print $2}' | \\\n" +\r
202                                 "   base64 -d > ${HELM_TLS_KEY}\n" +\r
203                                 "fi\n" +\r
204                                 "",\r
205                         "svcacct-to-kubeconfig.sh": "#!/bin/sh\n" +\r
206                                 "\n" +\r
207                                 "# generate a kubconfig (at ${KUBECONFIG} file from the automatically-mounted\n" +\r
208                                 "# service account token.\n" +\r
209                                 "# ENVIRONMENT:\n" +\r
210                                 "# SVCACCT_NAME: the name of the service account user.  default \"default\"\n" +\r
211                                 "# CLUSTER_NAME: the name of the kubernetes cluster.  default \"kubernetes\"\n" +\r
212                                 "# KUBECONFIG: where the generated file will be deposited.\n" +\r
213                                 "SVCACCT_TOKEN=`cat /var/run/secrets/kubernetes.io/serviceaccount/token`\n" +\r
214                                 "CLUSTER_CA=`base64 /var/run/secrets/kubernetes.io/serviceaccount/ca.crt|tr -d '\\n'`\n" +\r
215                                 "\n" +\r
216                                 "cat >${KUBECONFIG} <<__EOF__\n" +\r
217                                 "ApiVersion: v1\n" +\r
218                                 "kind: Config\n" +\r
219                                 "users:\n" +\r
220                                 "- name: ${SVCACCT_NAME:-default}\n" +\r
221                                 "  user:\n" +\r
222                                 "    token: ${SVCACCT_TOKEN}\n" +\r
223                                 "clusters:\n" +\r
224                                 "- cluster:\n" +\r
225                                 "    certificate-authority-data: ${CLUSTER_CA}\n" +\r
226                                 "    server: ${K8S_API_HOST:-https://kubernetes.default.svc.cluster.local/}\n" +\r
227                                 "  name: ${CLUSTER_NAME:-kubernetes}\n" +\r
228                                 "contexts:\n" +\r
229                                 "- context:\n" +\r
230                                 "    cluster: ${CLUSTER_NAME:-kubernetes}\n" +\r
231                                 "    user: ${SVCACCT_NAME:-default}\n" +\r
232                                 "  name: svcs-acct-context\n" +\r
233                                 "current-context: svcs-acct-context\n" +\r
234                                 "__EOF__\n" +\r
235                                 "",\r
236                 },\r
237                 ObjectMeta: metav1.ObjectMeta{\r
238                         Name: "configmap-ricplt-appmgr-bin",\r
239                 },\r
240                 TypeMeta: metav1.TypeMeta{\r
241                         Kind:       "ConfigMap",\r
242                         APIVersion: "v1",\r
243                 },\r
244         }\r
245 \r
246         configMap9 := &corev1.ConfigMap{\r
247                 Data: map[string]string{\r
248                         "RMR_RTG_SVC":       "4561",\r
249                         "HELM_TLS_CA_CERT":  "/opt/ric/secret/tiller-ca.cert",\r
250                         "HELM_TLS_CERT":     "/opt/ric/secret/helm-client.cert",\r
251                         "HELM_TLS_HOSTNAME": "service-tiller-ricxapp",\r
252                         "HELM_TLS_VERIFY":   "true",\r
253                         "NAME":              "xappmgr",\r
254                         "HELM_HOST":         "service-tiller-ricxapp.ricinfra:44134",\r
255                         "HELM_TLS_ENABLED":  "true",\r
256                         "HELM_TLS_KEY":      "/opt/ric/secret/helm-client.key",\r
257                 },\r
258                 ObjectMeta: metav1.ObjectMeta{\r
259                         Name: "configmap-ricplt-appmgr-env",\r
260                 },\r
261                 TypeMeta: metav1.TypeMeta{\r
262                         APIVersion: "v1",\r
263                         Kind:       "ConfigMap",\r
264                 },\r
265         }\r
266 \r
267         configMap10 := &corev1.ConfigMap{\r
268                 Data: map[string]string{\r
269                         "DBAAS_NODE_COUNT":   "1",\r
270                         "DBAAS_SERVICE_HOST": "service-ricplt-dbaas-tcp.ricplt",\r
271                         "DBAAS_SERVICE_PORT": "6379",\r
272                 },\r
273                 ObjectMeta: metav1.ObjectMeta{\r
274                         Name:      "configmap-ricplt-dbaas-appconfig",\r
275                         Namespace: "ricplt",\r
276                 },\r
277                 TypeMeta: metav1.TypeMeta{\r
278                         APIVersion: "v1",\r
279                         Kind:       "ConfigMap",\r
280                 },\r
281         }\r
282 \r
283         configMap11 := &corev1.ConfigMap{\r
284                 Data: map[string]string{\r
285                         "DBAAS_NODE_COUNT":   "1",\r
286                         "DBAAS_SERVICE_HOST": "service-ricplt-dbaas-tcp.ricplt",\r
287                         "DBAAS_SERVICE_PORT": "6379",\r
288                 },\r
289                 ObjectMeta: metav1.ObjectMeta{\r
290                         Name:      "dbaas-appconfig",\r
291                         Namespace: "ricxapp",\r
292                 },\r
293                 TypeMeta: metav1.TypeMeta{\r
294                         APIVersion: "v1",\r
295                         Kind:       "ConfigMap",\r
296                 },\r
297         }\r
298 \r
299         configMap12 := &corev1.ConfigMap{\r
300                 TypeMeta: metav1.TypeMeta{\r
301                         APIVersion: "v1",\r
302                         Kind:       "ConfigMap",\r
303                 },\r
304                 Data: map[string]string{\r
305                         "redis.conf": "dir \"/data\"\n" +\r
306                                 "appendonly no\n" +\r
307                                 "bind 0.0.0.0\n" +\r
308                                 "loadmodule /usr/local/libexec/redismodule/libredismodule.so\n" +\r
309                                 "protected-mode no\n" +\r
310                                 "save\n" +\r
311                                 "",\r
312                 },\r
313                 ObjectMeta: metav1.ObjectMeta{\r
314                         Namespace: "ricplt",\r
315                         Labels: map[string]string{\r
316                                 "app":      "ricplt-dbaas",\r
317                                 "chart":    "dbaas-2.0.0",\r
318                                 "heritage": "Helm",\r
319                                 "release":  "release-name",\r
320                         },\r
321                         Name: "configmap-ricplt-dbaas-config",\r
322                 },\r
323         }\r
324 \r
325         configMap13 := &corev1.ConfigMap{\r
326                 Data: map[string]string{\r
327                         "rmr_verbose": "0\n" +\r
328                                 "",\r
329                         "router.txt": "newrt|start\n" +\r
330                                 "rte|1080|service-ricplt-e2mgr-rmr.ricplt:3801\n" +\r
331                                 "rte|1090|service-ricplt-e2term-rmr.ricplt:38000\n" +\r
332                                 "rte|1100|service-ricplt-e2term-rmr.ricplt:38000\n" +\r
333                                 "rte|1101|service-ricplt-e2term-rmr.ricplt:38000\n" +\r
334                                 "rte|1200|service-ricplt-rsm-rmr.ricplt:4801\n" +\r
335                                 "rte|1210|service-ricplt-rsm-rmr.ricplt:4801\n" +\r
336                                 "rte|1220|service-ricplt-rsm-rmr.ricplt:4801\n" +\r
337                                 "rte|10020|service-ricplt-e2mgr-rmr.ricplt:3801\n" +\r
338                                 "rte|10060|service-ricplt-e2term-rmr.ricplt:38000\n" +\r
339                                 "rte|10061|service-ricplt-e2mgr-rmr.ricplt:3801\n" +\r
340                                 "rte|10062|service-ricplt-e2mgr-rmr.ricplt:3801\n" +\r
341                                 "rte|10070|service-ricplt-e2term-rmr.ricplt:38000\n" +\r
342                                 "rte|10071|service-ricplt-e2term-rmr.ricplt:38000\n" +\r
343                                 "rte|10080|service-ricplt-e2mgr-rmr.ricplt:3801\n" +\r
344                                 "rte|10360|service-ricplt-e2term-rmr.ricplt:38000\n" +\r
345                                 "rte|10361|service-ricplt-e2mgr-rmr.ricplt:3801\n" +\r
346                                 "rte|10362|service-ricplt-e2mgr-rmr.ricplt:3801\n" +\r
347                                 "rte|10370|service-ricplt-e2mgr-rmr.ricplt:3801\n" +\r
348                                 "rte|10371|service-ricplt-e2term-rmr.ricplt:38000\n" +\r
349                                 "rte|12010|service-ricplt-e2term-rmr.ricplt:38000\n" +\r
350                                 "rte|12020|service-ricplt-e2term-rmr.ricplt:38000\n" +\r
351                                 "rte|20001|service-ricplt-a1mediator-rmr.ricplt:4562\n" +\r
352                                 "newrt|end",\r
353                 },\r
354                 ObjectMeta: metav1.ObjectMeta{\r
355                         Name:      "configmap-ricplt-e2mgr-router-configmap",\r
356                         Namespace: "ricplt",\r
357                 },\r
358                 TypeMeta: metav1.TypeMeta{\r
359                         APIVersion: "v1",\r
360                         Kind:       "ConfigMap",\r
361                 },\r
362         }\r
363 \r
364         configMap14 := &corev1.ConfigMap{\r
365                 Data: map[string]string{\r
366                         "configuration.yaml": "logging:\n" +\r
367                                 "  logLevel:  \"info\"     \n" +\r
368                                 "http:\n" +\r
369                                 "  port: 3800\n" +\r
370                                 "rmr:\n" +\r
371                                 "  port: 3801\n" +\r
372                                 "  maxMsgSize: 65536\n" +\r
373                                 "\n" +\r
374                                 "routingManager:\n" +\r
375                                 "  baseUrl: \"http://service-ricplt-rtmgr-http:3800/ric/v1/handles/\"\n" +\r
376                                 "notificationResponseBuffer: 100\n" +\r
377                                 "bigRedButtonTimeoutSec: 5 \n" +\r
378                                 "maxConnectionAttempts: 3 \n" +\r
379                                 "maxRnibConnectionAttempts: 3 \n" +\r
380                                 "rnibRetryIntervalMs: 10\n" +\r
381                                 "keepAliveResponseTimeoutMs: 360000\n" +\r
382                                 "keepAliveDelayMs: 120000\n" +\r
383                                 "\n" +\r
384                                 "globalRicId:\n" +\r
385                                 "  ricId: \"AACCE\"\n" +\r
386                                 "  mcc: \"310\"\n" +\r
387                                 "  mnc: \"411\"\n" +\r
388                                 "  \n" +\r
389                                 "rnibWriter:\n" +\r
390                                 "  stateChangeMessageChannel: \"RAN_CONNECTION_STATUS_CHANGE\"\n" +\r
391                                 "  ranManipulationMessageChannel: \"RAN_MANIPULATION\"",\r
392                 },\r
393                 ObjectMeta: metav1.ObjectMeta{\r
394                         Name:      "configmap-ricplt-e2mgr-configuration-configmap",\r
395                         Namespace: "ricplt",\r
396                 },\r
397                 TypeMeta: metav1.TypeMeta{\r
398                         APIVersion: "v1",\r
399                         Kind:       "ConfigMap",\r
400                 },\r
401         }\r
402 \r
403         configMap15 := &corev1.ConfigMap{\r
404                 Data: map[string]string{\r
405                         "logcfg": "loglevel: 3",\r
406                 },\r
407                 ObjectMeta: metav1.ObjectMeta{\r
408                         Name:      "configmap-ricplt-e2mgr-loglevel-configmap",\r
409                         Namespace: "ricplt",\r
410                 },\r
411                 TypeMeta: metav1.TypeMeta{\r
412                         APIVersion: "v1",\r
413                         Kind:       "ConfigMap",\r
414                 },\r
415         }\r
416 \r
417         configMap16 := &corev1.ConfigMap{\r
418                 Data: map[string]string{\r
419                         "RMR_RTG_SVC": "4561",\r
420                         "RMR_SRC_ID":  "service-ricplt-e2mgr-rmr.ricplt",\r
421                 },\r
422                 ObjectMeta: metav1.ObjectMeta{\r
423                         Name: "configmap-ricplt-e2mgr-env",\r
424                 },\r
425                 TypeMeta: metav1.TypeMeta{\r
426                         APIVersion: "v1",\r
427                         Kind:       "ConfigMap",\r
428                 },\r
429         }\r
430 \r
431         configMap17 := &corev1.ConfigMap{\r
432                 Data: map[string]string{\r
433                         "log-level": "log-level: 3",\r
434                 },\r
435                 ObjectMeta: metav1.ObjectMeta{\r
436                         Name:      "configmap-ricplt-e2term-loglevel-configmap",\r
437                         Namespace: "ricplt",\r
438                 },\r
439                 TypeMeta: metav1.TypeMeta{\r
440                         APIVersion: "v1",\r
441                         Kind:       "ConfigMap",\r
442                 },\r
443         }\r
444 \r
445         configMap18 := &corev1.ConfigMap{\r
446                 ObjectMeta: metav1.ObjectMeta{\r
447                         Name:      "configmap-ricplt-e2term-router-configmap",\r
448                         Namespace: "ricplt",\r
449                 },\r
450                 TypeMeta: metav1.TypeMeta{\r
451                         APIVersion: "v1",\r
452                         Kind:       "ConfigMap",\r
453                 },\r
454                 Data: map[string]string{\r
455                         "rmr_verbose": "0\n" +\r
456                                 "",\r
457                         "router.txt": "newrt|start\n" +\r
458                                 "rte|1080|service-ricplt-e2mgr-rmr.ricplt:3801\n" +\r
459                                 "rte|1090|service-ricplt-e2term-rmr-alpha.ricplt:38000\n" +\r
460                                 "rte|1100|service-ricplt-e2mgr-rmr.ricplt:3801\n" +\r
461                                 "rte|10020|service-ricplt-e2mgr-rmr.ricplt:3801\n" +\r
462                                 "rte|10060|service-ricplt-e2term-rmr-alpha.ricplt:38000\n" +\r
463                                 "rte|10061|service-ricplt-e2mgr-rmr.ricplt:3801\n" +\r
464                                 "rte|10062|service-ricplt-e2mgr-rmr.ricplt:3801\n" +\r
465                                 "rte|10030|service-ricplt-e2mgr-rmr.ricplt:3801\n" +\r
466                                 "rte|10070|service-ricplt-e2mgr-rmr.ricplt:3801\n" +\r
467                                 "rte|10071|service-ricplt-e2mgr-rmr.ricplt:3801\n" +\r
468                                 "rte|10080|service-ricplt-e2mgr-rmr.ricplt:3801\n" +\r
469                                 "rte|10091|service-ricplt-rsm-rmr.ricplt:4801\n" +\r
470                                 "rte|10092|service-ricplt-rsm-rmr.ricplt:4801\n" +\r
471                                 "rte|10360|service-ricplt-e2term-rmr-alpha.ricplt:38000\n" +\r
472                                 "rte|10361|service-ricplt-e2mgr-rmr.ricplt:3801\n" +\r
473                                 "rte|10362|service-ricplt-e2mgr-rmr.ricplt:3801\n" +\r
474                                 "rte|10370|service-ricplt-e2mgr-rmr.ricplt:3801\n" +\r
475                                 "rte|10371|service-ricplt-e2term-rmr-alpha.ricplt:38000\n" +\r
476                                 "rte|12010|service-ricplt-e2term-rmr-alpha.ricplt:38000\n" +\r
477                                 "rte|12020|service-ricplt-e2term-rmr-alpha.ricplt:38000\n" +\r
478                                 "rte|20001|service-ricplt-a1mediator-rmr.ricplt:4562\n" +\r
479                                 "rte|12011|service-ricxapp-ueec-rmr.ricxapp:4560;service-admission-ctrl-xapp-rmr.ricxapp:4560\n" +\r
480                                 "rte|12050|service-ricxapp-ueec-rmr.ricxapp:4560;service-admission-ctrl-xapp-rmr.ricxapp:4560\n" +\r
481                                 "rte|12012|service-ricxapp-ueec-rmr.ricxapp:4560;service-admission-ctrl-xapp-rmr.ricxapp:4560\n" +\r
482                                 "rte|12021|service-ricxapp-ueec-rmr.ricxapp:4560;service-admission-ctrl-xapp-rmr.ricxapp:4560\n" +\r
483                                 "rte|12022|service-ricxapp-ueec-rmr.ricxapp:4560;service-admission-ctrl-xapp-rmr.ricxapp:4560\n" +\r
484                                 "rte|12041|service-ricxapp-ueec-rmr.ricxapp:4560;service-admission-ctrl-xapp-rmr.ricxapp:4560\n" +\r
485                                 "rte|12042|service-ricxapp-ueec-rmr.ricxapp:4560;service-admission-ctrl-xapp-rmr.ricxapp:4560\n" +\r
486                                 "rte|12050|service-ricxapp-ueec-rmr.ricxapp:4560;service-admission-ctrl-xapp-rmr.ricxapp:4560\n" +\r
487                                 "rte|20000|service-ricxapp-ueec-rmr.ricxapp:4560;service-admission-ctrl-xapp-rmr.ricxapp:4560\n" +\r
488                                 "newrt|end",\r
489                 },\r
490         }\r
491 \r
492         configMap19 := &corev1.ConfigMap{\r
493                 ObjectMeta: metav1.ObjectMeta{\r
494                         Name: "configmap-ricplt-e2term-env-alpha",\r
495                 },\r
496                 TypeMeta: metav1.TypeMeta{\r
497                         APIVersion: "v1",\r
498                         Kind:       "ConfigMap",\r
499                 },\r
500                 Data: map[string]string{\r
501                         "RMR_SEED_RT":   "router.txt",\r
502                         "RMR_SRC_ID":    "service-ricplt-e2term-rmr-alpha.ricplt",\r
503                         "RMR_VCTL_FILE": "/tmp/rmr_verbose",\r
504                         "nano":          "38000",\r
505                         "print":         "1",\r
506                         "sctp":          "36422",\r
507                         "volume":        "/data/outgoing/",\r
508                         "RMR_RTG_SVC":   "4561",\r
509                 },\r
510         }\r
511 \r
	// configMap20: Kong custom nginx server blocks
	// ("release-name-kong-default-custom-server-blocks").
	// The embedded servers.conf exposes Kong's Prometheus exporter on
	// 0.0.0.0:9542 (/metrics via lua exporter, /status health check,
	// internal /nginx_status stub).
	// NOTE(review): this object appears to be an exact duplicate of
	// configMap22 below (same Name, same Data); applying both to one
	// namespace would collide — confirm whether one can be dropped.
	configMap20 := &corev1.ConfigMap{
		Data: map[string]string{
			"servers.conf": "# Prometheus metrics and health-checking server\n" +
				"server {\n" +
				"    server_name kong_prometheus_exporter;\n" +
				"    listen 0.0.0.0:9542; # can be any other port as well\n" +
				"    access_log off;\n" +
				"    location /status {\n" +
				"        default_type text/plain;\n" +
				"        return 200;\n" +
				"    }\n" +
				"    location /metrics {\n" +
				"        default_type text/plain;\n" +
				"        content_by_lua_block {\n" +
				"             local prometheus = require \"kong.plugins.prometheus.exporter\"\n" +
				"             prometheus:collect()\n" +
				"        }\n" +
				"    }\n" +
				"    location /nginx_status {\n" +
				"        internal;\n" +
				"        access_log off;\n" +
				"        stub_status;\n" +
				"    }\n" +
				"}\n" +
				"",
		},
		ObjectMeta: metav1.ObjectMeta{
			// Standard Helm chart labels carried over from the kong-0.36.6 chart.
			Labels: map[string]string{
				"app.kubernetes.io/name":       "kong",
				"app.kubernetes.io/version":    "1.4",
				"helm.sh/chart":                "kong-0.36.6",
				"app.kubernetes.io/instance":   "release-name",
				"app.kubernetes.io/managed-by": "Helm",
			},
			Name: "release-name-kong-default-custom-server-blocks",
		},
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "ConfigMap",
		},
	}
553 \r
554         configMap21 := &corev1.ConfigMap{\r
555                 Data: map[string]string{\r
556                         "TRACING_JAEGER_LOG_LEVEL":     "error",\r
557                         "TRACING_JAEGER_SAMPLER_PARAM": "1",\r
558                         "TRACING_JAEGER_SAMPLER_TYPE":  "const",\r
559                         "TRACING_ENABLED":              "0",\r
560                         "TRACING_JAEGER_AGENT_ADDR":    "service-ricplt-jaegeradapter-agent.ricplt",\r
561                 },\r
562                 ObjectMeta: metav1.ObjectMeta{\r
563                         Namespace: "ricplt",\r
564                         Name:      "configmap-ricplt-jaegeradapter",\r
565                 },\r
566                 TypeMeta: metav1.TypeMeta{\r
567                         Kind:       "ConfigMap",\r
568                         APIVersion: "v1",\r
569                 },\r
570         }\r
571 \r
	// configMap22: Kong custom nginx server blocks
	// ("release-name-kong-default-custom-server-blocks") exposing the
	// Prometheus exporter on 0.0.0.0:9542.
	// NOTE(review): duplicates configMap20 above (identical Name and
	// Data); two objects with the same name cannot coexist in one
	// namespace — confirm whether this copy is intentional.
	configMap22 := &corev1.ConfigMap{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "ConfigMap",
		},
		Data: map[string]string{
			"servers.conf": "# Prometheus metrics and health-checking server\n" +
				"server {\n" +
				"    server_name kong_prometheus_exporter;\n" +
				"    listen 0.0.0.0:9542; # can be any other port as well\n" +
				"    access_log off;\n" +
				"    location /status {\n" +
				"        default_type text/plain;\n" +
				"        return 200;\n" +
				"    }\n" +
				"    location /metrics {\n" +
				"        default_type text/plain;\n" +
				"        content_by_lua_block {\n" +
				"             local prometheus = require \"kong.plugins.prometheus.exporter\"\n" +
				"             prometheus:collect()\n" +
				"        }\n" +
				"    }\n" +
				"    location /nginx_status {\n" +
				"        internal;\n" +
				"        access_log off;\n" +
				"        stub_status;\n" +
				"    }\n" +
				"}\n" +
				"",
		},
		ObjectMeta: metav1.ObjectMeta{
			// Standard Helm chart labels carried over from the kong-0.36.6 chart.
			Labels: map[string]string{
				"app.kubernetes.io/version":    "1.4",
				"helm.sh/chart":                "kong-0.36.6",
				"app.kubernetes.io/instance":   "release-name",
				"app.kubernetes.io/managed-by": "Helm",
				"app.kubernetes.io/name":       "kong",
			},
			Name: "release-name-kong-default-custom-server-blocks",
		},
	}
613 \r
	// configMap23: O1 mediator application configuration
	// ("configmap-ricplt-o1mediator-appconfig-configmap", namespace
	// "ricplt"). Bundles the mediator's JSON config (HTTP on :8080,
	// RMR on tcp:4560, SDL/RNIB namespaces, SBI/NBI endpoints) and a
	// static RMR route table for message type 13111.
	configMap23 := &corev1.ConfigMap{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "ConfigMap",
		},
		Data: map[string]string{
			"config-file.json": "{\n" +
				"    \"local\": {\n" +
				"        \"host\": \":8080\"\n" +
				"    },\n" +
				"    \"logger\": {\n" +
				"        \"level\": 4\n" +
				"    },\n" +
				"    \"db\": {\n" +
				"        \"namespaces\": [\"sdl\", \"rnib\"]\n" +
				"    },\n" +
				"    \"rmr\": {\n" +
				"        \"protPort\": \"tcp:4560\",\n" +
				"        \"maxSize\": 65536,\n" +
				"        \"numWorkers\": 1\n" +
				"    },\n" +
				"    \"sbi\": {\n" +
				"        \"appmgrAddr\": \"service-ricplt-appmgr-http:8080\",\n" +
				"        \"alertmgrAddr\": \"r4-infrastructure-prometheus-alertmanager:80\",\n" +
				"        \"timeout\": 30\n" +
				"    },\n" +
				"    \"nbi\": {\n" +
				"        \"schemas\": [\"o-ran-sc-ric-xapp-desc-v1\", \"o-ran-sc-ric-ueec-config-v1\"]\n" +
				"    },\n" +
				"    \"controls\": {\n" +
				"        \"active\": true\n" +
				"    }\n" +
				"}\n" +
				"\n" +
				"",
			// RMR route table: message type 13111 routed to two local ports.
			"uta_rtg.rt": "newrt|start\n" +
				"rte|13111|127.0.0.1:4588\n" +
				"rte|13111|127.0.0.1:4560\n" +
				"newrt|end\n" +
				"",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "configmap-ricplt-o1mediator-appconfig-configmap",
			Namespace: "ricplt",
		},
	}
660 \r
661         configMap24 := &corev1.ConfigMap{\r
662                 Data: map[string]string{\r
663                         "RMR_SEED_RT": "/etc/o1agent/uta_rtg.rt",\r
664                 },\r
665                 ObjectMeta: metav1.ObjectMeta{\r
666                         Name: "configmap-ricplt-o1mediator-env",\r
667                 },\r
668                 TypeMeta: metav1.TypeMeta{\r
669                         Kind:       "ConfigMap",\r
670                         APIVersion: "v1",\r
671                 },\r
672         }\r
673 \r
	// configMap25: Alertmanager configuration
	// ("release-name-prometheus-alertmanager", namespace "ricplt").
	// Alerts are grouped by alertname/severity/instance/job and pushed
	// to the VESPA manager webhook at service-ricplt-vespamgr-http:9095.
	configMap25 := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			// Standard Helm labels from the prometheus-11.3.0 chart.
			Labels: map[string]string{
				"chart":     "prometheus-11.3.0",
				"component": "alertmanager",
				"heritage":  "Helm",
				"release":   "release-name",
				"app":       "prometheus",
			},
			Name:      "release-name-prometheus-alertmanager",
			Namespace: "ricplt",
		},
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "ConfigMap",
		},
		Data: map[string]string{
			"alertmanager.yml": "global:\n" +
				"  resolve_timeout: 5m\n" +
				"receivers:\n" +
				"- name: vespa\n" +
				"  webhook_configs:\n" +
				"  - url: http://service-ricplt-vespamgr-http:9095/alerts\n" +
				"route:\n" +
				"  group_by:\n" +
				"  - alertname\n" +
				"  - severity\n" +
				"  - instance\n" +
				"  - job\n" +
				"  group_interval: 3m\n" +
				"  group_wait: 5s\n" +
				"  receiver: vespa\n" +
				"  repeat_interval: 1h\n" +
				"  routes:\n" +
				"  - continue: true\n" +
				"    receiver: vespa\n" +
				"",
		},
	}
713 \r
	// configMap26: Prometheus server configuration
	// ("release-name-prometheus-server", namespace "ricplt").
	// Carries empty placeholder rule files plus the full prometheus.yml
	// with scrape jobs for the apiserver, nodes, cAdvisor, annotated
	// service endpoints (normal + slow), pushgateway, probed services
	// and annotated pods (normal + slow), and alertmanager discovery
	// restricted to pods labelled app=prometheus/component=alertmanager
	// in the ricplt namespace.
	configMap26 := &corev1.ConfigMap{
		Data: map[string]string{
			// Placeholder rule files — intentionally empty YAML mappings.
			"alerting_rules.yml": "{}\n" +
				"",
			"alerts": "{}\n" +
				"",
			"prometheus.yml": "global:\n" +
				"  evaluation_interval: 1m\n" +
				"  scrape_interval: 1m\n" +
				"  scrape_timeout: 10s\n" +
				"rule_files:\n" +
				"- /etc/config/recording_rules.yml\n" +
				"- /etc/config/alerting_rules.yml\n" +
				"- /etc/config/rules\n" +
				"- /etc/config/alerts\n" +
				"scrape_configs:\n" +
				"- job_name: prometheus\n" +
				"  static_configs:\n" +
				"  - targets:\n" +
				"    - localhost:9090\n" +
				"- bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n" +
				"  job_name: kubernetes-apiservers\n" +
				"  kubernetes_sd_configs:\n" +
				"  - role: endpoints\n" +
				"  relabel_configs:\n" +
				"  - action: keep\n" +
				"    regex: default;kubernetes;https\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_namespace\n" +
				"    - __meta_kubernetes_service_name\n" +
				"    - __meta_kubernetes_endpoint_port_name\n" +
				"  scheme: https\n" +
				"  tls_config:\n" +
				"    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n" +
				"    insecure_skip_verify: true\n" +
				"- bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n" +
				"  job_name: kubernetes-nodes\n" +
				"  kubernetes_sd_configs:\n" +
				"  - role: node\n" +
				"  relabel_configs:\n" +
				"  - action: labelmap\n" +
				"    regex: __meta_kubernetes_node_label_(.+)\n" +
				"  - replacement: kubernetes.default.svc:443\n" +
				"    target_label: __address__\n" +
				"  - regex: (.+)\n" +
				"    replacement: /api/v1/nodes/$1/proxy/metrics\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_node_name\n" +
				"    target_label: __metrics_path__\n" +
				"  scheme: https\n" +
				"  tls_config:\n" +
				"    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n" +
				"    insecure_skip_verify: true\n" +
				"- bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n" +
				"  job_name: kubernetes-nodes-cadvisor\n" +
				"  kubernetes_sd_configs:\n" +
				"  - role: node\n" +
				"  relabel_configs:\n" +
				"  - action: labelmap\n" +
				"    regex: __meta_kubernetes_node_label_(.+)\n" +
				"  - replacement: kubernetes.default.svc:443\n" +
				"    target_label: __address__\n" +
				"  - regex: (.+)\n" +
				"    replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_node_name\n" +
				"    target_label: __metrics_path__\n" +
				"  scheme: https\n" +
				"  tls_config:\n" +
				"    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n" +
				"    insecure_skip_verify: true\n" +
				"- job_name: kubernetes-service-endpoints\n" +
				"  kubernetes_sd_configs:\n" +
				"  - role: endpoints\n" +
				"  relabel_configs:\n" +
				"  - action: keep\n" +
				"    regex: true\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_service_annotation_prometheus_io_scrape\n" +
				"  - action: replace\n" +
				"    regex: (https?)\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_service_annotation_prometheus_io_scheme\n" +
				"    target_label: __scheme__\n" +
				"  - action: replace\n" +
				"    regex: (.+)\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_service_annotation_prometheus_io_path\n" +
				"    target_label: __metrics_path__\n" +
				"  - action: replace\n" +
				"    regex: ([^:]+)(?::\\d+)?;(\\d+)\n" +
				"    replacement: $1:$2\n" +
				"    source_labels:\n" +
				"    - __address__\n" +
				"    - __meta_kubernetes_service_annotation_prometheus_io_port\n" +
				"    target_label: __address__\n" +
				"  - action: labelmap\n" +
				"    regex: __meta_kubernetes_service_label_(.+)\n" +
				"  - action: replace\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_namespace\n" +
				"    target_label: kubernetes_namespace\n" +
				"  - action: replace\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_service_name\n" +
				"    target_label: kubernetes_name\n" +
				"  - action: replace\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_pod_node_name\n" +
				"    target_label: kubernetes_node\n" +
				"- job_name: kubernetes-service-endpoints-slow\n" +
				"  kubernetes_sd_configs:\n" +
				"  - role: endpoints\n" +
				"  relabel_configs:\n" +
				"  - action: keep\n" +
				"    regex: true\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_service_annotation_prometheus_io_scrape_slow\n" +
				"  - action: replace\n" +
				"    regex: (https?)\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_service_annotation_prometheus_io_scheme\n" +
				"    target_label: __scheme__\n" +
				"  - action: replace\n" +
				"    regex: (.+)\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_service_annotation_prometheus_io_path\n" +
				"    target_label: __metrics_path__\n" +
				"  - action: replace\n" +
				"    regex: ([^:]+)(?::\\d+)?;(\\d+)\n" +
				"    replacement: $1:$2\n" +
				"    source_labels:\n" +
				"    - __address__\n" +
				"    - __meta_kubernetes_service_annotation_prometheus_io_port\n" +
				"    target_label: __address__\n" +
				"  - action: labelmap\n" +
				"    regex: __meta_kubernetes_service_label_(.+)\n" +
				"  - action: replace\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_namespace\n" +
				"    target_label: kubernetes_namespace\n" +
				"  - action: replace\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_service_name\n" +
				"    target_label: kubernetes_name\n" +
				"  - action: replace\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_pod_node_name\n" +
				"    target_label: kubernetes_node\n" +
				"  scrape_interval: 5m\n" +
				"  scrape_timeout: 30s\n" +
				"- honor_labels: true\n" +
				"  job_name: prometheus-pushgateway\n" +
				"  kubernetes_sd_configs:\n" +
				"  - role: service\n" +
				"  relabel_configs:\n" +
				"  - action: keep\n" +
				"    regex: pushgateway\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_service_annotation_prometheus_io_probe\n" +
				"- job_name: kubernetes-services\n" +
				"  kubernetes_sd_configs:\n" +
				"  - role: service\n" +
				"  metrics_path: /probe\n" +
				"  params:\n" +
				"    module:\n" +
				"    - http_2xx\n" +
				"  relabel_configs:\n" +
				"  - action: keep\n" +
				"    regex: true\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_service_annotation_prometheus_io_probe\n" +
				"  - source_labels:\n" +
				"    - __address__\n" +
				"    target_label: __param_target\n" +
				"  - replacement: blackbox\n" +
				"    target_label: __address__\n" +
				"  - source_labels:\n" +
				"    - __param_target\n" +
				"    target_label: instance\n" +
				"  - action: labelmap\n" +
				"    regex: __meta_kubernetes_service_label_(.+)\n" +
				"  - source_labels:\n" +
				"    - __meta_kubernetes_namespace\n" +
				"    target_label: kubernetes_namespace\n" +
				"  - source_labels:\n" +
				"    - __meta_kubernetes_service_name\n" +
				"    target_label: kubernetes_name\n" +
				"- job_name: kubernetes-pods\n" +
				"  kubernetes_sd_configs:\n" +
				"  - role: pod\n" +
				"  relabel_configs:\n" +
				"  - action: keep\n" +
				"    regex: true\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_pod_annotation_prometheus_io_scrape\n" +
				"  - action: replace\n" +
				"    regex: (.+)\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_pod_annotation_prometheus_io_path\n" +
				"    target_label: __metrics_path__\n" +
				"  - action: replace\n" +
				"    regex: ([^:]+)(?::\\d+)?;(\\d+)\n" +
				"    replacement: $1:$2\n" +
				"    source_labels:\n" +
				"    - __address__\n" +
				"    - __meta_kubernetes_pod_annotation_prometheus_io_port\n" +
				"    target_label: __address__\n" +
				"  - action: labelmap\n" +
				"    regex: __meta_kubernetes_pod_label_(.+)\n" +
				"  - action: replace\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_namespace\n" +
				"    target_label: kubernetes_namespace\n" +
				"  - action: replace\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_pod_name\n" +
				"    target_label: kubernetes_pod_name\n" +
				"- job_name: kubernetes-pods-slow\n" +
				"  kubernetes_sd_configs:\n" +
				"  - role: pod\n" +
				"  relabel_configs:\n" +
				"  - action: keep\n" +
				"    regex: true\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_pod_annotation_prometheus_io_scrape_slow\n" +
				"  - action: replace\n" +
				"    regex: (.+)\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_pod_annotation_prometheus_io_path\n" +
				"    target_label: __metrics_path__\n" +
				"  - action: replace\n" +
				"    regex: ([^:]+)(?::\\d+)?;(\\d+)\n" +
				"    replacement: $1:$2\n" +
				"    source_labels:\n" +
				"    - __address__\n" +
				"    - __meta_kubernetes_pod_annotation_prometheus_io_port\n" +
				"    target_label: __address__\n" +
				"  - action: labelmap\n" +
				"    regex: __meta_kubernetes_pod_label_(.+)\n" +
				"  - action: replace\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_namespace\n" +
				"    target_label: kubernetes_namespace\n" +
				"  - action: replace\n" +
				"    source_labels:\n" +
				"    - __meta_kubernetes_pod_name\n" +
				"    target_label: kubernetes_pod_name\n" +
				"  scrape_interval: 5m\n" +
				"  scrape_timeout: 30s\n" +
				"alerting:\n" +
				"  alertmanagers:\n" +
				"  - kubernetes_sd_configs:\n" +
				"      - role: pod\n" +
				"    tls_config:\n" +
				"      ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n" +
				"    bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n" +
				"    relabel_configs:\n" +
				"    - source_labels: [__meta_kubernetes_namespace]\n" +
				"      regex: ricplt\n" +
				"      action: keep\n" +
				"    - source_labels: [__meta_kubernetes_pod_label_app]\n" +
				"      regex: prometheus\n" +
				"      action: keep\n" +
				"    - source_labels: [__meta_kubernetes_pod_label_component]\n" +
				"      regex: alertmanager\n" +
				"      action: keep\n" +
				"    - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_probe]\n" +
				"      regex: .*\n" +
				"      action: keep\n" +
				"    - source_labels: [__meta_kubernetes_pod_container_port_number]\n" +
				"      regex:\n" +
				"      action: drop\n" +
				"",
			"recording_rules.yml": "{}\n" +
				"",
			"rules": "{}\n" +
				"",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "ricplt",
			// Standard Helm labels from the prometheus-11.3.0 chart.
			Labels: map[string]string{
				"chart":     "prometheus-11.3.0",
				"component": "server",
				"heritage":  "Helm",
				"release":   "release-name",
				"app":       "prometheus",
			},
			Name: "release-name-prometheus-server",
		},
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "ConfigMap",
		},
	}
1009 \r
	// configMap27: Redis cluster bootstrap files ("redis-cluster-cm").
	// update-node.sh rewrites this pod's own IP in nodes.conf at startup
	// (pods get a new IP on restart); redis.conf enables cluster mode
	// with AOF persistence and protected-mode off.
	configMap27 := &corev1.ConfigMap{
		Data: map[string]string{
			// Replaces the stale IP on the "myself" line with $POD_IP,
			// then execs the container's original command.
			"update-node.sh": "#!/bin/sh\n" +
				"REDIS_NODES=\"/data/nodes.conf\"\n" +
				"sed -i -e \"/myself/ s/[0-9]\\{1,3\\}\\.[0-9]\\{1,3\\}\\.[0-9]\\{1,3\\}\\.[0-9]\\{1,3\\}/${POD_IP}/\" ${REDIS_NODES}\n" +
				"exec \"$@\"\n" +
				"",
			// NOTE(review): last line has no trailing newline — presumably
			// harmless to Redis, but differs from the other keys; confirm.
			"redis.conf": "cluster-enabled yes\n" +
				"cluster-require-full-coverage no\n" +
				"cluster-node-timeout 15000\n" +
				"cluster-config-file /data/nodes.conf\n" +
				"cluster-migration-barrier 1\n" +
				"appendonly yes\n" +
				"protected-mode no",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: "redis-cluster-cm",
		},
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "ConfigMap",
		},
	}
1033 \r
1034         configMap28 := &corev1.ConfigMap{\r
1035                 TypeMeta: metav1.TypeMeta{\r
1036                         Kind:       "ConfigMap",\r
1037                         APIVersion: "v1",\r
1038                 },\r
1039                 Data: map[string]string{\r
1040                         "placenode.pl": "#!/usr/bin/env perl\n" +\r
1041                                 "=head\n" +\r
1042                                 "============LICENSE_START=======================================================\n" +\r
1043                                 "\n" +\r
1044                                 "================================================================================\n" +\r
1045                                 "Copyright (C) 2020 Hcl Technologies Limited.\n" +\r
1046                                 "================================================================================\n" +\r
1047                                 "Licensed under the Apache License, Version 2.0 (the \"License\");\n" +\r
1048                                 "you may not use this file except in compliance with the License.\n" +\r
1049                                 "You may obtain a copy of the License at\n" +\r
1050                                 "\n" +\r
1051                                 "     http://www.apache.org/licenses/LICENSE-2.0\n" +\r
1052                                 "\n" +\r
1053                                 "Unless required by applicable law or agreed to in writing, software\n" +\r
1054                                 "distributed under the License is distributed on an \"AS IS\" BASIS,\n" +\r
1055                                 "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" +\r
1056                                 "See the License for the specific language governing permissions and\n" +\r
1057                                 "limitations under the License.\n" +\r
1058                                 "============LICENSE_END=========================================================\n" +\r
1059                                 "\n" +\r
1060                                 "\n" +\r
1061                                 "About:\n" +\r
1062                                 "\n" +\r
1063                                 "This script has been developed as part of https://jira.o-ran-sc.org/browse/RIC-360\n" +\r
1064                                 "This script identifies the missing anti-affinity(as per above ticket) of redis instances \n" +\r
1065                                 "required in a redis-cluster. If there is an  undesired  anti-affinity this script can  be \n" +\r
1066                                 "executed to communicate to redis nodes  to switch roles (e.g. master/slave) such that the \n" +\r
1067                                 "end-state meets the desired anti-affinity.\n" +\r
1068                                 "       \n" +\r
1069                                 "\n" +\r
1070                                 "Pre-requisites: \n" +\r
1071                                 "\n" +\r
1072                                 "  1) A redis cluster with 3 masters (2 replicas each) deployed on kubernetes 1.18 (or later) \n" +\r
1073                                 "  2) Three available worker nodes for serving redis workloads\n" +\r
1074                                 "  3) kubectl (with access to the k8 cluster)\n" +\r
1075                                 "\n" +\r
1076                                 "=cut\n" +\r
1077                                 "\n" +\r
1078                                 "\n" +\r
1079                                 "my $podRow = { \n" +\r
1080                                 "\"podIP\"      => \"\",\n" +\r
1081                                 "\"podName\"    => \"\",\n" +\r
1082                                 "\"k8Node\"     => \"\",\n" +\r
1083                                 "\n" +\r
1084                                 "\"rdNodeRole\" => \"\",\n" +\r
1085                                 "\"rdNodeID\"   => \"\",\n" +\r
1086                                 "\n" +\r
1087                                 "\"rdMasterNodeID\"   => \"\",\n" +\r
1088                                 "\"slaveIPs\"    => [] \n" +\r
1089                                 "};\n" +\r
1090                                 "\n" +\r
1091                                 "# Pod label for redis nodes\n" +\r
1092                                 "my $podLabel = $ENV{'POD_LABEL'};\n" +\r
1093                                 "\n" +\r
1094                                 "my $podTable =   [];\n" +\r
1095                                 "my $k8NodeInfo = [];\n" +\r
1096                                 "\n" +\r
1097                                 "setk8NodesInfo();\n" +\r
1098                                 "validate();\n" +\r
1099                                 "\n" +\r
1100                                 "# Master\n" +\r
1101                                 "spreadMastersIfRequired();\n" +\r
1102                                 "# Slave\n" +\r
1103                                 "my $disparity = getSlaveDisparity();\n" +\r
1104                                 "spreadSlavesIfRequired();\n" +\r
1105                                 "\n" +\r
1106                                 "sub validate() {\n" +\r
1107                                 "    my @masters = map { $_->{'rdNodeRole'} eq 'master' ? $_ : () } @{$podTable};\n" +\r
1108                                 "       if ( @masters > @{$k8NodeInfo->{allk8Nodes}} ) {\n" +\r
1109                                 "               print \"Info: Skipping any action as num of master > number of k8 nodes..\\n\";\n" +\r
1110                                 "           exit;\n" +\r
1111                                 "       }\n" +\r
1112                                 "}\n" +\r
1113                                 "\n" +\r
1114                                 "\n" +\r
1115                                 "sub spreadSlavesIfRequired() {\n" +\r
1116                                 "    \n" +\r
1117                                 "\n" +\r
1118                                 "       # Get node with maximum disparity first\n" +\r
1119                                 "    my @disparityMatrix = reverse sort { @{$a} <=> @{$b} } @${disparity}; \n" +\r
1120                                 "    #@disparityMatrix = grep defined, @disparityMatrix;\n" +\r
1121                                 "    #@disparityMatrix = map { defined $_ ? $_ : () } @disparityMatrix;\n" +\r
1122                                 "\n" +\r
1123                                 "    # Get list of slaves to be swapped roles.\n" +\r
1124                                 "    my @slaveSwapList = ();\n" +\r
1125                                 "    my $maxDisparityPerNode = @{$disparityMatrix[0]};\n" +\r
1126                                 "\n" +\r
1127                                 "    for (my $disparityPass=0; $disparityPass < $maxDisparityPerNode; $disparityPass++) {\n" +\r
1128                                 "        for (my $k8NodeIndex=0; $k8NodeIndex <= $#{disparityMatrix}; $k8NodeIndex++) {\n" +\r
1129                                 "                  #print \"$disparityMatrix[$disparityPass] && $disparityMatrix[$k8NodeIndex][$disparityPass]\";\n" +\r
1130                                 "           if ( $disparityMatrix[$disparityPass] && $disparityMatrix[$k8NodeIndex][$disparityPass] ) {\n" +\r
1131                                 "                           push(@slaveSwapList,$disparityMatrix[$k8NodeIndex][$disparityPass]);\n" +\r
1132                                 "                  }\n" +\r
1133                                 "        }\n" +\r
1134                                 "    }\n" +\r
1135                                 "    if ( ! @slaveSwapList ) {\n" +\r
1136                                 "       print \"Info: No disparity found with slaves.\\n\" if ( @slaveSwapList < 2);\n" +\r
1137                                 "               exit;\n" +\r
1138                                 "       } elsif ( @slaveSwapList == 1 ) {\n" +\r
1139                                 "       print \"Info: single host scenario (with no swap candidate in other k8 nodes) found.\\n\";\n" +\r
1140                                 "               exit;\n" +\r
1141                                 "       } else {\n" +\r
1142                                 "       print \"Info: slave disparity found.\\n\";\n" +\r
1143                                 "    }\n" +\r
1144                                 "\n" +\r
1145                                 "       # Swap slaves \n" +\r
1146                                 "       for (my $swapIndex=0; $swapIndex < @slaveSwapList; $swapIndex++) {\n" +\r
1147                                 "               $pod1 = $slaveSwapList[$swapIndex];\n" +\r
1148                                 "               $pod2 = $slaveSwapList[++$swapIndex];\n" +\r
1149                                 "               #print \"Info: Swapping Slaves: \" . join($pod1->{podName}, $pod2->{podName}) . \"\\n\";\n" +\r
1150                                 "               \n" +\r
1151                                 "               my $cmd1 = qq[kubectl exec -it ].\n" +\r
1152                                 "                                  qq[$pod1->{podName}  -- redis-cli -p 6379 cluster replicate $pod2->{rdMasterNodeID} ];\n" +\r
1153                                 "               \n" +\r
1154                                 "               my $cmd2 = qq[kubectl exec -it ].\n" +\r
1155                                 "                                  qq[$pod2->{podName}  -- redis-cli -p 6379 cluster replicate $pod1->{rdMasterNodeID} ];\n" +\r
1156                                 "\n" +\r
1157                                 "           runRediClusterCmd($cmd1);\n" +\r
1158                                 "           runRediClusterCmd($cmd2);\n" +\r
1159                                 "               #print \"\\n$cmd1\";\n" +\r
1160                                 "               #print \"\\n$cmd2\\n\";\n" +\r
1161                                 "    }\n" +\r
1162                                 "\n" +\r
1163                                 "}\n" +\r
1164                                 "\n" +\r
1165                                 "\n" +\r
1166                                 "sub getSlaveDisparity() {\n" +\r
1167                                 "\n" +\r
1168                                 "    # Get Slave Disparity Metrix\n" +\r
1169                                 "    my $disparity = ();\n" +\r
1170                                 "    my $nodeIndex = 0;\n" +\r
1171                                 "    foreach my $k8NodeName ( @{$k8NodeInfo->{allk8Nodes}} ) {\n" +\r
1172                                 "        my @redisNodesOnk8Node = map { $_->{'k8Node'} eq $k8NodeName ? $_ : () } @{$podTable};\n" +\r
1173                                 "        @redisNodesOnk8Node    = sort { $a->{\"rdNodeRole\"} cmp $b->{\"rdNodeRole\"} } @redisNodesOnk8Node;\n" +\r
1174                                 "\n" +\r
1175                                 "        my $master = shift @redisNodesOnk8Node;\n" +\r
1176                                 "        \n" +\r
1177                                 "        for (my $index=0; $index <= $#{redisNodesOnk8Node}; $index++ ) {\n" +\r
1178                                 "            my $slave = $redisNodesOnk8Node[$index];\n" +\r
1179                                 "            #print \"chekcing for pod:  $slave->{podName}\\n\";\n" +\r
1180                                 "            my $disparityFound = 0;\n" +\r
1181                                 "            if ( $slave->{rdMasterNodeID} eq $master->{rdNodeID} ) {\n" +\r
1182                                 "               $disparityFound = 1;\n" +\r
1183                                 "            } else {\n" +\r
1184                                 "               #check is other slaves are its sibling\n" +\r
1185                                 "               for (my $nextIndex=$index + 1; $nextIndex <= $#{redisNodesOnk8Node}; $nextIndex++ ) {\n" +\r
1186                                 "                   if ( $slave->{rdMasterNodeID} eq $redisNodesOnk8Node[$nextIndex]->{rdMasterNodeID} ) {\n" +\r
1187                                 "                          $disparityFound = 1;\n" +\r
1188                                 "                       break;\n" +\r
1189                                 "                   }\n" +\r
1190                                 "               }\n" +\r
1191                                 "            }\n" +\r
1192                                 "                       if ($disparityFound) {\n" +\r
1193                                 "               #$disparity[$nodeIndex][$index] = { 'podName' => $slave->{\"podName\"}, 'rdMasterNodeID' => $slave->{\"rdMasterNodeID\"} } ;\n" +\r
1194                                 "               push(@{$disparity[$nodeIndex]},{ 'podName' => $slave->{\"podName\"}, 'rdMasterNodeID' => $slave->{\"rdMasterNodeID\"} } ) ;\n" +\r
1195                                 "                       }\n" +\r
1196                                 "        }\n" +\r
1197                                 "        $nodeIndex++;\n" +\r
1198                                 "    }\n" +\r
1199                                 "        return \\@disparity;\n" +\r
1200                                 "}\n" +\r
1201                                 "\n" +\r
1202                                 "sub spreadMastersIfRequired() {\n" +\r
1203                                 "\n" +\r
1204                                 "   NODE_WITH_NO_MASTER: foreach my $nodeWithoutMaster (@{$k8NodeInfo->{k8NodesWithoutMaster}}) {\n" +\r
1205                                 "      # For each k8Node without any master \n" +\r
1206                                 "      #    Check for each extra master on its hostNode\n" +\r
1207                                 "      #        Find its slave on the this hostNode (i.e. without any master) \n" +\r
1208                                 "      # Such slave must be Found for 3x3 set-up:\n" +\r
1209                                 "      # Then Promote as master # Re-Evaluate\n" +\r
1210                                 "\n" +\r
1211                                 "      # Get All Redis Slaves on This k8 node\n" +\r
1212                                 "      print \"Info: K8 node without any master : $nodeWithoutMaster\\n\";\n" +\r
1213                                 "      my @rdSlaveNodes =  map { ($_->{'k8Node'} eq $nodeWithoutMaster ) && ($_->{'rdNodeRole'} eq 'slave') ? $_ : () } @{$podTable};\n" +\r
1214                                 "\n" +\r
1215                                 "           foreach my $nodeWithExtraMaster (@{$k8NodeInfo->{k8NodesWithExtraMaster}} ) {\n" +\r
1216                                 "              print \"Info: k8 Node with extra master : $nodeWithExtraMaster\\n\";\n" +\r
1217                                 "              #my @rdSlaveNodes =  map { ($_->{'k8Node'} eq $nodeWithoutMaster ) && ($_->{'rdNodeRole'} eq 'slave') ? $_ : () } @{$podTable};\n" +\r
1218                                 "\n" +\r
1219                                 "              my @masterInstances = map { ($_->{'k8Node'} eq $nodeWithExtraMaster ) && ($_->{'rdNodeRole'} eq 'master') ? $_ : () } @{$podTable};        \n" +\r
1220                                 "              foreach my $master (@masterInstances) {\n" +\r
1221                                 "                  my @slave = map { $_->{\"rdMasterNodeID\"} eq $master->{rdNodeID} ? $_ : () } @rdSlaveNodes;\n" +\r
1222                                 "                  if ( @slave ) {\n" +\r
1223                                 "                      promoteSlaveAsMaster($slave[0]);\n" +\r
1224                                 "                                         my $isPromoted = 0;\n" +\r
1225                                 "                                     my $slaveNodeID= $slave[0]->{rdNodeID};\n" +\r
1226                                 "                                         while( ! $isPromoted ) {\n" +\r
1227                                 "                                                sleep(8);\n" +\r
1228                                 "                                            setk8NodesInfo();\n" +\r
1229                                 "                                                my ($promotedNode) = map { $slaveNodeID eq $_->{rdNodeID} ? $_ : () } @{$podTable};\n" +\r
1230                                 "\n" +\r
1231                                 "                                                if ( $promotedNode->{'rdNodeRole'} ne 'master' ) {\n" +\r
1232                                 "                                                       print (\"Info: Waiting for node promotion confirmation..\\n\");\n" +\r
1233                                 "                                                } else {\n" +\r
1234                                 "                                                       $isPromoted = 1;\n" +\r
1235                                 "                                                       print (\"Info: Node promotion confirmed.\\n\");\n" +\r
1236                                 "                                                }\n" +\r
1237                                 "                                         }\n" +\r
1238                                 "                      next NODE_WITH_NO_MASTER;\n" +\r
1239                                 "                  }\n" +\r
1240                                 "              }\n" +\r
1241                                 "           }\n" +\r
1242                                 "   }\n" +\r
1243                                 "   print \"Info: All redis masters are on separate k8 Nodes. \\n\"    if ( ! @{$k8NodeInfo->{k8NodesWithoutMaster}}) ;\n" +\r
1244                                 "}\n" +\r
1245                                 "\n" +\r
1246                                 "sub promoteSlaveAsMaster() {\n" +\r
1247                                 "    my $slavePod = shift;    \n" +\r
1248                                 "    #print \"Info: Promoting Slave $slavePod->{'podName'} On $slavePod->{'k8Node'} as master\";\n" +\r
1249                                 "    my $cmd = qq[kubectl exec -it $slavePod->{'podName'} -- redis-cli -p 6379 cluster failover takeover];\n" +\r
1250                                 "    runRediClusterCmd($cmd);\n" +\r
1251                                 "    \n" +\r
1252                                 "}\n" +\r
1253                                 "sub runRediClusterCmd() {\n" +\r
1254                                 "  my $cmd = shift;    \n" +\r
1255                                 "  print \"Info: Running Cmd:$cmd \\n\";\n" +\r
1256                                 "  `$cmd;`;\n" +\r
1257                                 "  sleep(8);\n" +\r
1258                                 "}\n" +\r
1259                                 "\n" +\r
1260                                 "\n" +\r
1261                                 "#foreach my $item (@{$podTable}) {\n" +\r
1262                                 "#}\n" +\r
1263                                 "\n" +\r
1264                                 "# find_nodes_without-a-single_master\n" +\r
1265                                 "sub setk8NodesInfo() {\n" +\r
1266                                 "\n" +\r
1267                                 "   $podTable   = [];\n" +\r
1268                                 "   $k8NodeInfo = [];\n" +\r
1269                                 "\n" +\r
1270                                 "   getCurrentStatus();\n" +\r
1271                                 "   # All k8 nodes\n" +\r
1272                                 "   my @k8NodeList = uniq(map { $_->{'k8Node'} } @$podTable);\n" +\r
1273                                 "\n" +\r
1274                                 "   # Find Nodes with At least One master\n" +\r
1275                                 "   my @k8NodesWithMaster;\n" +\r
1276                                 "   foreach my $nodeName (@k8NodeList) {\n" +\r
1277                                 "      push(@k8NodesWithMaster, map { ($_->{'k8Node'} eq $nodeName) && ($_->{'rdNodeRole'} eq 'master')   ? $nodeName : ()  } @{$podTable} );\n" +\r
1278                                 "   }\n" +\r
1279                                 "\n" +\r
1280                                 "   # Find Nodes without any master = All nodes - Nodes with at least one Master\n" +\r
1281                                 "   my %k8NodesMap = ();\n" +\r
1282                                 "   foreach (@k8NodesWithMaster) { \n" +\r
1283                                 "           if ( exists $k8NodesMap{$_} ) {\n" +\r
1284                                 "                   $k8NodesMap{$_}++;\n" +\r
1285                                 "           } else {\n" +\r
1286                                 "                   $k8NodesMap{$_} = 1;\n" +\r
1287                                 "           }\n" +\r
1288                                 "   }\n" +\r
1289                                 "   my @k8NodesWithoutMaster = map { exists $k8NodesMap{$_} ? () : $_ } @k8NodeList;\n" +\r
1290                                 "   my @k8NodesWithExtraMaster = uniq(map { $k8NodesMap{$_} > 1 ? $_ : () } @k8NodesWithMaster);\n" +\r
1291                                 "\n" +\r
1292                                 "   $k8NodeInfo = { 'allk8Nodes' => \\@k8NodeList, 'k8NodesWithExtraMaster' => \\@k8NodesWithExtraMaster, 'k8NodesWithoutMaster' => \\@k8NodesWithoutMaster };\n" +\r
1293                                 "}\n" +\r
1294                                 "\n" +\r
1295                                 "\n" +\r
1296                                 "\n" +\r
1297                                 "\n" +\r
1298                                 "\n" +\r
1299                                 "# Validate if number of masters ,= number of rea\n" +\r
1300                                 "\n" +\r
1301                                 "#\n" +\r
1302                                 "#sub filter\n" +\r
1303                                 "\n" +\r
1304                                 "=head\n" +\r
1305                                 "get \n" +\r
1306                                 "podName where k8Node eq \"x\"\n" +\r
1307                                 "    get position of k8node eq x \n" +\r
1308                                 "where \n" +\r
1309                                 "=cut\n" +\r
1310                                 "\n" +\r
1311                                 "exit;\n" +\r
1312                                 "\n" +\r
1313                                 "sub uniq {\n" +\r
1314                                 "    my %seen;\n" +\r
1315                                 "    grep !$seen{$_}++, @_;\n" +\r
1316                                 "}\n" +\r
1317                                 "\n" +\r
1318                                 "sub getCurrentStatus() {\n" +\r
1319                                 "\n" +\r
1320                                 "    # Run pod list command    \n" +\r
1321                                 "    my @getPods = `kubectl get po --no-headers  -o wide -l $podLabel |grep Running`;    chomp @getPods;\n" +\r
1322                                 "    #my @getPods = `kubectl get po --no-headers  -o wide -l managed-by=redis-cluster-operator|grep Running`;    chomp @getPods;\n" +\r
1323                                 "\n" +\r
1324                                 "    foreach my $podLine (@getPods) {\n" +\r
1325                                 "        my @podData = split(/\\s+/,$podLine);\n" +\r
1326                                 "        my ($podName,$status,$age,$podIP,$podNode) = ($podData[0], $podData[2], $podData[4], $podData[5],$podData[6]);\n" +\r
1327                                 "\n" +\r
1328                                 "        #print \"$podName,$status,$age,$podIP,$podNode\" .\"\\n\"; \n" +\r
1329                                 "        my $podRow = { 'podIP' => $podIP, 'podName' => $podName, 'k8Node' => $podNode, 'podAge' => $age, 'podStatus' => $status };    \n" +\r
1330                                 "        push (@{$podTable},$podRow)\n" +\r
1331                                 "    }\n" +\r
1332                                 "\n" +\r
1333                                 "    my $podName = $podTable->[0]{'podName'};\n" +\r
1334                                 "    #print \"Info:kubectl exec $podName  -- cat nodes.conf|sort -k3\\n\";\n" +\r
1335                                 "    my @rdNodeData = `kubectl exec $podName  -- cat nodes.conf|sort -k3`;    chomp @rdNodeData;\n" +\r
1336                                 "    foreach my $rdNodeLine (@rdNodeData) {\n" +\r
1337                                 "        next if ($rdNodeLine !~ /master|slave/);\n" +\r
1338                                 "            my @rdNodeData = split(/\\s+/,$rdNodeLine);\n" +\r
1339                                 "            my ($rdNodeID,$rdRole,$rdMasterNodeID,$epoch) = ($rdNodeData[0], $rdNodeData[2], $rdNodeData[3],$rdNodeData[5]);\n" +\r
1340                                 "            my ($podIP) = split(/:/,$rdNodeData[1]);\n" +\r
1341                                 "            $rdRole =~ s/myself,//;\n" +\r
1342                                 "\n" +\r
1343                                 "            #print \"$rdNodeID,$rdRole,$rdMasterNodeID,$podIP\" .\"\\n\";\n" +\r
1344                                 "            my $rdElem = { 'podIP'    => $podIP, \n" +\r
1345                                 "                           'rdNodeID' => $rdNodeID,\n" +\r
1346                                 "                           'rdRole'   => $rdRole,\n" +\r
1347                                 "                           'rdMasterNodeID' => $rdMasterNodeID,\n" +\r
1348                                 "                           'epoch'          => $epoch\n" +\r
1349                                 "            };\n" +\r
1350                                 "\n" +\r
1351                                 "        for(my $index=0; $index <= $#{$podTable}; $index++) {\n" +\r
1352                                 "            if ( $podTable->[$index]{'podIP'} eq $podIP ) {\n" +\r
1353                                 "                #print \"Matched\\n\";\n" +\r
1354                                 "                $podTable->[$index]{'rdNodeID'}       = $rdNodeID;\n" +\r
1355                                 "                $podTable->[$index]{'rdNodeRole'}        = $rdRole;\n" +\r
1356                                 "                $podTable->[$index]{'rdMasterNodeID'} = $rdMasterNodeID;\n" +\r
1357                                 "                $podTable->[$index]{'epoch'}          = $epoch;\n" +\r
1358                                 "            }\n" +\r
1359                                 "        }\n" +\r
1360                                 "        #exit;\n" +\r
1361                                 "\n" +\r
1362                                 "    }\n" +\r
1363                                 "}\n" +\r
1364                                 "",\r
1365                         "relatenode.sh": "#!/bin/sh\n" +\r
1366                                 "podLabel=${POD_LABEL}\n" +\r
1367                                 "firstPod=$(kubectl  get   po -o wide -l app.kubernetes.io/name=redis-cluster --no-headers=true|head -1|cut -d\" \" -f1)\n" +\r
1368                                 "\n" +\r
1369                                 "kubectl get po -o wide -l $podLabel |tail +2|awk '{printf(\"%s:%s:%s:%s\\n\",$6,$1,$7,$10)}'|sort  > /tmp/1.txt\n" +\r
1370                                 "kubectl exec  $firstPod  -- cat nodes.conf|sed 's/myself,//'|awk '/master|slave/ {print $2,$1,$3,$4}'|sort > /tmp/2.txt\n" +\r
1371                                 "join -t \":\"  /tmp/1.txt /tmp/2.txt |sort -k3,4 | sed 's/ /:/g'|awk -F\":\" '{print $2,$7,$3,$1,$4,$6,$8}' > /tmp/3.txt\n" +\r
1372                                 "\n" +\r
1373                                 "echo \"\\n   POD_NAME      ROLE      k8NODE        POD_IP                   REDIS_NODE_ID                       REDIS_MASTER_NODE_ID\"\n" +\r
1374                                 "grep $(cut -d\" \" -f4 /tmp/2.txt|sort -u|grep -v \"-\"|sed -n '1p') /tmp/3.txt\n" +\r
1375                                 "echo \"\"\n" +\r
1376                                 "grep $(cut -d\" \" -f4 /tmp/2.txt|sort -u|grep -v \"-\"|sed -n '2p') /tmp/3.txt\n" +\r
1377                                 "echo \"\"\n" +\r
1378                                 "grep $(cut -d\" \" -f4 /tmp/2.txt|sort -u|grep -v \"-\"|sed -n '3p') /tmp/3.txt",\r
1379                 },\r
1380                 ObjectMeta: metav1.ObjectMeta{\r
1381                         Name: "assigner-cm",\r
1382                 },\r
1383         }\r
1384 \r
1385         configMap29 := &corev1.ConfigMap{\r
1386                 Data: map[string]string{\r
1387                         "rmr_verbose": "0\n" +\r
1388                                 "",\r
1389                         "router.txt": "newrt|start\n" +\r
1390                                 "rte|10090|service-ricplt-e2term-rmr.ricplt:38000\n" +\r
1391                                 "newrt|end",\r
1392                 },\r
1393                 ObjectMeta: metav1.ObjectMeta{\r
1394                         Name:      "configmap-ricplt-rsm-router-configmap",\r
1395                         Namespace: "ricplt",\r
1396                 },\r
1397                 TypeMeta: metav1.TypeMeta{\r
1398                         APIVersion: "v1",\r
1399                         Kind:       "ConfigMap",\r
1400                 },\r
1401         }\r
1402 \r
1403         configMap30 := &corev1.ConfigMap{\r
1404                 ObjectMeta: metav1.ObjectMeta{\r
1405                         Name:      "configmap-ricplt-rsm",\r
1406                         Namespace: "ricplt",\r
1407                 },\r
1408                 TypeMeta: metav1.TypeMeta{\r
1409                         APIVersion: "v1",\r
1410                         Kind:       "ConfigMap",\r
1411                 },\r
1412                 Data: map[string]string{\r
1413                         "configuration.yaml": "logging:\n" +\r
1414                                 "  logLevel:  \"info\"\n" +\r
1415                                 "http:\n" +\r
1416                                 "  port: 4800\n" +\r
1417                                 "rmr:\n" +\r
1418                                 "  port: 4801\n" +\r
1419                                 "  maxMsgSize: 4096\n" +\r
1420                                 "  readyIntervalSec: 1\n" +\r
1421                                 "rnib:\n" +\r
1422                                 "  maxRnibConnectionAttempts: 3\n" +\r
1423                                 "  rnibRetryIntervalMs: 10",\r
1424                 },\r
1425         }\r
1426 \r
1427         configMap31 := &corev1.ConfigMap{\r
1428                 Data: map[string]string{\r
1429                         "RMR_RTG_SVC": "4561",\r
1430                 },\r
1431                 ObjectMeta: metav1.ObjectMeta{\r
1432                         Name: "configmap-ricplt-rsm-env",\r
1433                 },\r
1434                 TypeMeta: metav1.TypeMeta{\r
1435                         APIVersion: "v1",\r
1436                         Kind:       "ConfigMap",\r
1437                 },\r
1438         }\r
1439 \r
1440         configMap32 := &corev1.ConfigMap{\r
1441                 Data: map[string]string{\r
1442                         "rtmgrcfg": "\"PlatformComponents\":\n" +\r
1443                                 "  -\n" +\r
1444                                 "    \"name\": \"SUBMAN\"\n" +\r
1445                                 "    \"fqdn\": \"service-ricplt-submgr-rmr.ricplt\"\n" +\r
1446                                 "    \"port\": 4560\n" +\r
1447                                 "  -\n" +\r
1448                                 "    \"name\": \"E2MAN\"\n" +\r
1449                                 "    \"fqdn\": \"service-ricplt-e2mgr-rmr.ricplt\"\n" +\r
1450                                 "    \"port\": 3801\n" +\r
1451                                 "  -\n" +\r
1452                                 "    \"name\": \"A1MEDIATOR\"\n" +\r
1453                                 "    \"fqdn\": \"service-ricplt-a1mediator-rmr.ricplt\"\n" +\r
1454                                 "    \"port\": 4562\n" +\r
1455                                 "\n" +\r
1456                                 "\"XMURL\":\n" +\r
1457                                 "  \"http://service-ricplt-appmgr-http:8080/ric/v1/xapps\"\n" +\r
1458                                 "\"E2MURL\":\n" +\r
1459                                 "  \"http://service-ricplt-e2mgr-http:3800/v1/e2t/list\"\n" +\r
1460                                 "\"RTFILE\":\n" +\r
1461                                 "  \"/db/rt.json\"\n" +\r
1462                                 "\"CFGFILE\":\n" +\r
1463                                 "  \"/cfg/rtmgr-config.yaml\"\n" +\r
1464                                 "\"RPE\":\n" +\r
1465                                 "  \"rmrpush\"\n" +\r
1466                                 "\"SBI\":\n" +\r
1467                                 "  \"rmrpush\"\n" +\r
1468                                 "\"SBIURL\":\n" +\r
1469                                 "  \"0.0.0.0\"\n" +\r
1470                                 "\"NBI\":\n" +\r
1471                                 "  \"httpRESTful\"\n" +\r
1472                                 "\"NBIURL\":\n" +\r
1473                                 "  \"http://service-ricplt-rtmgr-http:3800\"\n" +\r
1474                                 "\"SDL\":\n" +\r
1475                                 "  \"file\"\n" +\r
1476                                 "\"local\":\n" +\r
1477                                 "  \"host\": \":8080\"\n" +\r
1478                                 "\"logger\":\n" +\r
1479                                 "  \"level\": 4\n" +\r
1480                                 "\"periodicRoutes\":\n" +\r
1481                                 "  \"enable\"              \n" +\r
1482                                 "\"rmr\":\n" +\r
1483                                 "  \"protPort\": \"tcp:4560\"\n" +\r
1484                                 "  \"maxSize\": 1024\n" +\r
1485                                 "  \"numWorkers\": 1\n" +\r
1486                                 "  \"threadType\": 1\n" +\r
1487                                 "\"messagetypes\": [\n" +\r
1488                                 "   \"RIC_HEALTH_CHECK_REQ=100\",\n" +\r
1489                                 "   \"RIC_HEALTH_CHECK_RESP=101\",\n" +\r
1490                                 "   \"RIC_ALARM=110\",\n" +\r
1491                                 "   \"RIC_ALARM_QUERY=111\",\n" +\r
1492                                 "   \"RIC_SCTP_CONNECTION_FAILURE=1080\",\n" +\r
1493                                 "   \"E2_TERM_INIT=1100\",\n" +\r
1494                                 "   \"E2_TERM_KEEP_ALIVE_REQ=1101\",\n" +\r
1495                                 "   \"E2_TERM_KEEP_ALIVE_RESP=1102\",\n" +\r
1496                                 "   \"RIC_SCTP_CLEAR_ALL=1090\",\n" +\r
1497                                 "   \"RAN_CONNECTED=1200\",\n" +\r
1498                                 "   \"RAN_RESTARTED=1210\",\n" +\r
1499                                 "   \"RAN_RECONFIGURED=1220\",\n" +\r
1500                                 "   \"RIC_ENB_LOAD_INFORMATION=10020\",\n" +\r
1501                                 "   \"RIC_SN_STATUS_TRANSFER=10040\",\n" +\r
1502                                 "   \"RIC_UE_CONTEXT_RELEASE=10050\",\n" +\r
1503                                 "   \"RIC_X2_SETUP_REQ=10060\",\n" +\r
1504                                 "   \"RIC_X2_SETUP_RESP=10061\",\n" +\r
1505                                 "   \"RIC_X2_SETUP_FAILURE=10062\",\n" +\r
1506                                 "   \"RIC_X2_RESET=10070\",\n" +\r
1507                                 "   \"RIC_X2_RESET_RESP=10071\",\n" +\r
1508                                 "   \"RIC_ENB_CONF_UPDATE=10080\",\n" +\r
1509                                 "   \"RIC_ENB_CONF_UPDATE_ACK=10081\",\n" +\r
1510                                 "   \"RIC_ENB_CONF_UPDATE_FAILURE=10082\",\n" +\r
1511                                 "   \"RIC_RES_STATUS_REQ=10090\",\n" +\r
1512                                 "   \"RIC_RES_STATUS_RESP=10091\",\n" +\r
1513                                 "   \"RIC_RES_STATUS_FAILURE=10092\",\n" +\r
1514                                 "   \"RIC_SGNB_ADDITION_REQ=10270\",\n" +\r
1515                                 "   \"RIC_SGNB_ADDITION_ACK=10271\",\n" +\r
1516                                 "   \"RIC_SGNB_ADDITION_REJECT=10272\",\n" +\r
1517                                 "   \"RIC_SGNB_RECONF_COMPLETE=10280\",\n" +\r
1518                                 "   \"RIC_SGNB_MOD_REQUEST=10290\",\n" +\r
1519                                 "   \"RIC_SGNB_MOD_REQUEST_ACK=10291\",\n" +\r
1520                                 "   \"RIC_SGNB_MOD_REQUEST_REJ=10292\",\n" +\r
1521                                 "   \"RIC_SGNB_MOD_REQUIRED=10300\",\n" +\r
1522                                 "   \"RIC_SGNB_MOD_CONFIRM=10301\",\n" +\r
1523                                 "   \"RIC_SGNB_MOD_REFUSE=10302\",\n" +\r
1524                                 "   \"RIC_SGNB_RELEASE_REQUEST=10310\",\n" +\r
1525                                 "   \"RIC_SGNB_RELEASE_REQUEST_ACK=10311\",\n" +\r
1526                                 "   \"RIC_SGNB_RELEASE_REQUIRED=10320\",\n" +\r
1527                                 "   \"RIC_SGNB_RELEASE_CONFIRM=10321\",\n" +\r
1528                                 "   \"RIC_RRC_TRANSFER=10350\",\n" +\r
1529                                 "   \"RIC_ENDC_X2_SETUP_REQ=10360\",\n" +\r
1530                                 "   \"RIC_ENDC_X2_SETUP_RESP=10361\",\n" +\r
1531                                 "   \"RIC_ENDC_X2_SETUP_FAILURE=10362\",\n" +\r
1532                                 "   \"RIC_ENDC_CONF_UPDATE=10370\",\n" +\r
1533                                 "   \"RIC_ENDC_CONF_UPDATE_ACK=10371\",\n" +\r
1534                                 "   \"RIC_ENDC_CONF_UPDATE_FAILURE=10372\",\n" +\r
1535                                 "   \"RIC_SECONDARY_RAT_DATA_USAGE_REPORT=10380\",\n" +\r
1536                                 "   \"RIC_E2_SETUP_REQ=12001\",\n" +\r
1537                                 "   \"RIC_E2_SETUP_RESP=12002\",\n" +\r
1538                                 "   \"RIC_E2_SETUP_FAILURE=12003\",\n" +\r
1539                                 "   \"RIC_ERROR_INDICATION=12007\",\n" +\r
1540                                 "   \"RIC_SUB_REQ=12010\",\n" +\r
1541                                 "   \"RIC_SUB_RESP=12011\",\n" +\r
1542                                 "   \"RIC_SUB_FAILURE=12012\",\n" +\r
1543                                 "   \"RIC_SUB_DEL_REQ=12020\",\n" +\r
1544                                 "   \"RIC_SUB_DEL_RESP=12021\",\n" +\r
1545                                 "   \"RIC_SUB_DEL_FAILURE=12022\",\n" +\r
1546                                 "   \"RIC_SUB_DEL_REQUIRED=12023\",\n" +\r
1547                                 "   \"RIC_CONTROL_REQ=12040\",\n" +\r
1548                                 "   \"RIC_CONTROL_ACK=12041\",\n" +\r
1549                                 "   \"RIC_CONTROL_FAILURE=12042\",\n" +\r
1550                                 "   \"RIC_INDICATION=12050\",\n" +\r
1551                                 "   \"A1_POLICY_REQ=20010\",\n" +\r
1552                                 "   \"A1_POLICY_RESP=20011\",\n" +\r
1553                                 "   \"A1_POLICY_QUERY=20012\",\n" +\r
1554                                 "   \"TS_UE_LIST=30000\",\n" +\r
1555                                 "   \"TS_QOE_PRED_REQ=30001\",\n" +\r
1556                                 "   \"TS_QOE_PREDICTION=30002\",\n" +\r
1557                                 "   \"TS_ANOMALY_UPDATE=30003\",\n" +\r
1558                                 "   \"TS_ANOMALY_ACK=30004\",\n" +\r
1559                                 "   \"MC_REPORT=30010\",\n" +\r
1560                                 "   \"DCAPTERM_RTPM_RMR_MSGTYPE=33001\",\n" +\r
1561                                 "   \"DCAPTERM_GEO_RMR_MSGTYPE=33002\",\n" +\r
1562                                 "   \"RIC_SERVICE_UPDATE=12030\",\n" +\r
1563                                 "   \"RIC_SERVICE_UPDATE_ACK=12031\",\n" +\r
1564                                 "   \"RIC_SERVICE_UPDATE_FAILURE=12032\",\n" +\r
1565                                 "   \"RIC_E2NODE_CONFIG_UPDATE=12070\",\n" +\r
1566                                 "   \"RIC_E2NODE_CONFIG_UPDATE_ACK==12071\",\n" +\r
1567                                 "   \"RIC_E2NODE_CONFIG_UPDATE_FAILURE=12072\",\n" +\r
1568                                 "   \"RIC_E2_RESET_REQ=12004\",\n" +\r
1569                                 "   \"RIC_E2_RESET_RESP=12005\",\n" +\r
1570                                 "   ]\n" +\r
1571                                 "\n" +\r
1572                                 "\"PlatformRoutes\": [\n" +\r
1573                                 "  { 'messagetype': 'RIC_SUB_REQ', 'senderendpoint': 'SUBMAN', 'subscriptionid': -1, 'endpoint': '', 'meid': '%meid'},\n" +\r
1574                                 "  { 'messagetype': 'RIC_SUB_DEL_REQ', 'senderendpoint': 'SUBMAN', 'subscriptionid': -1,'endpoint': '', 'meid': '%meid'},\n" +\r
1575                                 "  { 'messagetype': 'RIC_SUB_RESP', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'SUBMAN', 'meid': ''},\n" +\r
1576                                 "  { 'messagetype': 'RIC_SUB_DEL_RESP', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'SUBMAN', 'meid': ''},\n" +\r
1577                                 "  { 'messagetype': 'RIC_SUB_FAILURE', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'SUBMAN', 'meid': ''},\n" +\r
1578                                 "  { 'messagetype': 'RIC_SUB_DEL_FAILURE', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'SUBMAN', 'meid': ''},\n" +\r
1579                                 "  { 'messagetype': 'RIC_SUB_DEL_REQUIRED', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'SUBMAN', 'meid': ''},\n" +\r
1580                                 "  { 'messagetype': 'RIC_X2_SETUP_REQ', 'senderendpoint': 'E2MAN', 'subscriptionid': -1, 'endpoint': '', 'meid': '%meid'},\n" +\r
1581                                 "  { 'messagetype': 'RIC_X2_RESET', 'senderendpoint': 'E2MAN', 'subscriptionid': -1, 'endpoint': '', 'meid': '%meid'},\n" +\r
1582                                 "  { 'messagetype': 'RIC_X2_RESET_RESP', 'senderendpoint': 'E2MAN', 'subscriptionid': -1, 'endpoint': '', 'meid': '%meid'},\n" +\r
1583                                 "  { 'messagetype': 'RIC_ENDC_X2_SETUP_REQ', 'senderendpoint': 'E2MAN', 'subscriptionid': -1, 'endpoint': '', 'meid': '%meid'},\n" +\r
1584                                 "  { 'messagetype': 'RIC_ENB_CONF_UPDATE_ACK', 'senderendpoint': 'E2MAN', 'subscriptionid': -1, 'endpoint': '', 'meid': '%meid'},\n" +\r
1585                                 "  { 'messagetype': 'RIC_ENB_CONF_UPDATE_FAILURE', 'senderendpoint': 'E2MAN', 'subscriptionid': -1, 'endpoint': '', 'meid': '%meid'},\n" +\r
1586                                 "  { 'messagetype': 'RIC_ENDC_CONF_UPDATE_ACK', 'senderendpoint': 'E2MAN', 'subscriptionid': -1, 'endpoint': '', 'meid': '%meid'},\n" +\r
1587                                 "  { 'messagetype': 'RIC_ENDC_CONF_UPDATE_FAILURE', 'senderendpoint': 'E2MAN', 'subscriptionid': -1, 'endpoint': '', 'meid': '%meid'},\n" +\r
1588                                 "  { 'messagetype': 'RIC_E2_SETUP_REQ', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
1589                                 "  { 'messagetype': 'E2_TERM_INIT', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
1590                                 "  { 'messagetype': 'RIC_X2_SETUP_RESP', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
1591                                 "  { 'messagetype': 'RIC_X2_SETUP_FAILURE', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
1592                                 "  { 'messagetype': 'RIC_X2_RESET', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
1593                                 "  { 'messagetype': 'RIC_X2_RESET_RESP', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
1594                                 "  { 'messagetype': 'RIC_ENDC_X2_SETUP_RESP', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
1595                                 "  { 'messagetype': 'RIC_ENDC_X2_SETUP_FAILURE', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
1596                                 "  { 'messagetype': 'RIC_ENDC_CONF_UPDATE', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
1597                                 "  { 'messagetype': 'RIC_SCTP_CONNECTION_FAILURE', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
1598                                 "  { 'messagetype': 'RIC_ERROR_INDICATION', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
1599                                 "  { 'messagetype': 'RIC_ENB_CONF_UPDATE', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
1600                                 "  { 'messagetype': 'RIC_ENB_LOAD_INFORMATION', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
1601                                 "  { 'messagetype': 'E2_TERM_KEEP_ALIVE_RESP', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
1602                                 "  { 'messagetype': 'A1_POLICY_QUERY', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'A1MEDIATOR', 'meid': ''},\n" +\r
1603                                 "  { 'messagetype': 'A1_POLICY_RESP', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'A1MEDIATOR', 'meid': ''},\n" +\r
1604                                 "  { 'messagetype': 'RIC_SERVICE_UPDATE', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
1605                                 "  { 'messagetype': 'RIC_E2NODE_CONFIG_UPDATE', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
1606                                 "  { 'messagetype': 'RIC_E2_RESET_REQ', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
1607                                 "   ]\n" +\r
1608                                 "",\r
1609                 },\r
1610                 ObjectMeta: metav1.ObjectMeta{\r
1611                         Name: "configmap-ricplt-rtmgr-rtmgrcfg",\r
1612                 },\r
1613                 TypeMeta: metav1.TypeMeta{\r
1614                         APIVersion: "v1",\r
1615                         Kind:       "ConfigMap",\r
1616                 },\r
1617         }\r
1618 \r
1619         configMap33 := &corev1.ConfigMap{\r
1620                 Data: map[string]string{\r
1621                         "CFGFILE":     "/cfg/rtmgr-config.yaml",\r
1622                         "RMR_RTG_SVC": "4561",\r
1623                         "RMR_SEED_RT": "/uta_rtg_ric.rt",\r
1624                         "RMR_SRC_ID":  "service-ricplt-rtmgr-rmr.ricplt",\r
1625                         "XMURL":       "http://service-ricplt-appmgr-http:8080/ric/v1/xapps",\r
1626                 },\r
1627                 ObjectMeta: metav1.ObjectMeta{\r
1628                         Name: "configmap-ricplt-rtmgr-env",\r
1629                 },\r
1630                 TypeMeta: metav1.TypeMeta{\r
1631                         Kind:       "ConfigMap",\r
1632                         APIVersion: "v1",\r
1633                 },\r
1634         }\r
1635 \r
	// configMap34 embeds the Subscription Manager (submgr) configuration file
	// (key "submgrcfg") together with a minimal, empty RMR seed routing table
	// (key "submgrutartg").  Unlike most maps built in this function it sets
	// an explicit "ricplt" namespace in its ObjectMeta.
	configMap34 := &corev1.ConfigMap{
		Data: map[string]string{
			// submgr-config.yaml: HTTP bind address, log level, RMR transport,
			// rtmgr endpoint, SDL/db connection pool and E2 timeout/retry
			// controls consumed by the submgr process.
			"submgrcfg": "\"local\":\n" +
				"  \"host\": \":8080\"\n" +
				"\"logger\":\n" +
				"  \"level\": 3\n" +
				"\"rmr\":\n" +
				"  \"protPort\" : \"tcp:4560\"\n" +
				"  \"maxSize\": 8192\n" +
				"  \"numWorkers\": 1\n" +
				"\"rtmgr\":\n" +
				"  \"hostAddr\": \"service-ricplt-rtmgr-http\"\n" +
				"  \"port\"    : 3800\n" +
				"  \"baseUrl\" : \"/ric/v1\"\n" +
				"\"db\":\n" +
				"  \"sessionNamespace\": \"XMSession\"\n" +
				"  \"host\": \":6379\"\n" +
				"  \"prot\": \"tcp\"\n" +
				"  \"maxIdle\": 80\n" +
				"  \"maxActive\": 12000\n" +
				"\"controls\":\n" +
				"  \"e2tSubReqTimeout_ms\": 2000\n" +
				"  \"e2tSubDelReqTime_ms\": 2000\n" +
				"  \"e2tRecvMsgTimeout_ms\": 2000\n" +
				"  \"e2tMaxSubReqTryCount\": 2\n" +
				"  \"e2tMaxSubDelReqTryCount\": 2\n" +
				"  \"checkE2State\": \"true\"\n" +
				"  \"readSubsFromDb\": \"true\"\n" +
				"  \"dbTryCount\": 200\n" +
				"  \"dbRetryForever\": \"true\"\n" +
				"  \"waitRouteCleanup_ms\": 5000\n" +
				"",
			// Empty RMR seed routing table: routes are filled in dynamically
			// by rtmgr at runtime.
			"submgrutartg": "newrt|start\n" +
				"newrt|end\n" +
				"",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "submgrcfg",
			Namespace: "ricplt",
		},
		TypeMeta: metav1.TypeMeta{
			Kind:       "ConfigMap",
			APIVersion: "v1",
		},
	}
1681 \r
1682         configMap35 := &corev1.ConfigMap{\r
1683                 TypeMeta: metav1.TypeMeta{\r
1684                         APIVersion: "v1",\r
1685                         Kind:       "ConfigMap",\r
1686                 },\r
1687                 Data: map[string]string{\r
1688                         "SUBMGR_SEED_SN": "1",\r
1689                         "CFG_FILE":       "/cfg/submgr-config.yaml",\r
1690                         "RMR_RTG_SVC":    "4561",\r
1691                         "RMR_SEED_RT":    "/cfg/submgr-uta-rtg.rt",\r
1692                         "RMR_SRC_ID":     "service-ricplt-submgr-rmr.ricplt",\r
1693                 },\r
1694                 ObjectMeta: metav1.ObjectMeta{\r
1695                         Name: "configmap-ricplt-submgr-env",\r
1696                 },\r
1697         }\r
1698 \r
1699         configMap36 := &corev1.ConfigMap{\r
1700                 ObjectMeta: metav1.ObjectMeta{\r
1701                         Name:      "configmap-ricplt-vespamgr",\r
1702                         Namespace: "ricplt",\r
1703                 },\r
1704                 TypeMeta: metav1.TypeMeta{\r
1705                         APIVersion: "v1",\r
1706                         Kind:       "ConfigMap",\r
1707                 },\r
1708                 Data: map[string]string{\r
1709                         "VESMGR_PRICOLLECTOR_SECURE":     "false",\r
1710                         "VESMGR_PRICOLLECTOR_SERVERROOT": "/vescollector",\r
1711                         "VESMGR_ALERTMANAGER_BIND_ADDR":  ":9095",\r
1712                         "VESMGR_PRICOLLECTOR_PASSWORD":   "sample1",\r
1713                         "VESMGR_PRICOLLECTOR_ADDR":       "aux-entry",\r
1714                         "VESMGR_PRICOLLECTOR_PORT":       "8443",\r
1715                         "VESMGR_PRICOLLECTOR_USER":       "sample1",\r
1716                         "VESMGR_PROMETHEUS_ADDR":         "http://r4-infrastructure-prometheus-server.ricplt",\r
1717                         "VESMGR_HB_INTERVAL":             "60s",\r
1718                         "VESMGR_MEAS_INTERVAL":           "30s",\r
1719                 },\r
1720         }\r
1721 \r
1722         configMap37 := &corev1.ConfigMap{\r
1723                 TypeMeta: metav1.TypeMeta{\r
1724                         APIVersion: "v1",\r
1725                         Kind:       "ConfigMap",\r
1726                 },\r
1727                 Data: map[string]string{\r
1728                         "DEBUG":                 "true",\r
1729                         "PORT":                  "8080",\r
1730                         "STORAGE":               "local",\r
1731                         "STORAGE_LOCAL_ROOTDIR": "/charts",\r
1732                 },\r
1733                 ObjectMeta: metav1.ObjectMeta{\r
1734                         Name: "configmap-ricplt-xapp-onboarder-chartmuseum-env",\r
1735                 },\r
1736         }\r
1737 \r
1738         configMap38 := &corev1.ConfigMap{\r
1739                 Data: map[string]string{\r
1740                         "CHART_REPO_URL":               "http://0.0.0.0:8080",\r
1741                         "CHART_WORKSPACE_PATH":         "/tmp/xapp-onboarder",\r
1742                         "HTTP_RETRY":                   "3",\r
1743                         "MOCK_TEST_HELM_REPO_TEMP_DIR": "/tmp/mock_helm_repo",\r
1744                         "ALLOW_REDEPLOY":               "True",\r
1745                         "CHART_WORKSPACE_SIZE":         "500MB",\r
1746                         "FLASK_DEBUG":                  "False",\r
1747                         "FLASK_PORT":                   "8888",\r
1748                         "HELM_VERSION":                 "2.12.3",\r
1749                         "MOCK_TEST_MODE":               "False",\r
1750                 },\r
1751                 ObjectMeta: metav1.ObjectMeta{\r
1752                         Name: "configmap-ricplt-xapp-onboarder-env",\r
1753                 },\r
1754                 TypeMeta: metav1.TypeMeta{\r
1755                         APIVersion: "v1",\r
1756                         Kind:       "ConfigMap",\r
1757                 },\r
1758         }\r
1759 \r
	// Return every ConfigMap required by the RIC platform components in a
	// single slice so the caller can apply them in one pass.
	return []*corev1.ConfigMap{configMap1, configMap2, configMap3, configMap4, configMap5, configMap6, configMap7, configMap8, configMap9, configMap10, configMap11, configMap12, configMap13, configMap14, configMap15, configMap16, configMap17, configMap18, configMap19, configMap20, configMap21, configMap22, configMap23, configMap24, configMap25, configMap26, configMap27, configMap28, configMap29, configMap30, configMap31, configMap32, configMap33, configMap34, configMap35, configMap36, configMap37, configMap38}
}