Adding ConfigMaps for RIC services. 60/12160/2
author    naman.gupta <naman.gupta@samsung.com>
Fri, 1 Dec 2023 18:22:06 +0000 (23:52 +0530)
committer Thoralf Czichy <thoralf.czichy@nokia.com>
Thu, 7 Dec 2023 09:37:01 +0000 (09:37 +0000)
Adding ConfigMaps for RIC services.

Change-Id: Ibdf1b9d1513524fb2fc8a955716a4ee8c3f2a924
Signed-off-by: naman.gupta <naman.gupta@samsung.com>
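
For context, a minimal sketch of how the ConfigMaps returned by GetConfigMap() might be applied, assuming a controller-runtime client; the createConfigMaps helper below is hypothetical and not part of this change:

// Hypothetical helper (not in this change): creates every ConfigMap
// returned by GetConfigMap(), tolerating AlreadyExists errors so
// repeated reconciles stay idempotent.
package controller

import (
	"context"
	"fmt"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func createConfigMaps(ctx context.Context, c client.Client) error {
	for _, cm := range GetConfigMap() {
		if err := c.Create(ctx, cm); err != nil && !k8serrors.IsAlreadyExists(err) {
			return fmt.Errorf("create ConfigMap %s/%s: %w", cm.Namespace, cm.Name, err)
		}
	}
	return nil
}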
depRicKubernetesOperator/internal/controller/getConfigmap.go

index af56cd3..8e0b67f 100644
@@ -264,6 +264,64 @@ func GetConfigMap() []*corev1.ConfigMap {
                },\r
        }\r
 \r
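+       // DBaaS (SDL) connection settings for platform components in the ricplt namespace.\r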
+       configMap10 := &corev1.ConfigMap{\r
+               Data: map[string]string{\r
+                       "DBAAS_NODE_COUNT":   "1",\r
+                       "DBAAS_SERVICE_HOST": "service-ricplt-dbaas-tcp.ricplt",\r
+                       "DBAAS_SERVICE_PORT": "6379",\r
+               },\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Name:      "configmap-ricplt-dbaas-appconfig",\r
+                       Namespace: "ricplt",\r
+               },\r
+               TypeMeta: metav1.TypeMeta{\r
+                       APIVersion: "v1",\r
+                       Kind:       "ConfigMap",\r
+               },\r
+       }\r
+\r
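+       // The same DBaaS connection settings, published to xApps in the ricxapp namespace.\r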
+       configMap11 := &corev1.ConfigMap{\r
+               Data: map[string]string{\r
+                       "DBAAS_NODE_COUNT":   "1",\r
+                       "DBAAS_SERVICE_HOST": "service-ricplt-dbaas-tcp.ricplt",\r
+                       "DBAAS_SERVICE_PORT": "6379",\r
+               },\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Name:      "dbaas-appconfig",\r
+                       Namespace: "ricxapp",\r
+               },\r
+               TypeMeta: metav1.TypeMeta{\r
+                       APIVersion: "v1",\r
+                       Kind:       "ConfigMap",\r
+               },\r
+       }\r
+\r
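+       // redis.conf for the platform DBaaS Redis: binds 0.0.0.0, loads the SDL redismodule, appendonly off.\r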
+       configMap12 := &corev1.ConfigMap{\r
+               TypeMeta: metav1.TypeMeta{\r
+                       APIVersion: "v1",\r
+                       Kind:       "ConfigMap",\r
+               },\r
+               Data: map[string]string{\r
+                       "redis.conf": "dir \"/data\"\n" +\r
+                               "appendonly no\n" +\r
+                               "bind 0.0.0.0\n" +\r
+                               "loadmodule /usr/local/libexec/redismodule/libredismodule.so\n" +\r
+                               "protected-mode no\n" +\r
+                               "save\n" +\r
+                               "",\r
+               },\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Namespace: "ricplt",\r
+                       Labels: map[string]string{\r
+                               "app":      "ricplt-dbaas",\r
+                               "chart":    "dbaas-2.0.0",\r
+                               "heritage": "Helm",\r
+                               "release":  "release-name",\r
+                       },\r
+                       Name: "configmap-ricplt-dbaas-config",\r
+               },\r
+       }\r
+\r
        configMap13 := &corev1.ConfigMap{\r
                Data: map[string]string{\r
                        "rmr_verbose": "0\n" +\r
@@ -451,6 +509,1253 @@ func GetConfigMap() []*corev1.ConfigMap {
                },\r
        }\r
 \r
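+       // Custom Kong/Nginx server block: /status, Prometheus /metrics and /nginx_status on port 9542.\r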
+       configMap20 := &corev1.ConfigMap{\r
+               Data: map[string]string{\r
+                       "servers.conf": "# Prometheus metrics and health-checking server\n" +\r
+                               "server {\n" +\r
+                               "    server_name kong_prometheus_exporter;\n" +\r
+                               "    listen 0.0.0.0:9542; # can be any other port as well\n" +\r
+                               "    access_log off;\n" +\r
+                               "    location /status {\n" +\r
+                               "        default_type text/plain;\n" +\r
+                               "        return 200;\n" +\r
+                               "    }\n" +\r
+                               "    location /metrics {\n" +\r
+                               "        default_type text/plain;\n" +\r
+                               "        content_by_lua_block {\n" +\r
+                               "             local prometheus = require \"kong.plugins.prometheus.exporter\"\n" +\r
+                               "             prometheus:collect()\n" +\r
+                               "        }\n" +\r
+                               "    }\n" +\r
+                               "    location /nginx_status {\n" +\r
+                               "        internal;\n" +\r
+                               "        access_log off;\n" +\r
+                               "        stub_status;\n" +\r
+                               "    }\n" +\r
+                               "}\n" +\r
+                               "",\r
+               },\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Labels: map[string]string{\r
+                               "app.kubernetes.io/name":       "kong",\r
+                               "app.kubernetes.io/version":    "1.4",\r
+                               "helm.sh/chart":                "kong-0.36.6",\r
+                               "app.kubernetes.io/instance":   "release-name",\r
+                               "app.kubernetes.io/managed-by": "Helm",\r
+                       },\r
+                       Name: "release-name-kong-default-custom-server-blocks",\r
+               },\r
+               TypeMeta: metav1.TypeMeta{\r
+                       APIVersion: "v1",\r
+                       Kind:       "ConfigMap",\r
+               },\r
+       }\r
+\r
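+       // Jaeger tracing environment for platform components; tracing is disabled by default (TRACING_ENABLED=0).\r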
+       configMap21 := &corev1.ConfigMap{\r
+               Data: map[string]string{\r
+                       "TRACING_JAEGER_LOG_LEVEL":     "error",\r
+                       "TRACING_JAEGER_SAMPLER_PARAM": "1",\r
+                       "TRACING_JAEGER_SAMPLER_TYPE":  "const",\r
+                       "TRACING_ENABLED":              "0",\r
+                       "TRACING_JAEGER_AGENT_ADDR":    "service-ricplt-jaegeradapter-agent.ricplt",\r
+               },\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Namespace: "ricplt",\r
+                       Name:      "configmap-ricplt-jaegeradapter",\r
+               },\r
+               TypeMeta: metav1.TypeMeta{\r
+                       Kind:       "ConfigMap",\r
+                       APIVersion: "v1",\r
+               },\r
+       }\r
+\r
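+       // Kong server-blocks ConfigMap rendered a second time; name and data match configMap20.\r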
+       configMap22 := &corev1.ConfigMap{\r
+               TypeMeta: metav1.TypeMeta{\r
+                       APIVersion: "v1",\r
+                       Kind:       "ConfigMap",\r
+               },\r
+               Data: map[string]string{\r
+                       "servers.conf": "# Prometheus metrics and health-checking server\n" +\r
+                               "server {\n" +\r
+                               "    server_name kong_prometheus_exporter;\n" +\r
+                               "    listen 0.0.0.0:9542; # can be any other port as well\n" +\r
+                               "    access_log off;\n" +\r
+                               "    location /status {\n" +\r
+                               "        default_type text/plain;\n" +\r
+                               "        return 200;\n" +\r
+                               "    }\n" +\r
+                               "    location /metrics {\n" +\r
+                               "        default_type text/plain;\n" +\r
+                               "        content_by_lua_block {\n" +\r
+                               "             local prometheus = require \"kong.plugins.prometheus.exporter\"\n" +\r
+                               "             prometheus:collect()\n" +\r
+                               "        }\n" +\r
+                               "    }\n" +\r
+                               "    location /nginx_status {\n" +\r
+                               "        internal;\n" +\r
+                               "        access_log off;\n" +\r
+                               "        stub_status;\n" +\r
+                               "    }\n" +\r
+                               "}\n" +\r
+                               "",\r
+               },\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Labels: map[string]string{\r
+                               "app.kubernetes.io/version":    "1.4",\r
+                               "helm.sh/chart":                "kong-0.36.6",\r
+                               "app.kubernetes.io/instance":   "release-name",\r
+                               "app.kubernetes.io/managed-by": "Helm",\r
+                               "app.kubernetes.io/name":       "kong",\r
+                       },\r
+                       Name: "release-name-kong-default-custom-server-blocks",\r
+               },\r
+       }\r
+\r
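+       // O1 mediator application config (logger, SDL namespaces, RMR, SBI/NBI) plus its static RMR routing table.\r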
+       configMap23 := &corev1.ConfigMap{\r
+               TypeMeta: metav1.TypeMeta{\r
+                       APIVersion: "v1",\r
+                       Kind:       "ConfigMap",\r
+               },\r
+               Data: map[string]string{\r
+                       "config-file.json": "{\n" +\r
+                               "    \"local\": {\n" +\r
+                               "        \"host\": \":8080\"\n" +\r
+                               "    },\n" +\r
+                               "    \"logger\": {\n" +\r
+                               "        \"level\": 4\n" +\r
+                               "    },\n" +\r
+                               "    \"db\": {\n" +\r
+                               "        \"namespaces\": [\"sdl\", \"rnib\"]\n" +\r
+                               "    },\n" +\r
+                               "    \"rmr\": {\n" +\r
+                               "        \"protPort\": \"tcp:4560\",\n" +\r
+                               "        \"maxSize\": 65536,\n" +\r
+                               "        \"numWorkers\": 1\n" +\r
+                               "    },\n" +\r
+                               "    \"sbi\": {\n" +\r
+                               "        \"appmgrAddr\": \"service-ricplt-appmgr-http:8080\",\n" +\r
+                               "        \"alertmgrAddr\": \"r4-infrastructure-prometheus-alertmanager:80\",\n" +\r
+                               "        \"timeout\": 30\n" +\r
+                               "    },\n" +\r
+                               "    \"nbi\": {\n" +\r
+                               "        \"schemas\": [\"o-ran-sc-ric-xapp-desc-v1\", \"o-ran-sc-ric-ueec-config-v1\"]\n" +\r
+                               "    },\n" +\r
+                               "    \"controls\": {\n" +\r
+                               "        \"active\": true\n" +\r
+                               "    }\n" +\r
+                               "}\n" +\r
+                               "\n" +\r
+                               "",\r
+                       "uta_rtg.rt": "newrt|start\n" +\r
+                               "rte|13111|127.0.0.1:4588\n" +\r
+                               "rte|13111|127.0.0.1:4560\n" +\r
+                               "newrt|end\n" +\r
+                               "",\r
+               },\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Name:      "configmap-ricplt-o1mediator-appconfig-configmap",\r
+                       Namespace: "ricplt",\r
+               },\r
+       }\r
+\r
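+       // O1 mediator environment: RMR_SEED_RT points at the uta_rtg.rt routing table.\r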
+       configMap24 := &corev1.ConfigMap{\r
+               Data: map[string]string{\r
+                       "RMR_SEED_RT": "/etc/o1agent/uta_rtg.rt",\r
+               },\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Name: "configmap-ricplt-o1mediator-env",\r
+               },\r
+               TypeMeta: metav1.TypeMeta{\r
+                       Kind:       "ConfigMap",\r
+                       APIVersion: "v1",\r
+               },\r
+       }\r
+\r
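+       // Prometheus Alertmanager config: routes all alerts to the vespamgr webhook at service-ricplt-vespamgr-http:9095/alerts.\r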
+       configMap25 := &corev1.ConfigMap{\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Labels: map[string]string{\r
+                               "chart":     "prometheus-11.3.0",\r
+                               "component": "alertmanager",\r
+                               "heritage":  "Helm",\r
+                               "release":   "release-name",\r
+                               "app":       "prometheus",\r
+                       },\r
+                       Name:      "release-name-prometheus-alertmanager",\r
+                       Namespace: "ricplt",\r
+               },\r
+               TypeMeta: metav1.TypeMeta{\r
+                       APIVersion: "v1",\r
+                       Kind:       "ConfigMap",\r
+               },\r
+               Data: map[string]string{\r
+                       "alertmanager.yml": "global:\n" +\r
+                               "  resolve_timeout: 5m\n" +\r
+                               "receivers:\n" +\r
+                               "- name: vespa\n" +\r
+                               "  webhook_configs:\n" +\r
+                               "  - url: http://service-ricplt-vespamgr-http:9095/alerts\n" +\r
+                               "route:\n" +\r
+                               "  group_by:\n" +\r
+                               "  - alertname\n" +\r
+                               "  - severity\n" +\r
+                               "  - instance\n" +\r
+                               "  - job\n" +\r
+                               "  group_interval: 3m\n" +\r
+                               "  group_wait: 5s\n" +\r
+                               "  receiver: vespa\n" +\r
+                               "  repeat_interval: 1h\n" +\r
+                               "  routes:\n" +\r
+                               "  - continue: true\n" +\r
+                               "    receiver: vespa\n" +\r
+                               "",\r
+               },\r
+       }\r
+\r
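+       // Prometheus server config: scrape jobs for apiservers, nodes, cAdvisor, service endpoints and pods; rule files are empty placeholders.\r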
+       configMap26 := &corev1.ConfigMap{\r
+               Data: map[string]string{\r
+                       "alerting_rules.yml": "{}\n" +\r
+                               "",\r
+                       "alerts": "{}\n" +\r
+                               "",\r
+                       "prometheus.yml": "global:\n" +\r
+                               "  evaluation_interval: 1m\n" +\r
+                               "  scrape_interval: 1m\n" +\r
+                               "  scrape_timeout: 10s\n" +\r
+                               "rule_files:\n" +\r
+                               "- /etc/config/recording_rules.yml\n" +\r
+                               "- /etc/config/alerting_rules.yml\n" +\r
+                               "- /etc/config/rules\n" +\r
+                               "- /etc/config/alerts\n" +\r
+                               "scrape_configs:\n" +\r
+                               "- job_name: prometheus\n" +\r
+                               "  static_configs:\n" +\r
+                               "  - targets:\n" +\r
+                               "    - localhost:9090\n" +\r
+                               "- bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n" +\r
+                               "  job_name: kubernetes-apiservers\n" +\r
+                               "  kubernetes_sd_configs:\n" +\r
+                               "  - role: endpoints\n" +\r
+                               "  relabel_configs:\n" +\r
+                               "  - action: keep\n" +\r
+                               "    regex: default;kubernetes;https\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_namespace\n" +\r
+                               "    - __meta_kubernetes_service_name\n" +\r
+                               "    - __meta_kubernetes_endpoint_port_name\n" +\r
+                               "  scheme: https\n" +\r
+                               "  tls_config:\n" +\r
+                               "    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n" +\r
+                               "    insecure_skip_verify: true\n" +\r
+                               "- bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n" +\r
+                               "  job_name: kubernetes-nodes\n" +\r
+                               "  kubernetes_sd_configs:\n" +\r
+                               "  - role: node\n" +\r
+                               "  relabel_configs:\n" +\r
+                               "  - action: labelmap\n" +\r
+                               "    regex: __meta_kubernetes_node_label_(.+)\n" +\r
+                               "  - replacement: kubernetes.default.svc:443\n" +\r
+                               "    target_label: __address__\n" +\r
+                               "  - regex: (.+)\n" +\r
+                               "    replacement: /api/v1/nodes/$1/proxy/metrics\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_node_name\n" +\r
+                               "    target_label: __metrics_path__\n" +\r
+                               "  scheme: https\n" +\r
+                               "  tls_config:\n" +\r
+                               "    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n" +\r
+                               "    insecure_skip_verify: true\n" +\r
+                               "- bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n" +\r
+                               "  job_name: kubernetes-nodes-cadvisor\n" +\r
+                               "  kubernetes_sd_configs:\n" +\r
+                               "  - role: node\n" +\r
+                               "  relabel_configs:\n" +\r
+                               "  - action: labelmap\n" +\r
+                               "    regex: __meta_kubernetes_node_label_(.+)\n" +\r
+                               "  - replacement: kubernetes.default.svc:443\n" +\r
+                               "    target_label: __address__\n" +\r
+                               "  - regex: (.+)\n" +\r
+                               "    replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_node_name\n" +\r
+                               "    target_label: __metrics_path__\n" +\r
+                               "  scheme: https\n" +\r
+                               "  tls_config:\n" +\r
+                               "    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n" +\r
+                               "    insecure_skip_verify: true\n" +\r
+                               "- job_name: kubernetes-service-endpoints\n" +\r
+                               "  kubernetes_sd_configs:\n" +\r
+                               "  - role: endpoints\n" +\r
+                               "  relabel_configs:\n" +\r
+                               "  - action: keep\n" +\r
+                               "    regex: true\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_service_annotation_prometheus_io_scrape\n" +\r
+                               "  - action: replace\n" +\r
+                               "    regex: (https?)\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_service_annotation_prometheus_io_scheme\n" +\r
+                               "    target_label: __scheme__\n" +\r
+                               "  - action: replace\n" +\r
+                               "    regex: (.+)\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_service_annotation_prometheus_io_path\n" +\r
+                               "    target_label: __metrics_path__\n" +\r
+                               "  - action: replace\n" +\r
+                               "    regex: ([^:]+)(?::\\d+)?;(\\d+)\n" +\r
+                               "    replacement: $1:$2\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __address__\n" +\r
+                               "    - __meta_kubernetes_service_annotation_prometheus_io_port\n" +\r
+                               "    target_label: __address__\n" +\r
+                               "  - action: labelmap\n" +\r
+                               "    regex: __meta_kubernetes_service_label_(.+)\n" +\r
+                               "  - action: replace\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_namespace\n" +\r
+                               "    target_label: kubernetes_namespace\n" +\r
+                               "  - action: replace\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_service_name\n" +\r
+                               "    target_label: kubernetes_name\n" +\r
+                               "  - action: replace\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_pod_node_name\n" +\r
+                               "    target_label: kubernetes_node\n" +\r
+                               "- job_name: kubernetes-service-endpoints-slow\n" +\r
+                               "  kubernetes_sd_configs:\n" +\r
+                               "  - role: endpoints\n" +\r
+                               "  relabel_configs:\n" +\r
+                               "  - action: keep\n" +\r
+                               "    regex: true\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_service_annotation_prometheus_io_scrape_slow\n" +\r
+                               "  - action: replace\n" +\r
+                               "    regex: (https?)\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_service_annotation_prometheus_io_scheme\n" +\r
+                               "    target_label: __scheme__\n" +\r
+                               "  - action: replace\n" +\r
+                               "    regex: (.+)\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_service_annotation_prometheus_io_path\n" +\r
+                               "    target_label: __metrics_path__\n" +\r
+                               "  - action: replace\n" +\r
+                               "    regex: ([^:]+)(?::\\d+)?;(\\d+)\n" +\r
+                               "    replacement: $1:$2\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __address__\n" +\r
+                               "    - __meta_kubernetes_service_annotation_prometheus_io_port\n" +\r
+                               "    target_label: __address__\n" +\r
+                               "  - action: labelmap\n" +\r
+                               "    regex: __meta_kubernetes_service_label_(.+)\n" +\r
+                               "  - action: replace\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_namespace\n" +\r
+                               "    target_label: kubernetes_namespace\n" +\r
+                               "  - action: replace\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_service_name\n" +\r
+                               "    target_label: kubernetes_name\n" +\r
+                               "  - action: replace\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_pod_node_name\n" +\r
+                               "    target_label: kubernetes_node\n" +\r
+                               "  scrape_interval: 5m\n" +\r
+                               "  scrape_timeout: 30s\n" +\r
+                               "- honor_labels: true\n" +\r
+                               "  job_name: prometheus-pushgateway\n" +\r
+                               "  kubernetes_sd_configs:\n" +\r
+                               "  - role: service\n" +\r
+                               "  relabel_configs:\n" +\r
+                               "  - action: keep\n" +\r
+                               "    regex: pushgateway\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_service_annotation_prometheus_io_probe\n" +\r
+                               "- job_name: kubernetes-services\n" +\r
+                               "  kubernetes_sd_configs:\n" +\r
+                               "  - role: service\n" +\r
+                               "  metrics_path: /probe\n" +\r
+                               "  params:\n" +\r
+                               "    module:\n" +\r
+                               "    - http_2xx\n" +\r
+                               "  relabel_configs:\n" +\r
+                               "  - action: keep\n" +\r
+                               "    regex: true\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_service_annotation_prometheus_io_probe\n" +\r
+                               "  - source_labels:\n" +\r
+                               "    - __address__\n" +\r
+                               "    target_label: __param_target\n" +\r
+                               "  - replacement: blackbox\n" +\r
+                               "    target_label: __address__\n" +\r
+                               "  - source_labels:\n" +\r
+                               "    - __param_target\n" +\r
+                               "    target_label: instance\n" +\r
+                               "  - action: labelmap\n" +\r
+                               "    regex: __meta_kubernetes_service_label_(.+)\n" +\r
+                               "  - source_labels:\n" +\r
+                               "    - __meta_kubernetes_namespace\n" +\r
+                               "    target_label: kubernetes_namespace\n" +\r
+                               "  - source_labels:\n" +\r
+                               "    - __meta_kubernetes_service_name\n" +\r
+                               "    target_label: kubernetes_name\n" +\r
+                               "- job_name: kubernetes-pods\n" +\r
+                               "  kubernetes_sd_configs:\n" +\r
+                               "  - role: pod\n" +\r
+                               "  relabel_configs:\n" +\r
+                               "  - action: keep\n" +\r
+                               "    regex: true\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_pod_annotation_prometheus_io_scrape\n" +\r
+                               "  - action: replace\n" +\r
+                               "    regex: (.+)\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_pod_annotation_prometheus_io_path\n" +\r
+                               "    target_label: __metrics_path__\n" +\r
+                               "  - action: replace\n" +\r
+                               "    regex: ([^:]+)(?::\\d+)?;(\\d+)\n" +\r
+                               "    replacement: $1:$2\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __address__\n" +\r
+                               "    - __meta_kubernetes_pod_annotation_prometheus_io_port\n" +\r
+                               "    target_label: __address__\n" +\r
+                               "  - action: labelmap\n" +\r
+                               "    regex: __meta_kubernetes_pod_label_(.+)\n" +\r
+                               "  - action: replace\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_namespace\n" +\r
+                               "    target_label: kubernetes_namespace\n" +\r
+                               "  - action: replace\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_pod_name\n" +\r
+                               "    target_label: kubernetes_pod_name\n" +\r
+                               "- job_name: kubernetes-pods-slow\n" +\r
+                               "  kubernetes_sd_configs:\n" +\r
+                               "  - role: pod\n" +\r
+                               "  relabel_configs:\n" +\r
+                               "  - action: keep\n" +\r
+                               "    regex: true\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_pod_annotation_prometheus_io_scrape_slow\n" +\r
+                               "  - action: replace\n" +\r
+                               "    regex: (.+)\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_pod_annotation_prometheus_io_path\n" +\r
+                               "    target_label: __metrics_path__\n" +\r
+                               "  - action: replace\n" +\r
+                               "    regex: ([^:]+)(?::\\d+)?;(\\d+)\n" +\r
+                               "    replacement: $1:$2\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __address__\n" +\r
+                               "    - __meta_kubernetes_pod_annotation_prometheus_io_port\n" +\r
+                               "    target_label: __address__\n" +\r
+                               "  - action: labelmap\n" +\r
+                               "    regex: __meta_kubernetes_pod_label_(.+)\n" +\r
+                               "  - action: replace\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_namespace\n" +\r
+                               "    target_label: kubernetes_namespace\n" +\r
+                               "  - action: replace\n" +\r
+                               "    source_labels:\n" +\r
+                               "    - __meta_kubernetes_pod_name\n" +\r
+                               "    target_label: kubernetes_pod_name\n" +\r
+                               "  scrape_interval: 5m\n" +\r
+                               "  scrape_timeout: 30s\n" +\r
+                               "alerting:\n" +\r
+                               "  alertmanagers:\n" +\r
+                               "  - kubernetes_sd_configs:\n" +\r
+                               "      - role: pod\n" +\r
+                               "    tls_config:\n" +\r
+                               "      ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n" +\r
+                               "    bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n" +\r
+                               "    relabel_configs:\n" +\r
+                               "    - source_labels: [__meta_kubernetes_namespace]\n" +\r
+                               "      regex: ricplt\n" +\r
+                               "      action: keep\n" +\r
+                               "    - source_labels: [__meta_kubernetes_pod_label_app]\n" +\r
+                               "      regex: prometheus\n" +\r
+                               "      action: keep\n" +\r
+                               "    - source_labels: [__meta_kubernetes_pod_label_component]\n" +\r
+                               "      regex: alertmanager\n" +\r
+                               "      action: keep\n" +\r
+                               "    - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_probe]\n" +\r
+                               "      regex: .*\n" +\r
+                               "      action: keep\n" +\r
+                               "    - source_labels: [__meta_kubernetes_pod_container_port_number]\n" +\r
+                               "      regex:\n" +\r
+                               "      action: drop\n" +\r
+                               "",\r
+                       "recording_rules.yml": "{}\n" +\r
+                               "",\r
+                       "rules": "{}\n" +\r
+                               "",\r
+               },\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Namespace: "ricplt",\r
+                       Labels: map[string]string{\r
+                               "chart":     "prometheus-11.3.0",\r
+                               "component": "server",\r
+                               "heritage":  "Helm",\r
+                               "release":   "release-name",\r
+                               "app":       "prometheus",\r
+                       },\r
+                       Name: "release-name-prometheus-server",\r
+               },\r
+               TypeMeta: metav1.TypeMeta{\r
+                       APIVersion: "v1",\r
+                       Kind:       "ConfigMap",\r
+               },\r
+       }\r
+\r
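+       // Redis cluster settings: cluster-mode redis.conf plus update-node.sh, which patches the pod IP into nodes.conf on restart.\r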
+       configMap27 := &corev1.ConfigMap{\r
+               Data: map[string]string{\r
+                       "update-node.sh": "#!/bin/sh\n" +\r
+                               "REDIS_NODES=\"/data/nodes.conf\"\n" +\r
+                               "sed -i -e \"/myself/ s/[0-9]\\{1,3\\}\\.[0-9]\\{1,3\\}\\.[0-9]\\{1,3\\}\\.[0-9]\\{1,3\\}/${POD_IP}/\" ${REDIS_NODES}\n" +\r
+                               "exec \"$@\"\n" +\r
+                               "",\r
+                       "redis.conf": "cluster-enabled yes\n" +\r
+                               "cluster-require-full-coverage no\n" +\r
+                               "cluster-node-timeout 15000\n" +\r
+                               "cluster-config-file /data/nodes.conf\n" +\r
+                               "cluster-migration-barrier 1\n" +\r
+                               "appendonly yes\n" +\r
+                               "protected-mode no",\r
+               },\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Name: "redis-cluster-cm",\r
+               },\r
+               TypeMeta: metav1.TypeMeta{\r
+                       APIVersion: "v1",\r
+                       Kind:       "ConfigMap",\r
+               },\r
+       }\r
+\r
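+       // placenode.pl (RIC-360): rebalances Redis cluster master/slave roles across k8s nodes to restore anti-affinity.\r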
+       configMap28 := &corev1.ConfigMap{\r
+               TypeMeta: metav1.TypeMeta{\r
+                       Kind:       "ConfigMap",\r
+                       APIVersion: "v1",\r
+               },\r
+               Data: map[string]string{\r
+                       "placenode.pl": "#!/usr/bin/env perl\n" +\r
+                               "=head\n" +\r
+                               "============LICENSE_START=======================================================\n" +\r
+                               "\n" +\r
+                               "================================================================================\n" +\r
+                               "Copyright (C) 2020 Hcl Technologies Limited.\n" +\r
+                               "================================================================================\n" +\r
+                               "Licensed under the Apache License, Version 2.0 (the \"License\");\n" +\r
+                               "you may not use this file except in compliance with the License.\n" +\r
+                               "You may obtain a copy of the License at\n" +\r
+                               "\n" +\r
+                               "     http://www.apache.org/licenses/LICENSE-2.0\n" +\r
+                               "\n" +\r
+                               "Unless required by applicable law or agreed to in writing, software\n" +\r
+                               "distributed under the License is distributed on an \"AS IS\" BASIS,\n" +\r
+                               "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" +\r
+                               "See the License for the specific language governing permissions and\n" +\r
+                               "limitations under the License.\n" +\r
+                               "============LICENSE_END=========================================================\n" +\r
+                               "\n" +\r
+                               "\n" +\r
+                               "About:\n" +\r
+                               "\n" +\r
+                               "This script has been developed as part of https://jira.o-ran-sc.org/browse/RIC-360\n" +\r
+                               "This script identifies the missing anti-affinity(as per above ticket) of redis instances \n" +\r
+                               "required in a redis-cluster. If there is an  undesired  anti-affinity this script can  be \n" +\r
+                               "executed to communicate to redis nodes  to switch roles (e.g. master/slave) such that the \n" +\r
+                               "end-state meets the desired anti-affinity.\n" +\r
+                               "       \n" +\r
+                               "\n" +\r
+                               "Pre-requisites: \n" +\r
+                               "\n" +\r
+                               "  1) A redis cluster with 3 masters (2 replicas each) deployed on kubernetes 1.18 (or later) \n" +\r
+                               "  2) Three available worker nodes for serving redis workloads\n" +\r
+                               "  3) kubectl (with access to the k8 cluster)\n" +\r
+                               "\n" +\r
+                               "=cut\n" +\r
+                               "\n" +\r
+                               "\n" +\r
+                               "my $podRow = { \n" +\r
+                               "\"podIP\"      => \"\",\n" +\r
+                               "\"podName\"    => \"\",\n" +\r
+                               "\"k8Node\"     => \"\",\n" +\r
+                               "\n" +\r
+                               "\"rdNodeRole\" => \"\",\n" +\r
+                               "\"rdNodeID\"   => \"\",\n" +\r
+                               "\n" +\r
+                               "\"rdMasterNodeID\"   => \"\",\n" +\r
+                               "\"slaveIPs\"    => [] \n" +\r
+                               "};\n" +\r
+                               "\n" +\r
+                               "# Pod label for redis nodes\n" +\r
+                               "my $podLabel = $ENV{'POD_LABEL'};\n" +\r
+                               "\n" +\r
+                               "my $podTable =   [];\n" +\r
+                               "my $k8NodeInfo = [];\n" +\r
+                               "\n" +\r
+                               "setk8NodesInfo();\n" +\r
+                               "validate();\n" +\r
+                               "\n" +\r
+                               "# Master\n" +\r
+                               "spreadMastersIfRequired();\n" +\r
+                               "# Slave\n" +\r
+                               "my $disparity = getSlaveDisparity();\n" +\r
+                               "spreadSlavesIfRequired();\n" +\r
+                               "\n" +\r
+                               "sub validate() {\n" +\r
+                               "    my @masters = map { $_->{'rdNodeRole'} eq 'master' ? $_ : () } @{$podTable};\n" +\r
+                               "       if ( @masters > @{$k8NodeInfo->{allk8Nodes}} ) {\n" +\r
+                               "               print \"Info: Skipping any action as num of master > number of k8 nodes..\\n\";\n" +\r
+                               "           exit;\n" +\r
+                               "       }\n" +\r
+                               "}\n" +\r
+                               "\n" +\r
+                               "\n" +\r
+                               "sub spreadSlavesIfRequired() {\n" +\r
+                               "    \n" +\r
+                               "\n" +\r
+                               "       # Get node with maximum disparity first\n" +\r
+                               "    my @disparityMatrix = reverse sort { @{$a} <=> @{$b} } @${disparity}; \n" +\r
+                               "    #@disparityMatrix = grep defined, @disparityMatrix;\n" +\r
+                               "    #@disparityMatrix = map { defined $_ ? $_ : () } @disparityMatrix;\n" +\r
+                               "\n" +\r
+                               "    # Get list of slaves to be swapped roles.\n" +\r
+                               "    my @slaveSwapList = ();\n" +\r
+                               "    my $maxDisparityPerNode = @{$disparityMatrix[0]};\n" +\r
+                               "\n" +\r
+                               "    for (my $disparityPass=0; $disparityPass < $maxDisparityPerNode; $disparityPass++) {\n" +\r
+                               "        for (my $k8NodeIndex=0; $k8NodeIndex <= $#{disparityMatrix}; $k8NodeIndex++) {\n" +\r
+                               "                  #print \"$disparityMatrix[$disparityPass] && $disparityMatrix[$k8NodeIndex][$disparityPass]\";\n" +\r
+                               "           if ( $disparityMatrix[$disparityPass] && $disparityMatrix[$k8NodeIndex][$disparityPass] ) {\n" +\r
+                               "                           push(@slaveSwapList,$disparityMatrix[$k8NodeIndex][$disparityPass]);\n" +\r
+                               "                  }\n" +\r
+                               "        }\n" +\r
+                               "    }\n" +\r
+                               "    if ( ! @slaveSwapList ) {\n" +\r
+                               "       print \"Info: No disparity found with slaves.\\n\" if ( @slaveSwapList < 2);\n" +\r
+                               "               exit;\n" +\r
+                               "       } elsif ( @slaveSwapList == 1 ) {\n" +\r
+                               "       print \"Info: single host scenario (with no swap candidate in other k8 nodes) found.\\n\";\n" +\r
+                               "               exit;\n" +\r
+                               "       } else {\n" +\r
+                               "       print \"Info: slave disparity found.\\n\";\n" +\r
+                               "    }\n" +\r
+                               "\n" +\r
+                               "       # Swap slaves \n" +\r
+                               "       for (my $swapIndex=0; $swapIndex < @slaveSwapList; $swapIndex++) {\n" +\r
+                               "               $pod1 = $slaveSwapList[$swapIndex];\n" +\r
+                               "               $pod2 = $slaveSwapList[++$swapIndex];\n" +\r
+                               "               #print \"Info: Swapping Slaves: \" . join($pod1->{podName}, $pod2->{podName}) . \"\\n\";\n" +\r
+                               "               \n" +\r
+                               "               my $cmd1 = qq[kubectl exec -it ].\n" +\r
+                               "                                  qq[$pod1->{podName}  -- redis-cli -p 6379 cluster replicate $pod2->{rdMasterNodeID} ];\n" +\r
+                               "               \n" +\r
+                               "               my $cmd2 = qq[kubectl exec -it ].\n" +\r
+                               "                                  qq[$pod2->{podName}  -- redis-cli -p 6379 cluster replicate $pod1->{rdMasterNodeID} ];\n" +\r
+                               "\n" +\r
+                               "           runRediClusterCmd($cmd1);\n" +\r
+                               "           runRediClusterCmd($cmd2);\n" +\r
+                               "               #print \"\\n$cmd1\";\n" +\r
+                               "               #print \"\\n$cmd2\\n\";\n" +\r
+                               "    }\n" +\r
+                               "\n" +\r
+                               "}\n" +\r
+                               "\n" +\r
+                               "\n" +\r
+                               "sub getSlaveDisparity() {\n" +\r
+                               "\n" +\r
+                               "    # Get Slave Disparity Metrix\n" +\r
+                               "    my $disparity = ();\n" +\r
+                               "    my $nodeIndex = 0;\n" +\r
+                               "    foreach my $k8NodeName ( @{$k8NodeInfo->{allk8Nodes}} ) {\n" +\r
+                               "        my @redisNodesOnk8Node = map { $_->{'k8Node'} eq $k8NodeName ? $_ : () } @{$podTable};\n" +\r
+                               "        @redisNodesOnk8Node    = sort { $a->{\"rdNodeRole\"} cmp $b->{\"rdNodeRole\"} } @redisNodesOnk8Node;\n" +\r
+                               "\n" +\r
+                               "        my $master = shift @redisNodesOnk8Node;\n" +\r
+                               "        \n" +\r
+                               "        for (my $index=0; $index <= $#{redisNodesOnk8Node}; $index++ ) {\n" +\r
+                               "            my $slave = $redisNodesOnk8Node[$index];\n" +\r
+                               "            #print \"chekcing for pod:  $slave->{podName}\\n\";\n" +\r
+                               "            my $disparityFound = 0;\n" +\r
+                               "            if ( $slave->{rdMasterNodeID} eq $master->{rdNodeID} ) {\n" +\r
+                               "               $disparityFound = 1;\n" +\r
+                               "            } else {\n" +\r
+                               "               #check is other slaves are its sibling\n" +\r
+                               "               for (my $nextIndex=$index + 1; $nextIndex <= $#{redisNodesOnk8Node}; $nextIndex++ ) {\n" +\r
+                               "                   if ( $slave->{rdMasterNodeID} eq $redisNodesOnk8Node[$nextIndex]->{rdMasterNodeID} ) {\n" +\r
+                               "                          $disparityFound = 1;\n" +\r
+                               "                       break;\n" +\r
+                               "                   }\n" +\r
+                               "               }\n" +\r
+                               "            }\n" +\r
+                               "                       if ($disparityFound) {\n" +\r
+                               "               #$disparity[$nodeIndex][$index] = { 'podName' => $slave->{\"podName\"}, 'rdMasterNodeID' => $slave->{\"rdMasterNodeID\"} } ;\n" +\r
+                               "               push(@{$disparity[$nodeIndex]},{ 'podName' => $slave->{\"podName\"}, 'rdMasterNodeID' => $slave->{\"rdMasterNodeID\"} } ) ;\n" +\r
+                               "                       }\n" +\r
+                               "        }\n" +\r
+                               "        $nodeIndex++;\n" +\r
+                               "    }\n" +\r
+                               "        return \\@disparity;\n" +\r
+                               "}\n" +\r
+                               "\n" +\r
+                               "sub spreadMastersIfRequired() {\n" +\r
+                               "\n" +\r
+                               "   NODE_WITH_NO_MASTER: foreach my $nodeWithoutMaster (@{$k8NodeInfo->{k8NodesWithoutMaster}}) {\n" +\r
+                               "      # For each k8Node without any master \n" +\r
+                               "      #    Check for each extra master on its hostNode\n" +\r
+                               "      #        Find its slave on the this hostNode (i.e. without any master) \n" +\r
+                               "      # Such slave must be Found for 3x3 set-up:\n" +\r
+                               "      # Then Promote as master # Re-Evaluate\n" +\r
+                               "\n" +\r
+                               "      # Get All Redis Slaves on This k8 node\n" +\r
+                               "      print \"Info: K8 node without any master : $nodeWithoutMaster\\n\";\n" +\r
+                               "      my @rdSlaveNodes =  map { ($_->{'k8Node'} eq $nodeWithoutMaster ) && ($_->{'rdNodeRole'} eq 'slave') ? $_ : () } @{$podTable};\n" +\r
+                               "\n" +\r
+                               "           foreach my $nodeWithExtraMaster (@{$k8NodeInfo->{k8NodesWithExtraMaster}} ) {\n" +\r
+                               "              print \"Info: k8 Node with extra master : $nodeWithExtraMaster\\n\";\n" +\r
+                               "              #my @rdSlaveNodes =  map { ($_->{'k8Node'} eq $nodeWithoutMaster ) && ($_->{'rdNodeRole'} eq 'slave') ? $_ : () } @{$podTable};\n" +\r
+                               "\n" +\r
+                               "              my @masterInstances = map { ($_->{'k8Node'} eq $nodeWithExtraMaster ) && ($_->{'rdNodeRole'} eq 'master') ? $_ : () } @{$podTable};        \n" +\r
+                               "              foreach my $master (@masterInstances) {\n" +\r
+                               "                  my @slave = map { $_->{\"rdMasterNodeID\"} eq $master->{rdNodeID} ? $_ : () } @rdSlaveNodes;\n" +\r
+                               "                  if ( @slave ) {\n" +\r
+                               "                      promoteSlaveAsMaster($slave[0]);\n" +\r
+                               "                                         my $isPromoted = 0;\n" +\r
+                               "                                     my $slaveNodeID= $slave[0]->{rdNodeID};\n" +\r
+                               "                                         while( ! $isPromoted ) {\n" +\r
+                               "                                                sleep(8);\n" +\r
+                               "                                            setk8NodesInfo();\n" +\r
+                               "                                                my ($promotedNode) = map { $slaveNodeID eq $_->{rdNodeID} ? $_ : () } @{$podTable};\n" +\r
+                               "\n" +\r
+                               "                                                if ( $promotedNode->{'rdNodeRole'} ne 'master' ) {\n" +\r
+                               "                                                       print (\"Info: Waiting for node promotion confirmation..\\n\");\n" +\r
+                               "                                                } else {\n" +\r
+                               "                                                       $isPromoted = 1;\n" +\r
+                               "                                                       print (\"Info: Node promotion confirmed.\\n\");\n" +\r
+                               "                                                }\n" +\r
+                               "                                         }\n" +\r
+                               "                      next NODE_WITH_NO_MASTER;\n" +\r
+                               "                  }\n" +\r
+                               "              }\n" +\r
+                               "           }\n" +\r
+                               "   }\n" +\r
+                               "   print \"Info: All redis masters are on separate k8 Nodes. \\n\"    if ( ! @{$k8NodeInfo->{k8NodesWithoutMaster}}) ;\n" +\r
+                               "}\n" +\r
+                               "\n" +\r
+                               "sub promoteSlaveAsMaster() {\n" +\r
+                               "    my $slavePod = shift;    \n" +\r
+                               "    #print \"Info: Promoting Slave $slavePod->{'podName'} On $slavePod->{'k8Node'} as master\";\n" +\r
+                               "    my $cmd = qq[kubectl exec -it $slavePod->{'podName'} -- redis-cli -p 6379 cluster failover takeover];\n" +\r
+                               "    runRediClusterCmd($cmd);\n" +\r
+                               "    \n" +\r
+                               "}\n" +\r
+                               "sub runRediClusterCmd() {\n" +\r
+                               "  my $cmd = shift;    \n" +\r
+                               "  print \"Info: Running Cmd:$cmd \\n\";\n" +\r
+                               "  `$cmd;`;\n" +\r
+                               "  sleep(8);\n" +\r
+                               "}\n" +\r
+                               "\n" +\r
+                               "\n" +\r
+                               "#foreach my $item (@{$podTable}) {\n" +\r
+                               "#}\n" +\r
+                               "\n" +\r
+                               "# find_nodes_without-a-single_master\n" +\r
+                               "sub setk8NodesInfo() {\n" +\r
+                               "\n" +\r
+                               "   $podTable   = [];\n" +\r
+                               "   $k8NodeInfo = [];\n" +\r
+                               "\n" +\r
+                               "   getCurrentStatus();\n" +\r
+                               "   # All k8 nodes\n" +\r
+                               "   my @k8NodeList = uniq(map { $_->{'k8Node'} } @$podTable);\n" +\r
+                               "\n" +\r
+                               "   # Find Nodes with At least One master\n" +\r
+                               "   my @k8NodesWithMaster;\n" +\r
+                               "   foreach my $nodeName (@k8NodeList) {\n" +\r
+                               "      push(@k8NodesWithMaster, map { ($_->{'k8Node'} eq $nodeName) && ($_->{'rdNodeRole'} eq 'master')   ? $nodeName : ()  } @{$podTable} );\n" +\r
+                               "   }\n" +\r
+                               "\n" +\r
+                               "   # Find Nodes without any master = All nodes - Nodes with at least one Master\n" +\r
+                               "   my %k8NodesMap = ();\n" +\r
+                               "   foreach (@k8NodesWithMaster) { \n" +\r
+                               "           if ( exists $k8NodesMap{$_} ) {\n" +\r
+                               "                   $k8NodesMap{$_}++;\n" +\r
+                               "           } else {\n" +\r
+                               "                   $k8NodesMap{$_} = 1;\n" +\r
+                               "           }\n" +\r
+                               "   }\n" +\r
+                               "   my @k8NodesWithoutMaster = map { exists $k8NodesMap{$_} ? () : $_ } @k8NodeList;\n" +\r
+                               "   my @k8NodesWithExtraMaster = uniq(map { $k8NodesMap{$_} > 1 ? $_ : () } @k8NodesWithMaster);\n" +\r
+                               "\n" +\r
+                               "   $k8NodeInfo = { 'allk8Nodes' => \\@k8NodeList, 'k8NodesWithExtraMaster' => \\@k8NodesWithExtraMaster, 'k8NodesWithoutMaster' => \\@k8NodesWithoutMaster };\n" +\r
+                               "}\n" +\r
+                               "\n" +\r
+                               "\n" +\r
+                               "\n" +\r
+                               "\n" +\r
+                               "\n" +\r
+                               "# Validate if number of masters ,= number of rea\n" +\r
+                               "\n" +\r
+                               "#\n" +\r
+                               "#sub filter\n" +\r
+                               "\n" +\r
+                               "=head\n" +\r
+                               "get \n" +\r
+                               "podName where k8Node eq \"x\"\n" +\r
+                               "    get position of k8node eq x \n" +\r
+                               "where \n" +\r
+                               "=cut\n" +\r
+                               "\n" +\r
+                               "exit;\n" +\r
+                               "\n" +\r
+                               "sub uniq {\n" +\r
+                               "    my %seen;\n" +\r
+                               "    grep !$seen{$_}++, @_;\n" +\r
+                               "}\n" +\r
+                               "\n" +\r
+                               "sub getCurrentStatus() {\n" +\r
+                               "\n" +\r
+                               "    # Run pod list command    \n" +\r
+                               "    my @getPods = `kubectl get po --no-headers  -o wide -l $podLabel |grep Running`;    chomp @getPods;\n" +\r
+                               "    #my @getPods = `kubectl get po --no-headers  -o wide -l managed-by=redis-cluster-operator|grep Running`;    chomp @getPods;\n" +\r
+                               "\n" +\r
+                               "    foreach my $podLine (@getPods) {\n" +\r
+                               "        my @podData = split(/\\s+/,$podLine);\n" +\r
+                               "        my ($podName,$status,$age,$podIP,$podNode) = ($podData[0], $podData[2], $podData[4], $podData[5],$podData[6]);\n" +\r
+                               "\n" +\r
+                               "        #print \"$podName,$status,$age,$podIP,$podNode\" .\"\\n\"; \n" +\r
+                               "        my $podRow = { 'podIP' => $podIP, 'podName' => $podName, 'k8Node' => $podNode, 'podAge' => $age, 'podStatus' => $status };    \n" +\r
+                               "        push (@{$podTable},$podRow)\n" +\r
+                               "    }\n" +\r
+                               "\n" +\r
+                               "    my $podName = $podTable->[0]{'podName'};\n" +\r
+                               "    #print \"Info:kubectl exec $podName  -- cat nodes.conf|sort -k3\\n\";\n" +\r
+                               "    my @rdNodeData = `kubectl exec $podName  -- cat nodes.conf|sort -k3`;    chomp @rdNodeData;\n" +\r
+                               "    foreach my $rdNodeLine (@rdNodeData) {\n" +\r
+                               "        next if ($rdNodeLine !~ /master|slave/);\n" +\r
+                               "            my @rdNodeData = split(/\\s+/,$rdNodeLine);\n" +\r
+                               "            my ($rdNodeID,$rdRole,$rdMasterNodeID,$epoch) = ($rdNodeData[0], $rdNodeData[2], $rdNodeData[3],$rdNodeData[5]);\n" +\r
+                               "            my ($podIP) = split(/:/,$rdNodeData[1]);\n" +\r
+                               "            $rdRole =~ s/myself,//;\n" +\r
+                               "\n" +\r
+                               "            #print \"$rdNodeID,$rdRole,$rdMasterNodeID,$podIP\" .\"\\n\";\n" +\r
+                               "            my $rdElem = { 'podIP'    => $podIP, \n" +\r
+                               "                           'rdNodeID' => $rdNodeID,\n" +\r
+                               "                           'rdRole'   => $rdRole,\n" +\r
+                               "                           'rdMasterNodeID' => $rdMasterNodeID,\n" +\r
+                               "                           'epoch'          => $epoch\n" +\r
+                               "            };\n" +\r
+                               "\n" +\r
+                               "        for(my $index=0; $index <= $#{$podTable}; $index++) {\n" +\r
+                               "            if ( $podTable->[$index]{'podIP'} eq $podIP ) {\n" +\r
+                               "                #print \"Matched\\n\";\n" +\r
+                               "                $podTable->[$index]{'rdNodeID'}       = $rdNodeID;\n" +\r
+                               "                $podTable->[$index]{'rdNodeRole'}        = $rdRole;\n" +\r
+                               "                $podTable->[$index]{'rdMasterNodeID'} = $rdMasterNodeID;\n" +\r
+                               "                $podTable->[$index]{'epoch'}          = $epoch;\n" +\r
+                               "            }\n" +\r
+                               "        }\n" +\r
+                               "        #exit;\n" +\r
+                               "\n" +\r
+                               "    }\n" +\r
+                               "}\n" +\r
+                               "",\r
+                       "relatenode.sh": "#!/bin/sh\n" +\r
+                               "podLabel=${POD_LABEL}\n" +\r
+                               "firstPod=$(kubectl  get   po -o wide -l app.kubernetes.io/name=redis-cluster --no-headers=true|head -1|cut -d\" \" -f1)\n" +\r
+                               "\n" +\r
+                               "kubectl get po -o wide -l $podLabel |tail +2|awk '{printf(\"%s:%s:%s:%s\\n\",$6,$1,$7,$10)}'|sort  > /tmp/1.txt\n" +\r
+                               "kubectl exec  $firstPod  -- cat nodes.conf|sed 's/myself,//'|awk '/master|slave/ {print $2,$1,$3,$4}'|sort > /tmp/2.txt\n" +\r
+                               "join -t \":\"  /tmp/1.txt /tmp/2.txt |sort -k3,4 | sed 's/ /:/g'|awk -F\":\" '{print $2,$7,$3,$1,$4,$6,$8}' > /tmp/3.txt\n" +\r
+                               "\n" +\r
+                               "echo \"\\n   POD_NAME      ROLE      k8NODE        POD_IP                   REDIS_NODE_ID                       REDIS_MASTER_NODE_ID\"\n" +\r
+                               "grep $(cut -d\" \" -f4 /tmp/2.txt|sort -u|grep -v \"-\"|sed -n '1p') /tmp/3.txt\n" +\r
+                               "echo \"\"\n" +\r
+                               "grep $(cut -d\" \" -f4 /tmp/2.txt|sort -u|grep -v \"-\"|sed -n '2p') /tmp/3.txt\n" +\r
+                               "echo \"\"\n" +\r
+                               "grep $(cut -d\" \" -f4 /tmp/2.txt|sort -u|grep -v \"-\"|sed -n '3p') /tmp/3.txt",\r
+               },\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Name: "assigner-cm",\r
+               },\r
+       }\r
+\r
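+       // RMR routing table for the Resource Status Manager (RSM): message type\r
+       // 10090 (RIC_RES_STATUS_REQ) is routed to the E2 termination RMR endpoint.\r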
+       configMap29 := &corev1.ConfigMap{\r
+               Data: map[string]string{\r
+                       "rmr_verbose": "0\n" +\r
+                               "",\r
+                       "router.txt": "newrt|start\n" +\r
+                               "rte|10090|service-ricplt-e2term-rmr.ricplt:38000\n" +\r
+                               "newrt|end",\r
+               },\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Name:      "configmap-ricplt-rsm-router-configmap",\r
+                       Namespace: "ricplt",\r
+               },\r
+               TypeMeta: metav1.TypeMeta{\r
+                       APIVersion: "v1",\r
+                       Kind:       "ConfigMap",\r
+               },\r
+       }\r
+\r
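+       // RSM application configuration: log level, HTTP and RMR ports, and the\r
+       // R-NIB connection retry policy.\r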
+       configMap30 := &corev1.ConfigMap{\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Name:      "configmap-ricplt-rsm",\r
+                       Namespace: "ricplt",\r
+               },\r
+               TypeMeta: metav1.TypeMeta{\r
+                       APIVersion: "v1",\r
+                       Kind:       "ConfigMap",\r
+               },\r
+               Data: map[string]string{\r
+                       "configuration.yaml": "logging:\n" +\r
+                               "  logLevel:  \"info\"\n" +\r
+                               "http:\n" +\r
+                               "  port: 4800\n" +\r
+                               "rmr:\n" +\r
+                               "  port: 4801\n" +\r
+                               "  maxMsgSize: 4096\n" +\r
+                               "  readyIntervalSec: 1\n" +\r
+                               "rnib:\n" +\r
+                               "  maxRnibConnectionAttempts: 3\n" +\r
+                               "  rnibRetryIntervalMs: 10",\r
+               },\r
+       }\r
+\r
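+       // RSM environment: port of the RMR routing-table service.\r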
+       configMap31 := &corev1.ConfigMap{\r
+               Data: map[string]string{\r
+                       "RMR_RTG_SVC": "4561",\r
+               },\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Name: "configmap-ricplt-rsm-env",\r
+               },\r
+               TypeMeta: metav1.TypeMeta{\r
+                       APIVersion: "v1",\r
+                       Kind:       "ConfigMap",\r
+               },\r
+       }\r
+\r
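+       // Routing Manager (rtmgr) configuration: RMR endpoints of the platform\r
+       // components, the RMR message-type table, and the static platform routes.\r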
+       configMap32 := &corev1.ConfigMap{\r
+               Data: map[string]string{\r
+                       "rtmgrcfg": "\"PlatformComponents\":\n" +\r
+                               "  -\n" +\r
+                               "    \"name\": \"SUBMAN\"\n" +\r
+                               "    \"fqdn\": \"service-ricplt-submgr-rmr.ricplt\"\n" +\r
+                               "    \"port\": 4560\n" +\r
+                               "  -\n" +\r
+                               "    \"name\": \"E2MAN\"\n" +\r
+                               "    \"fqdn\": \"service-ricplt-e2mgr-rmr.ricplt\"\n" +\r
+                               "    \"port\": 3801\n" +\r
+                               "  -\n" +\r
+                               "    \"name\": \"A1MEDIATOR\"\n" +\r
+                               "    \"fqdn\": \"service-ricplt-a1mediator-rmr.ricplt\"\n" +\r
+                               "    \"port\": 4562\n" +\r
+                               "\n" +\r
+                               "\"XMURL\":\n" +\r
+                               "  \"http://service-ricplt-appmgr-http:8080/ric/v1/xapps\"\n" +\r
+                               "\"E2MURL\":\n" +\r
+                               "  \"http://service-ricplt-e2mgr-http:3800/v1/e2t/list\"\n" +\r
+                               "\"RTFILE\":\n" +\r
+                               "  \"/db/rt.json\"\n" +\r
+                               "\"CFGFILE\":\n" +\r
+                               "  \"/cfg/rtmgr-config.yaml\"\n" +\r
+                               "\"RPE\":\n" +\r
+                               "  \"rmrpush\"\n" +\r
+                               "\"SBI\":\n" +\r
+                               "  \"rmrpush\"\n" +\r
+                               "\"SBIURL\":\n" +\r
+                               "  \"0.0.0.0\"\n" +\r
+                               "\"NBI\":\n" +\r
+                               "  \"httpRESTful\"\n" +\r
+                               "\"NBIURL\":\n" +\r
+                               "  \"http://service-ricplt-rtmgr-http:3800\"\n" +\r
+                               "\"SDL\":\n" +\r
+                               "  \"file\"\n" +\r
+                               "\"local\":\n" +\r
+                               "  \"host\": \":8080\"\n" +\r
+                               "\"logger\":\n" +\r
+                               "  \"level\": 4\n" +\r
+                               "\"periodicRoutes\":\n" +\r
+                               "  \"enable\"              \n" +\r
+                               "\"rmr\":\n" +\r
+                               "  \"protPort\": \"tcp:4560\"\n" +\r
+                               "  \"maxSize\": 1024\n" +\r
+                               "  \"numWorkers\": 1\n" +\r
+                               "  \"threadType\": 1\n" +\r
+                               "\"messagetypes\": [\n" +\r
+                               "   \"RIC_HEALTH_CHECK_REQ=100\",\n" +\r
+                               "   \"RIC_HEALTH_CHECK_RESP=101\",\n" +\r
+                               "   \"RIC_ALARM=110\",\n" +\r
+                               "   \"RIC_ALARM_QUERY=111\",\n" +\r
+                               "   \"RIC_SCTP_CONNECTION_FAILURE=1080\",\n" +\r
+                               "   \"E2_TERM_INIT=1100\",\n" +\r
+                               "   \"E2_TERM_KEEP_ALIVE_REQ=1101\",\n" +\r
+                               "   \"E2_TERM_KEEP_ALIVE_RESP=1102\",\n" +\r
+                               "   \"RIC_SCTP_CLEAR_ALL=1090\",\n" +\r
+                               "   \"RAN_CONNECTED=1200\",\n" +\r
+                               "   \"RAN_RESTARTED=1210\",\n" +\r
+                               "   \"RAN_RECONFIGURED=1220\",\n" +\r
+                               "   \"RIC_ENB_LOAD_INFORMATION=10020\",\n" +\r
+                               "   \"RIC_SN_STATUS_TRANSFER=10040\",\n" +\r
+                               "   \"RIC_UE_CONTEXT_RELEASE=10050\",\n" +\r
+                               "   \"RIC_X2_SETUP_REQ=10060\",\n" +\r
+                               "   \"RIC_X2_SETUP_RESP=10061\",\n" +\r
+                               "   \"RIC_X2_SETUP_FAILURE=10062\",\n" +\r
+                               "   \"RIC_X2_RESET=10070\",\n" +\r
+                               "   \"RIC_X2_RESET_RESP=10071\",\n" +\r
+                               "   \"RIC_ENB_CONF_UPDATE=10080\",\n" +\r
+                               "   \"RIC_ENB_CONF_UPDATE_ACK=10081\",\n" +\r
+                               "   \"RIC_ENB_CONF_UPDATE_FAILURE=10082\",\n" +\r
+                               "   \"RIC_RES_STATUS_REQ=10090\",\n" +\r
+                               "   \"RIC_RES_STATUS_RESP=10091\",\n" +\r
+                               "   \"RIC_RES_STATUS_FAILURE=10092\",\n" +\r
+                               "   \"RIC_SGNB_ADDITION_REQ=10270\",\n" +\r
+                               "   \"RIC_SGNB_ADDITION_ACK=10271\",\n" +\r
+                               "   \"RIC_SGNB_ADDITION_REJECT=10272\",\n" +\r
+                               "   \"RIC_SGNB_RECONF_COMPLETE=10280\",\n" +\r
+                               "   \"RIC_SGNB_MOD_REQUEST=10290\",\n" +\r
+                               "   \"RIC_SGNB_MOD_REQUEST_ACK=10291\",\n" +\r
+                               "   \"RIC_SGNB_MOD_REQUEST_REJ=10292\",\n" +\r
+                               "   \"RIC_SGNB_MOD_REQUIRED=10300\",\n" +\r
+                               "   \"RIC_SGNB_MOD_CONFIRM=10301\",\n" +\r
+                               "   \"RIC_SGNB_MOD_REFUSE=10302\",\n" +\r
+                               "   \"RIC_SGNB_RELEASE_REQUEST=10310\",\n" +\r
+                               "   \"RIC_SGNB_RELEASE_REQUEST_ACK=10311\",\n" +\r
+                               "   \"RIC_SGNB_RELEASE_REQUIRED=10320\",\n" +\r
+                               "   \"RIC_SGNB_RELEASE_CONFIRM=10321\",\n" +\r
+                               "   \"RIC_RRC_TRANSFER=10350\",\n" +\r
+                               "   \"RIC_ENDC_X2_SETUP_REQ=10360\",\n" +\r
+                               "   \"RIC_ENDC_X2_SETUP_RESP=10361\",\n" +\r
+                               "   \"RIC_ENDC_X2_SETUP_FAILURE=10362\",\n" +\r
+                               "   \"RIC_ENDC_CONF_UPDATE=10370\",\n" +\r
+                               "   \"RIC_ENDC_CONF_UPDATE_ACK=10371\",\n" +\r
+                               "   \"RIC_ENDC_CONF_UPDATE_FAILURE=10372\",\n" +\r
+                               "   \"RIC_SECONDARY_RAT_DATA_USAGE_REPORT=10380\",\n" +\r
+                               "   \"RIC_E2_SETUP_REQ=12001\",\n" +\r
+                               "   \"RIC_E2_SETUP_RESP=12002\",\n" +\r
+                               "   \"RIC_E2_SETUP_FAILURE=12003\",\n" +\r
+                               "   \"RIC_ERROR_INDICATION=12007\",\n" +\r
+                               "   \"RIC_SUB_REQ=12010\",\n" +\r
+                               "   \"RIC_SUB_RESP=12011\",\n" +\r
+                               "   \"RIC_SUB_FAILURE=12012\",\n" +\r
+                               "   \"RIC_SUB_DEL_REQ=12020\",\n" +\r
+                               "   \"RIC_SUB_DEL_RESP=12021\",\n" +\r
+                               "   \"RIC_SUB_DEL_FAILURE=12022\",\n" +\r
+                               "   \"RIC_SUB_DEL_REQUIRED=12023\",\n" +\r
+                               "   \"RIC_CONTROL_REQ=12040\",\n" +\r
+                               "   \"RIC_CONTROL_ACK=12041\",\n" +\r
+                               "   \"RIC_CONTROL_FAILURE=12042\",\n" +\r
+                               "   \"RIC_INDICATION=12050\",\n" +\r
+                               "   \"A1_POLICY_REQ=20010\",\n" +\r
+                               "   \"A1_POLICY_RESP=20011\",\n" +\r
+                               "   \"A1_POLICY_QUERY=20012\",\n" +\r
+                               "   \"TS_UE_LIST=30000\",\n" +\r
+                               "   \"TS_QOE_PRED_REQ=30001\",\n" +\r
+                               "   \"TS_QOE_PREDICTION=30002\",\n" +\r
+                               "   \"TS_ANOMALY_UPDATE=30003\",\n" +\r
+                               "   \"TS_ANOMALY_ACK=30004\",\n" +\r
+                               "   \"MC_REPORT=30010\",\n" +\r
+                               "   \"DCAPTERM_RTPM_RMR_MSGTYPE=33001\",\n" +\r
+                               "   \"DCAPTERM_GEO_RMR_MSGTYPE=33002\",\n" +\r
+                               "   \"RIC_SERVICE_UPDATE=12030\",\n" +\r
+                               "   \"RIC_SERVICE_UPDATE_ACK=12031\",\n" +\r
+                               "   \"RIC_SERVICE_UPDATE_FAILURE=12032\",\n" +\r
+                               "   \"RIC_E2NODE_CONFIG_UPDATE=12070\",\n" +\r
+                               "   \"RIC_E2NODE_CONFIG_UPDATE_ACK==12071\",\n" +\r
+                               "   \"RIC_E2NODE_CONFIG_UPDATE_FAILURE=12072\",\n" +\r
+                               "   \"RIC_E2_RESET_REQ=12004\",\n" +\r
+                               "   \"RIC_E2_RESET_RESP=12005\",\n" +\r
+                               "   ]\n" +\r
+                               "\n" +\r
+                               "\"PlatformRoutes\": [\n" +\r
+                               "  { 'messagetype': 'RIC_SUB_REQ', 'senderendpoint': 'SUBMAN', 'subscriptionid': -1, 'endpoint': '', 'meid': '%meid'},\n" +\r
+                               "  { 'messagetype': 'RIC_SUB_DEL_REQ', 'senderendpoint': 'SUBMAN', 'subscriptionid': -1,'endpoint': '', 'meid': '%meid'},\n" +\r
+                               "  { 'messagetype': 'RIC_SUB_RESP', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'SUBMAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'RIC_SUB_DEL_RESP', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'SUBMAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'RIC_SUB_FAILURE', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'SUBMAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'RIC_SUB_DEL_FAILURE', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'SUBMAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'RIC_SUB_DEL_REQUIRED', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'SUBMAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'RIC_X2_SETUP_REQ', 'senderendpoint': 'E2MAN', 'subscriptionid': -1, 'endpoint': '', 'meid': '%meid'},\n" +\r
+                               "  { 'messagetype': 'RIC_X2_RESET', 'senderendpoint': 'E2MAN', 'subscriptionid': -1, 'endpoint': '', 'meid': '%meid'},\n" +\r
+                               "  { 'messagetype': 'RIC_X2_RESET_RESP', 'senderendpoint': 'E2MAN', 'subscriptionid': -1, 'endpoint': '', 'meid': '%meid'},\n" +\r
+                               "  { 'messagetype': 'RIC_ENDC_X2_SETUP_REQ', 'senderendpoint': 'E2MAN', 'subscriptionid': -1, 'endpoint': '', 'meid': '%meid'},\n" +\r
+                               "  { 'messagetype': 'RIC_ENB_CONF_UPDATE_ACK', 'senderendpoint': 'E2MAN', 'subscriptionid': -1, 'endpoint': '', 'meid': '%meid'},\n" +\r
+                               "  { 'messagetype': 'RIC_ENB_CONF_UPDATE_FAILURE', 'senderendpoint': 'E2MAN', 'subscriptionid': -1, 'endpoint': '', 'meid': '%meid'},\n" +\r
+                               "  { 'messagetype': 'RIC_ENDC_CONF_UPDATE_ACK', 'senderendpoint': 'E2MAN', 'subscriptionid': -1, 'endpoint': '', 'meid': '%meid'},\n" +\r
+                               "  { 'messagetype': 'RIC_ENDC_CONF_UPDATE_FAILURE', 'senderendpoint': 'E2MAN', 'subscriptionid': -1, 'endpoint': '', 'meid': '%meid'},\n" +\r
+                               "  { 'messagetype': 'RIC_E2_SETUP_REQ', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'E2_TERM_INIT', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'RIC_X2_SETUP_RESP', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'RIC_X2_SETUP_FAILURE', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'RIC_X2_RESET', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'RIC_X2_RESET_RESP', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'RIC_ENDC_X2_SETUP_RESP', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'RIC_ENDC_X2_SETUP_FAILURE', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'RIC_ENDC_CONF_UPDATE', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'RIC_SCTP_CONNECTION_FAILURE', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'RIC_ERROR_INDICATION', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'RIC_ENB_CONF_UPDATE', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'RIC_ENB_LOAD_INFORMATION', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'E2_TERM_KEEP_ALIVE_RESP', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'A1_POLICY_QUERY', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'A1MEDIATOR', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'A1_POLICY_RESP', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'A1MEDIATOR', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'RIC_SERVICE_UPDATE', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'RIC_E2NODE_CONFIG_UPDATE', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
+                               "  { 'messagetype': 'RIC_E2_RESET_REQ', 'senderendpoint': '', 'subscriptionid': -1, 'endpoint': 'E2MAN', 'meid': ''},\n" +\r
+                               "   ]\n" +\r
+                               "",\r
+               },\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Name: "configmap-ricplt-rtmgr-rtmgrcfg",\r
+               },\r
+               TypeMeta: metav1.TypeMeta{\r
+                       APIVersion: "v1",\r
+                       Kind:       "ConfigMap",\r
+               },\r
+       }\r
+\r
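+       // rtmgr environment: config file path, routing-table service port, seed\r
+       // route table, RMR source ID, and the xApp manager URL.\r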
+       configMap33 := &corev1.ConfigMap{\r
+               Data: map[string]string{\r
+                       "CFGFILE":     "/cfg/rtmgr-config.yaml",\r
+                       "RMR_RTG_SVC": "4561",\r
+                       "RMR_SEED_RT": "/uta_rtg_ric.rt",\r
+                       "RMR_SRC_ID":  "service-ricplt-rtmgr-rmr.ricplt",\r
+                       "XMURL":       "http://service-ricplt-appmgr-http:8080/ric/v1/xapps",\r
+               },\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Name: "configmap-ricplt-rtmgr-env",\r
+               },\r
+               TypeMeta: metav1.TypeMeta{\r
+                       Kind:       "ConfigMap",\r
+                       APIVersion: "v1",\r
+               },\r
+       }\r
+\r
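+       // Subscription Manager (submgr) configuration, plus an empty seed RMR route\r
+       // table (routes are distributed at runtime by the routing manager).\r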
+       configMap34 := &corev1.ConfigMap{\r
+               Data: map[string]string{\r
+                       "submgrcfg": "\"local\":\n" +\r
+                               "  \"host\": \":8080\"\n" +\r
+                               "\"logger\":\n" +\r
+                               "  \"level\": 3\n" +\r
+                               "\"rmr\":\n" +\r
+                               "  \"protPort\" : \"tcp:4560\"\n" +\r
+                               "  \"maxSize\": 8192\n" +\r
+                               "  \"numWorkers\": 1\n" +\r
+                               "\"rtmgr\":\n" +\r
+                               "  \"hostAddr\": \"service-ricplt-rtmgr-http\"\n" +\r
+                               "  \"port\"    : 3800\n" +\r
+                               "  \"baseUrl\" : \"/ric/v1\"\n" +\r
+                               "\"db\":\n" +\r
+                               "  \"sessionNamespace\": \"XMSession\"\n" +\r
+                               "  \"host\": \":6379\"\n" +\r
+                               "  \"prot\": \"tcp\"\n" +\r
+                               "  \"maxIdle\": 80\n" +\r
+                               "  \"maxActive\": 12000\n" +\r
+                               "\"controls\":\n" +\r
+                               "  \"e2tSubReqTimeout_ms\": 2000\n" +\r
+                               "  \"e2tSubDelReqTime_ms\": 2000\n" +\r
+                               "  \"e2tRecvMsgTimeout_ms\": 2000\n" +\r
+                               "  \"e2tMaxSubReqTryCount\": 2\n" +\r
+                               "  \"e2tMaxSubDelReqTryCount\": 2\n" +\r
+                               "  \"checkE2State\": \"true\"\n" +\r
+                               "  \"readSubsFromDb\": \"true\"\n" +\r
+                               "  \"dbTryCount\": 200\n" +\r
+                               "  \"dbRetryForever\": \"true\"\n" +\r
+                               "  \"waitRouteCleanup_ms\": 5000\n" +\r
+                               "",\r
+                       "submgrutartg": "newrt|start\n" +\r
+                               "newrt|end\n" +\r
+                               "",\r
+               },\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Name:      "submgrcfg",\r
+                       Namespace: "ricplt",\r
+               },\r
+               TypeMeta: metav1.TypeMeta{\r
+                       Kind:       "ConfigMap",\r
+                       APIVersion: "v1",\r
+               },\r
+       }\r
+\r
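+       // submgr environment: seed sequence number, config file path, and RMR\r
+       // routing/source settings.\r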
+       configMap35 := &corev1.ConfigMap{\r
+               TypeMeta: metav1.TypeMeta{\r
+                       APIVersion: "v1",\r
+                       Kind:       "ConfigMap",\r
+               },\r
+               Data: map[string]string{\r
+                       "SUBMGR_SEED_SN": "1",\r
+                       "CFG_FILE":       "/cfg/submgr-config.yaml",\r
+                       "RMR_RTG_SVC":    "4561",\r
+                       "RMR_SEED_RT":    "/cfg/submgr-uta-rtg.rt",\r
+                       "RMR_SRC_ID":     "service-ricplt-submgr-rmr.ricplt",\r
+               },\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Name: "configmap-ricplt-submgr-env",\r
+               },\r
+       }\r
+\r
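+       // vespamgr configuration: VES collector endpoint and credentials, alert\r
+       // manager bind address, Prometheus address, and reporting intervals.\r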
+       configMap36 := &corev1.ConfigMap{\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Name:      "configmap-ricplt-vespamgr",\r
+                       Namespace: "ricplt",\r
+               },\r
+               TypeMeta: metav1.TypeMeta{\r
+                       APIVersion: "v1",\r
+                       Kind:       "ConfigMap",\r
+               },\r
+               Data: map[string]string{\r
+                       "VESMGR_PRICOLLECTOR_SECURE":     "false",\r
+                       "VESMGR_PRICOLLECTOR_SERVERROOT": "/vescollector",\r
+                       "VESMGR_ALERTMANAGER_BIND_ADDR":  ":9095",\r
+                       "VESMGR_PRICOLLECTOR_PASSWORD":   "sample1",\r
+                       "VESMGR_PRICOLLECTOR_ADDR":       "aux-entry",\r
+                       "VESMGR_PRICOLLECTOR_PORT":       "8443",\r
+                       "VESMGR_PRICOLLECTOR_USER":       "sample1",\r
+                       "VESMGR_PROMETHEUS_ADDR":         "http://r4-infrastructure-prometheus-server.ricplt",\r
+                       "VESMGR_HB_INTERVAL":             "60s",\r
+                       "VESMGR_MEAS_INTERVAL":           "30s",\r
+               },\r
+       }\r
+\r
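+       // chartmuseum environment for the xApp onboarder: serve charts from local\r
+       // storage under /charts.\r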
+       configMap37 := &corev1.ConfigMap{\r
+               TypeMeta: metav1.TypeMeta{\r
+                       APIVersion: "v1",\r
+                       Kind:       "ConfigMap",\r
+               },\r
+               Data: map[string]string{\r
+                       "DEBUG":                 "true",\r
+                       "PORT":                  "8080",\r
+                       "STORAGE":               "local",\r
+                       "STORAGE_LOCAL_ROOTDIR": "/charts",\r
+               },\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Name: "configmap-ricplt-xapp-onboarder-chartmuseum-env",\r
+               },\r
+       }\r
+\r
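+       // xApp onboarder environment: chart repo URL, workspace paths, Helm version,\r
+       // and Flask settings.\r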
+       configMap38 := &corev1.ConfigMap{\r
+               Data: map[string]string{\r
+                       "CHART_REPO_URL":               "http://0.0.0.0:8080",\r
+                       "CHART_WORKSPACE_PATH":         "/tmp/xapp-onboarder",\r
+                       "HTTP_RETRY":                   "3",\r
+                       "MOCK_TEST_HELM_REPO_TEMP_DIR": "/tmp/mock_helm_repo",\r
+                       "ALLOW_REDEPLOY":               "True",\r
+                       "CHART_WORKSPACE_SIZE":         "500MB",\r
+                       "FLASK_DEBUG":                  "False",\r
+                       "FLASK_PORT":                   "8888",\r
+                       "HELM_VERSION":                 "2.12.3",\r
+                       "MOCK_TEST_MODE":               "False",\r
+               },\r
+               ObjectMeta: metav1.ObjectMeta{\r
+                       Name: "configmap-ricplt-xapp-onboarder-env",\r
+               },\r
+               TypeMeta: metav1.TypeMeta{\r
+                       APIVersion: "v1",\r
+                       Kind:       "ConfigMap",\r
+               },\r
+       }\r
 \r
-       return []*corev1.ConfigMap{configMap1, configMap2, configMap3, configMap4, configMap5, configMap6,configMap7, configMap8, configMap9, configMap10, configMap11, configMap12, configMap13, configMap14, configMap15, configMap16, configMap17, configMap18, configMap19}\r
+       return []*corev1.ConfigMap{configMap1, configMap2, configMap3, configMap4, configMap5, configMap6, configMap7, configMap8, configMap9, configMap10, configMap11, configMap12, configMap13, configMap14, configMap15, configMap16, configMap17, configMap18, configMap19, configMap20, configMap21, configMap22, configMap23, configMap24, configMap25, configMap26, configMap27, configMap28, configMap29, configMap30, configMap31, configMap32, configMap33, configMap34, configMap35, configMap36, configMap37, configMap38}\r
 }\r