4 corev1 "k8s.io/api/core/v1"
\r
5 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
\r
// GetConfigMap returns the static set of *corev1.ConfigMap objects for the
// RIC platform components visible below: a1mediator ("a1conf" route table +
// "env"), alarmmanager ("appconfig", the xapp-side "alarm-appconfig",
// "alarmmanagercfg", "env") and appmgr ("appconfig", "bin" helper scripts,
// "env"). Nine maps are built and returned as one slice.
//
// NOTE(review): this block is corrupted in the checkout. Every line carries a
// stray embedded line number, bare "\r" artifact lines separate the code, and
// runs of original lines are missing entirely — closing braces, the TypeMeta
// field values, and parts of the embedded route-table/JSON/YAML/shell string
// payloads (the gaps in the embedded numbering mark where text was lost).
// The code cannot compile in this state; restore the full text from version
// control / upstream rather than hand-patching the gaps, since the missing
// lines include runtime string content that cannot be reconstructed from
// what remains here.
8 func GetConfigMap() []*corev1.ConfigMap {
\r
// configMap1: "configmap-ricplt-a1mediator-a1conf" — ships the A1 mediator's
// RMR routing table ("local.rt", newrt|start … format; the table's own
// comments say it is a placeholder, real routes being installed dynamically
// via subscription ids) plus a "loglevel.txt" file.
// NOTE(review): the "local.rt" literal is truncated — the terminating
// "newrt|end" line and the map/struct closers fall in a missing-line gap.
10 configMap1 := &corev1.ConfigMap{
\r
11 ObjectMeta: metav1.ObjectMeta{
\r
12 Name: "configmap-ricplt-a1mediator-a1conf",
\r
14 TypeMeta: metav1.TypeMeta{
\r
18 Data: map[string]string{
\r
19 "local.rt": "newrt|start\n" +
\r
20 "# Warning! this is not a functioning table because the subscription manager and route manager are now involved in a1 flows\n" +
\r
21 "# the real routing table requires subscription ids as routing is now done over sub ids, but this isn't known until xapp deploy time, it's a dynamic process triggered by the xapp manager\n" +
\r
22 "# there is a single message type for all messages a1 sends out now, subid is the other necessary piece of info\n" +
\r
23 "# there are two message types a1 listens for; 20011 (instance response) and 20012 (query)\n" +
\r
24 "# xapps likely use rts to reply with 20012 so the routing entry isn't needed for that in most cases\n" +
\r
25 "mse|20010|SUBID|service-ricxapp-admctrl-rmr.ricxapp:4563\n" +
\r
26 "rte|20011|service-ricplt-a1mediator-rmr.ricplt:4562\n" +
\r
27 "rte|20012|service-ricplt-a1mediator-rmr.ricplt:4562\n" +
\r
30 "loglevel.txt": "log-level:",
\r
// configMap2: "configmap-ricplt-a1mediator-env" — environment variables for
// the A1 mediator container: the loglevel file path, instance-delete TTLs,
// RMR route-table service port / source id, and the A1 RMR retry count.
34 configMap2 := &corev1.ConfigMap{
\r
35 Data: map[string]string{
\r
36 "CONFIG_MAP_NAME": "/opt/route/loglevel.txt",
\r
37 "INSTANCE_DELETE_NO_RESP_TTL": "5",
\r
38 "INSTANCE_DELETE_RESP_TTL": "10",
\r
39 "PYTHONUNBUFFERED": "1",
\r
40 "RMR_RTG_SVC": "4561",
\r
41 "RMR_SRC_ID": "service-ricplt-a1mediator-rmr.ricplt",
\r
42 "A1_RMR_RETRY_TIMES": "20",
\r
44 ObjectMeta: metav1.ObjectMeta{
\r
45 Name: "configmap-ricplt-a1mediator-env",
\r
47 TypeMeta: metav1.TypeMeta{
\r
// configMap3: "configmap-ricplt-alarmmanager-appconfig" (namespace ricplt) —
// publishes the alarm-manager RMR service name and port to platform pods.
53 configMap3 := &corev1.ConfigMap{
\r
54 Data: map[string]string{
\r
55 "ALARM_MGR_SERVICE_NAME": "service-ricplt-alarmmanager-rmr.ricplt",
\r
56 "ALARM_MGR_SERVICE_PORT": "4560",
\r
58 ObjectMeta: metav1.ObjectMeta{
\r
59 Namespace: "ricplt",
\r
60 Name: "configmap-ricplt-alarmmanager-appconfig",
\r
62 TypeMeta: metav1.TypeMeta{
\r
// configMap4: "alarm-appconfig" (namespace ricxapp) — the same alarm-manager
// service coordinates as configMap3, but published into the xapp namespace.
68 configMap4 := &corev1.ConfigMap{
\r
69 ObjectMeta: metav1.ObjectMeta{
\r
70 Name: "alarm-appconfig",
\r
71 Namespace: "ricxapp",
\r
73 TypeMeta: metav1.TypeMeta{
\r
77 Data: map[string]string{
\r
78 "ALARM_MGR_SERVICE_NAME": "service-ricplt-alarmmanager-rmr.ricplt",
\r
79 "ALARM_MGR_SERVICE_PORT": "4560",
\r
// configMap5: "configmap-ricplt-alarmmanager-alarmmanagercfg" — the alarm
// manager's JSON configuration ("alarmmanagercfg"): REST host, logger and db
// namespaces, RMR transport settings, and Prometheus AlertManager controls.
// NOTE(review): the JSON literal is incomplete — several object keys and all
// the closing "}" lines fall in missing-line gaps, so the fragment below does
// not parse as JSON on its own.
83 configMap5 := &corev1.ConfigMap{
\r
84 Data: map[string]string{
\r
85 "alarmmanagercfg": "{ \n" +
\r
87 " \"host\": \":8080\"\n" +
\r
89 " \"logger\": {\n" +
\r
93 " \"namespaces\": [\"sdl\", \"rnib\"]\n" +
\r
96 " \"protPort\": \"tcp:4560\",\n" +
\r
97 " \"maxSize\": 1024,\n" +
\r
98 " \"numWorkers\": 1\n" +
\r
100 " \"controls\": {\n" +
\r
101 " \"promAlertManager\": {\n" +
\r
102 " \"address\": \"cpro-alertmanager:80\",\n" +
\r
103 " \"baseUrl\": \"api/v2\",\n" +
\r
104 " \"schemes\": \"http\",\n" +
\r
105 " \"alertInterval\": 30000\n" +
\r
107 " \"maxActiveAlarms\": 5000,\n" +
\r
108 " \"maxAlarmHistory\": 20000,\n" +
\r
109 " \"alarmInfoPvFile\": \"/mnt/pv-ricplt-alarmmanager/alarminfo.json\"\n" +
\r
113 ObjectMeta: metav1.ObjectMeta{
\r
114 Name: "configmap-ricplt-alarmmanager-alarmmanagercfg",
\r
115 Namespace: "ricplt",
\r
117 TypeMeta: metav1.TypeMeta{
\r
// configMap6: "configmap-ricplt-alarmmanager-env" — RMR environment for the
// alarm manager: seed route-table path, source id, and the rtmgr RMR service.
123 configMap6 := &corev1.ConfigMap{
\r
124 Data: map[string]string{
\r
125 "RMR_SEED_RT": "/cfg/uta_rtg.rt",
\r
126 "RMR_SRC_ID": "service-ricplt-alarmmanager-rmr.ricplt",
\r
127 "RMR_RTG_SVC": "service-ricplt-rtmgr-rmr:4561",
\r
129 ObjectMeta: metav1.ObjectMeta{
\r
130 Name: "configmap-ricplt-alarmmanager-env",
\r
131 Namespace: "ricplt",
\r
133 TypeMeta: metav1.TypeMeta{
\r
// configMap7: "configmap-ricplt-appmgr-appconfig" — the xapp-manager's
// "appmgr.yaml": local REST port, helm repo / tiller connection settings
// (onboarder URL, tiller service/namespace/port, credential file paths),
// and xapp install settings (target namespace, tar dir, schema/config paths).
// NOTE(review): the YAML literal is incomplete — the section-header lines
// between the visible keys (gaps at embedded lines 144/147/150/159) are lost.
139 configMap7 := &corev1.ConfigMap{
\r
140 Data: map[string]string{
\r
141 "appmgr.yaml": "\"local\":\n" +
\r
142 " # Port on which the xapp-manager REST services are provided\n" +
\r
143 " \"host\": \":8080\"\n" +
\r
145 " # Remote helm repo URL. UPDATE this as required.\n" +
\r
146 " \"repo\": \"\\\"http://service-ricplt-xapp-onboarder-http:8080\\\"\"\n" +
\r
148 " # Repo name referred within the xapp-manager\n" +
\r
149 " \"repo-name\": \"helm-repo\"\n" +
\r
151 " # Tiller service details in the cluster. UPDATE this as required.\n" +
\r
152 " \"tiller-service\": service-tiller-ricxapp\n" +
\r
153 " \"tiller-namespace\": ricinfra\n" +
\r
154 " \"tiller-port\": \"44134\"\n" +
\r
155 " # helm username and password files\n" +
\r
156 " \"helm-username-file\": \"/opt/ric/secret/helm_repo_username\"\n" +
\r
157 " \"helm-password-file\": \"/opt/ric/secret/helm_repo_password\"\n" +
\r
158 " \"retry\": 1\n" +
\r
160 " #Namespace to install xAPPs\n" +
\r
161 " \"namespace\": \"ricxapp\"\n" +
\r
162 " \"tarDir\": \"/tmp\"\n" +
\r
163 " \"schema\": \"descriptors/schema.json\"\n" +
\r
164 " \"config\": \"config/config-file.json\"\n" +
\r
165 " \"tmpConfig\": \"/tmp/config-file.json\"\n" +
\r
168 ObjectMeta: metav1.ObjectMeta{
\r
169 Name: "configmap-ricplt-appmgr-appconfig",
\r
171 TypeMeta: metav1.TypeMeta{
\r
// configMap8: "configmap-ricplt-appmgr-bin" — two helper shell scripts:
//   - "appmgr-tiller-secret-copier.sh": extracts ca.crt / tls.crt / tls.key
//     from a tiller secret via kubectl+grep+awk+base64 into the HELM_TLS_*
//     file paths (each step guarded by the corresponding env var being set).
//   - "svcacct-to-kubeconfig.sh": generates a kubeconfig at ${KUBECONFIG}
//     from the auto-mounted service-account token and cluster CA, via a
//     heredoc.
// NOTE(review): both script literals are truncated — the closing "fi" lines,
// blank separators, and several kubeconfig stanza lines (kind/users/clusters/
// contexts headers, the __EOF__ terminator) fall in missing-line gaps.
177 configMap8 := &corev1.ConfigMap{
\r
178 Data: map[string]string{
\r
179 "appmgr-tiller-secret-copier.sh": "#!/bin/sh\n" +
\r
180 "if [ -x /svcacct-to-kubeconfig.sh ] ; then\n" +
\r
181 " /svcacct-to-kubeconfig.sh\n" +
\r
184 "if [ ! -z \"${HELM_TLS_CA_CERT}\" ]; then\n" +
\r
185 " kubectl -n ${SECRET_NAMESPACE} get secret -o yaml ${SECRET_NAME} | \\\n" +
\r
186 " grep 'ca.crt:' | \\\n" +
\r
187 " awk '{print $2}' | \\\n" +
\r
188 " base64 -d > ${HELM_TLS_CA_CERT}\n" +
\r
191 "if [ ! -z \"${HELM_TLS_CERT}\" ]; then\n" +
\r
192 " kubectl -n ${SECRET_NAMESPACE} get secret -o yaml ${SECRET_NAME} | \\\n" +
\r
193 " grep 'tls.crt:' | \\\n" +
\r
194 " awk '{print $2}' | \\\n" +
\r
195 " base64 -d > ${HELM_TLS_CERT}\n" +
\r
198 "if [ ! -z \"${HELM_TLS_KEY}\" ]; then\n" +
\r
199 " kubectl -n ${SECRET_NAMESPACE} get secret -o yaml ${SECRET_NAME} | \\\n" +
\r
200 " grep 'tls.key:' | \\\n" +
\r
201 " awk '{print $2}' | \\\n" +
\r
202 " base64 -d > ${HELM_TLS_KEY}\n" +
\r
205 "svcacct-to-kubeconfig.sh": "#!/bin/sh\n" +
\r
207 "# generate a kubconfig (at ${KUBECONFIG} file from the automatically-mounted\n" +
\r
208 "# service account token.\n" +
\r
209 "# ENVIRONMENT:\n" +
\r
210 "# SVCACCT_NAME: the name of the service account user. default \"default\"\n" +
\r
211 "# CLUSTER_NAME: the name of the kubernetes cluster. default \"kubernetes\"\n" +
\r
212 "# KUBECONFIG: where the generated file will be deposited.\n" +
\r
213 "SVCACCT_TOKEN=`cat /var/run/secrets/kubernetes.io/serviceaccount/token`\n" +
\r
214 "CLUSTER_CA=`base64 /var/run/secrets/kubernetes.io/serviceaccount/ca.crt|tr -d '\\n'`\n" +
\r
216 "cat >${KUBECONFIG} <<__EOF__\n" +
\r
217 "ApiVersion: v1\n" +
\r
220 "- name: ${SVCACCT_NAME:-default}\n" +
\r
222 " token: ${SVCACCT_TOKEN}\n" +
\r
225 " certificate-authority-data: ${CLUSTER_CA}\n" +
\r
226 " server: ${K8S_API_HOST:-https://kubernetes.default.svc.cluster.local/}\n" +
\r
227 " name: ${CLUSTER_NAME:-kubernetes}\n" +
\r
230 " cluster: ${CLUSTER_NAME:-kubernetes}\n" +
\r
231 " user: ${SVCACCT_NAME:-default}\n" +
\r
232 " name: svcs-acct-context\n" +
\r
233 "current-context: svcs-acct-context\n" +
\r
237 ObjectMeta: metav1.ObjectMeta{
\r
238 Name: "configmap-ricplt-appmgr-bin",
\r
240 TypeMeta: metav1.TypeMeta{
\r
// configMap9: "configmap-ricplt-appmgr-env" — environment for the xapp
// manager: RMR route-table port plus the helm/tiller TLS settings (cert and
// key paths, tiller host, verify/enabled flags) consumed by the scripts in
// configMap8.
// NOTE(review): embedded line 253 is missing between the HELM_TLS_VERIFY and
// HELM_HOST entries — a map entry may have been lost there; verify against
// the original file.
246 configMap9 := &corev1.ConfigMap{
\r
247 Data: map[string]string{
\r
248 "RMR_RTG_SVC": "4561",
\r
249 "HELM_TLS_CA_CERT": "/opt/ric/secret/tiller-ca.cert",
\r
250 "HELM_TLS_CERT": "/opt/ric/secret/helm-client.cert",
\r
251 "HELM_TLS_HOSTNAME": "service-tiller-ricxapp",
\r
252 "HELM_TLS_VERIFY": "true",
\r
254 "HELM_HOST": "service-tiller-ricxapp.ricinfra:44134",
\r
255 "HELM_TLS_ENABLED": "true",
\r
256 "HELM_TLS_KEY": "/opt/ric/secret/helm-client.key",
\r
258 ObjectMeta: metav1.ObjectMeta{
\r
259 Name: "configmap-ricplt-appmgr-env",
\r
261 TypeMeta: metav1.TypeMeta{
\r
// Return all nine ConfigMaps in construction order.
// NOTE(review): the function's closing brace (embedded line 268+) is also
// missing from this chunk.
267 return []*corev1.ConfigMap{configMap1, configMap2, configMap3, configMap4, configMap5, configMap6,configMap7, configMap8, configMap9}
\r