Enabling helm3 & kubernetes 1.18 for RIC in shell scripts without breaking existing...
[it/dep.git] / tools / k8s / heat / scripts / k8s_vm_aux_install.sh
1 # this script installs AUX infrastructure components
2
3 # continue only on AUX cluster
4 CINDER_V_ID=$(cat /opt/config/cinder_volume_id.txt)
5 cat <<EOF > ./cinder_pv.yaml
6 apiVersion: "v1"
7 kind: "PersistentVolume"
8 metadata:
9   name: "cinder-pv"
10 spec:
11   capacity:
12     storage: "5Gi"
13   accessModes:
14     - "ReadWriteOnce"
15   cinder:
16     fsType: "ext3"
17     volumeID: "$CINDER_V_ID"
18 EOF
19 kubectl create -f ./cinder_pv.yaml
20
21
# install the fluentd / ELK logging stack
LOGGING_NS="logging"
kubectl create namespace "${LOGGING_NS}"
# Retry adding the incubator chart repo until it succeeds -- the network
# may still be coming up on first boot of the VM.
until helm repo add incubator "https://kubernetes-charts-incubator.storage.googleapis.com/"; do
  sleep 10
done
# Detect the Helm major version: helm3 dropped the '--name' flag and takes
# the release name as a positional argument, whereas helm2 requires '--name'.
IS_HELM3=$(helm version --short | grep -e "^v3")
HELM_FLAG='--name'
# Bug fix: the original 'if' was missing 'then' (bash syntax error) and used
# '-z', which cleared the flag for helm2 -- the version that needs '--name'.
# Clear the flag only when helm3 is detected.
if [ -n "$IS_HELM3" ]; then
   HELM_FLAG=""
fi
33
34 helm repo update
35 helm install ${HELM_FLAG} elasticsearch \
36    --namespace "${LOGGING_NS}" \
37    --set image.tag=6.7.0 \
38    --set data.terminationGracePeriodSeconds=0 \
39    --set master.persistence.enabled=false \
40    --set data.persistence.enabled=false \
41    incubator/elasticsearch 
42 helm install ${HELM_FLAG} fluentd \
43    --namespace "${LOGGING_NS}" \
44    --set elasticsearch.host=elasticsearch-client.${LOGGING_NS}.svc.cluster.local \
45    --set elasticsearch.port=9200 \
46    stable/fluentd-elasticsearch
47 helm install ${HELM_FLAG} kibana \
48    --namespace "${LOGGING_NS}" \
49    --set env.ELASTICSEARCH_URL=http://elasticsearch-client.${LOGGING_NS}.svc.cluster.local:9200 \
50    --set env.ELASTICSEARCH_HOSTS=http://elasticsearch-client.${LOGGING_NS}.svc.cluster.local:9200 \
51    --set env.SERVER_BASEPATH=/api/v1/namespaces/${LOGGING_NS}/services/kibana/proxy \
52    stable/kibana
53    #--set image.tag=6.4.2 \
54
55 KIBANA_POD_NAME=$(kubectl get pods --selector=app=kibana -n  "${LOGGING_NS}" \
56    --output=jsonpath="{.items..metadata.name}")
57 wait_for_pods_running 1 "${LOGGING_NS}" "${KIBANA_POD_NAME}"
58
59
# install prometheus
PROMETHEUS_NS="monitoring"
OPERATOR_POD_NAME="prometheus-prometheus-operator-prometheus-0"
ALERTMANAGER_POD_NAME="alertmanager-prometheus-operator-alertmanager-0"
# Fix: helm3, unlike helm2, does NOT create the target namespace on
# 'helm install', so create it explicitly first.  If it already exists the
# command fails harmlessly (same pattern as the logging namespace above).
kubectl create namespace "${PROMETHEUS_NS}"
# ${HELM_FLAG} intentionally unquoted: empty under helm3, '--name' under helm2.
helm install ${HELM_FLAG} prometheus-operator  --namespace "${PROMETHEUS_NS}" stable/prometheus-operator
wait_for_pods_running 1 "${PROMETHEUS_NS}" "${OPERATOR_POD_NAME}"

# Grafana is deployed by the operator chart; capture its pod name for later use.
GRAFANA_POD_NAME=$(kubectl get pods --selector=app=grafana -n  "${PROMETHEUS_NS}" \
   --output=jsonpath="{.items..metadata.name}")
69
70
71
# Expose Kibana / Prometheus / Alertmanager / Grafana behind one nginx ingress.
# NOTE(review): extensions/v1beta1 Ingress is deprecated (served on k8s 1.18
# but removed in 1.22) -- plan migration to networking.k8s.io/v1.
cat <<EOF > ./ingress_lm.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: ingress-lm
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - http:
      paths:
      - path: /kibana
        backend:
          serviceName: kibana
          servicePort: 5601
      - path: /operator
        backend:
          serviceName: prometheus-operator-prometheus
          servicePort: 9090
      - path: /alertmanager
        backend:
          serviceName: prometheus-operator-alertmanager
          servicePort: 9093
      - path: /grafana
        backend:
          serviceName: prometheus-operator-grafana
          servicePort: 3000
EOF
# Fix: the manifest is written to ./ingress_lm.yaml (underscore) but the
# original applied 'ingress-lm.yaml' (hyphen), a file that never exists.
kubectl apply -f ./ingress_lm.yaml
101