#!/bin/bash

################################################################################
#   Copyright (c) 2019 AT&T Intellectual Property.                             #
#   Copyright (c) 2019 Nokia.                                                  #
#                                                                              #
#   Licensed under the Apache License, Version 2.0 (the "License");            #
#   you may not use this file except in compliance with the License.           #
#   You may obtain a copy of the License at                                    #
#                                                                              #
#       http://www.apache.org/licenses/LICENSE-2.0                             #
#                                                                              #
#   Unless required by applicable law or agreed to in writing, software        #
#   distributed under the License is distributed on an "AS IS" BASIS,          #
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
#   See the License for the specific language governing permissions and        #
#   limitations under the License.                                             #
################################################################################

# This script stands up a dev/test Kubernetes environment that is ready for RIC
# installation, for individual developers' and teams' API and functional testing needs.
# The integration team keeps the software infrastructure stack (software, versions,
# and configurations) in sync between this script and what is provided for the E2E
# validation testing.  Due to resource and other differences, this environment is not
# intended for any testing related to performance, resilience, robustness, etc.

# This script installs a Docker host, a one-node Kubernetes cluster, and Helm for CoDev.
# It assumes that it will be executed on an Ubuntu 16.04 VM.
# It is best run as the cloud-init script at VM launch time, or from a
# "sudo -i" shell post-launch on a newly launched VM.
#

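# A minimal usage sketch (the file name and the OpenStack CLI invocation below are
# illustrative assumptions, not part of this repo):
#   - as cloud-init user data at VM launch time, e.g.:
#       openstack server create --user-data ./setup-1node-k8s.sh ... <vm-name>
#   - or on an already-running VM, from a root shell:
#       sudo -i
#       ./setup-1node-k8s.sh        # add -r to reset and re-initialize an existing cluster
#
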
set -x

# for RIC R0 we keep 1.13
export KUBEV="1.13.3"
export KUBECNIV="0.6.0"
export DOCKERV="18.06.1"

# for the new 1.14 release
#export KUBEV="1.14.0"
#export KUBECNIV="0.7.0"
#export DOCKERV="18.06.1"

export HELMV="2.12.3"

unset FIRSTBOOT
unset DORESET

while getopts ":r" opt; do
  case ${opt} in
    r )
      DORESET='YES'
      ;;
    \? )
      echo "Usage: $0 [-r]"
      exit 1
      ;;
  esac
done


if [ ! -e /var/tmp/firstboot4setupk8s ]; then
  echo "First time"
  FIRSTBOOT='YES'
  touch /var/tmp/firstboot4setupk8s

  modprobe -- ip_vs
  modprobe -- ip_vs_rr
  modprobe -- ip_vs_wrr
  modprobe -- ip_vs_sh
  modprobe -- nf_conntrack_ipv4

  # disable swap
  SWAPFILES=$(grep swap /etc/fstab | sed '/^#/ d' | awk '{print $1}')
  if [ -n "$SWAPFILES" ]; then
    for SWAPFILE in $SWAPFILES
    do
      echo "disabling swap file $SWAPFILE"
      if [[ $SWAPFILE == UUID* ]]; then
        UUID=$(echo $SWAPFILE | cut -f2 -d'=')
        swapoff -U $UUID
      else
        swapoff $SWAPFILE
      fi
      # edit /etc/fstab, removing the line for this swap entry
      sed -i -e "/$SWAPFILE/d" /etc/fstab
    done
  fi
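
  # Optional sanity check (a sketch, not required for the install): confirm that no
  # swap remains active before kubeadm runs, e.g.
  #   swapon -s
  #   free -m | grep -i swap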
  # disable swap (legacy hard-coded variant, superseded by the loop above)
  #swapoff /swapfile
  # edit /etc/fstab file, remove line with /swapfile
  #sed -i -e '/swapfile/d' /etc/fstab


  # add rancodev CI tool hostnames
  echo "${__RUNRICENV_GERRIT_IP__} ${__RUNRICENV_GERRIT_HOST__}" >> /etc/hosts
  echo "${__RUNRICENV_DOCKER_IP__} ${__RUNRICENV_DOCKER_HOST__}" >> /etc/hosts
  echo "${__RUNRICENV_HELMREPO_IP__} ${__RUNRICENV_HELMREPO_HOST__}" >> /etc/hosts


  # create the kubernetes (kubeadm) config file
  if [[ ${KUBEV} == 1.13.* ]]; then
    cat <<EOF >/root/config.yaml
apiVersion: kubeadm.k8s.io/v1alpha3
kubernetesVersion: v${KUBEV}
kind: ClusterConfiguration
apiServerExtraArgs:
  feature-gates: SCTPSupport=true
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12

---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
EOF
  elif [[ ${KUBEV} == 1.14.* ]]; then
    cat <<EOF >/root/config.yaml
apiVersion: kubeadm.k8s.io/v1beta1
kubernetesVersion: v${KUBEV}
kind: ClusterConfiguration
apiServer:
  extraArgs:
    feature-gates: SCTPSupport=true
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12

---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
EOF
  else
    echo "Unsupported Kubernetes version requested.  Bail."
    exit 1
  fi

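  # Optional preview before running kubeadm init (a sketch; assumes the pinned kubeadm
  # release accepts --config for the "config images list" subcommand):
  #   kubeadm config images list --config /root/config.yaml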

  # create a RBAC file for helm (tiller)
  cat <<EOF > /root/rbac-config.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tiller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: tiller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: tiller
    namespace: kube-system
EOF


  KUBEVERSION="${KUBEV}-00"
  CNIVERSION="${KUBECNIV}-00"
  DOCKERVERSION="${DOCKERV}-0ubuntu1.2~16.04.1"
  curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
  echo 'deb http://apt.kubernetes.io/ kubernetes-xenial main' > /etc/apt/sources.list.d/kubernetes.list

  # install low latency kernel, docker.io, and kubernetes
  apt-get update
  apt-get install -y linux-image-4.15.0-45-lowlatency docker.io=${DOCKERVERSION}
  apt-get install -y kubernetes-cni=${CNIVERSION}
  apt-get install -y --allow-unauthenticated kubeadm=${KUBEVERSION} kubelet=${KUBEVERSION} kubectl=${KUBEVERSION}
  apt-mark hold kubernetes-cni kubelet kubeadm kubectl

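  # Optional sanity check of the pinned tool versions (a sketch; output formats vary
  # slightly across releases):
  #   docker --version
  #   kubeadm version -o short
  #   kubectl version --client --short
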
  # install Helm
  HELMVERSION=${HELMV}
  cd /root
  mkdir Helm
  cd Helm
  wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELMVERSION}-linux-amd64.tar.gz
  tar -xvf helm-v${HELMVERSION}-linux-amd64.tar.gz
  mv linux-amd64/helm /usr/local/bin/helm

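  # Optional check that the Helm client is on PATH (a sketch; tiller itself is not
  # deployed until "helm init" runs later in this script):
  #   helm version --client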

  # add cert for accessing docker registry in Azure
  mkdir -p /etc/docker/certs.d/${__RUNRICENV_DOCKER_HOST__}:${__RUNRICENV_DOCKER_PORT__}
  cat <<EOF >/etc/docker/ca.crt
-----BEGIN CERTIFICATE-----
MIIEPjCCAyagAwIBAgIJAIwtTKgVAnvrMA0GCSqGSIb3DQEBCwUAMIGzMQswCQYD
VQQGEwJVUzELMAkGA1UECAwCTkoxEzARBgNVBAcMCkJlZG1pbnN0ZXIxDTALBgNV
BAoMBEFUJlQxETAPBgNVBAsMCFJlc2VhcmNoMTswOQYDVQQDDDIqLmRvY2tlci5y
YW5jby1kZXYtdG9vbHMuZWFzdHVzLmNsb3VkYXBwLmF6dXJlLmNvbTEjMCEGCSqG
SIb3DQEJARYUcmljQHJlc2VhcmNoLmF0dC5jb20wHhcNMTkwMTI0MjA0MzIzWhcN
MjQwMTIzMjA0MzIzWjCBszELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5KMRMwEQYD
VQQHDApCZWRtaW5zdGVyMQ0wCwYDVQQKDARBVCZUMREwDwYDVQQLDAhSZXNlYXJj
aDE7MDkGA1UEAwwyKi5kb2NrZXIucmFuY28tZGV2LXRvb2xzLmVhc3R1cy5jbG91
ZGFwcC5henVyZS5jb20xIzAhBgkqhkiG9w0BCQEWFHJpY0ByZXNlYXJjaC5hdHQu
Y29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuAW1O52l9/1L+D7x
Qv+782FjiELP9MWO0RCAL2JzV6Ioeo1IvuZl8jvXQVGlowanCdz1HZlMJjGm6Ybv
60dVECRSMZeOxUQ0JCus6thxOhDiiCFT59m+MpdrRgHqwOzw+8B49ZwULv+lTIWt
ETEQkSYTh4No9jCxnyVLKH9DyTbaW/xFK484u5f4bh7mI5uqDJapOCRvJXv8/J0E
eMrkCVmk5qy0ii8I7O0oCNl61YvC5by9GCeuQhloJJc6gOjzKW8nK9JfUW8G34bC
qnUj79EgwgtW/8F5SYAF5LI0USM0xXjyzlnPMbv5mikrbf0EZkZXdUreICUIzY53
HRocCQIDAQABo1MwUTAdBgNVHQ4EFgQUm9NbNhZ3Zp1f50DIN4/4fvWQSNswHwYD
VR0jBBgwFoAUm9NbNhZ3Zp1f50DIN4/4fvWQSNswDwYDVR0TAQH/BAUwAwEB/zAN
BgkqhkiG9w0BAQsFAAOCAQEAkbuqbuMACRmzMXFKoSsMTLk/VRQDlKeubdP4lD2t
Z+2dbhfbfiae9oMly7hPCDacoY0cmlBb2zZ8lgA7kVvuw0xwX8mLGYfOaNG9ENe5
XxFP8MuaCySy1+v5CsNnh/WM3Oznc6MTv/0Nor2DeY0XHQtM5LWrqyKGZaVAKpMW
5nHG8EPIZAOk8vj/ycg3ca3Wv3ne9/8rbrrxDJ3p4L70DOtz/JcQai10Spct4S0Z
7yd4tQL+QSQCvmN7Qm9+i52bY0swYrUAhbNiEX3yJDryKjSCPirePcieGZmBRMxr
7j28jxpa4g32TbWR/ZdxMYEkCVTFViTE23kZdNvahHKfdQ==
-----END CERTIFICATE-----
EOF
  cp /etc/docker/ca.crt /etc/docker/certs.d/${__RUNRICENV_DOCKER_HOST__}:${__RUNRICENV_DOCKER_PORT__}/ca.crt
  service docker restart
  systemctl enable docker.service
  docker login -u ${__RUNRICENV_DOCKER_USER__} -p ${__RUNRICENV_DOCKER_PASS__} ${__RUNRICENV_DOCKER_HOST__}:${__RUNRICENV_DOCKER_PORT__}
  docker pull ${__RUNRICENV_DOCKER_HOST__}:${__RUNRICENV_DOCKER_PORT__}/whoami:0.0.1


  # test access to k8s docker registry
  kubeadm config images pull
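
  # Optional: confirm the control-plane images landed locally (a sketch; the
  # k8s.gcr.io prefix assumes kubeadm's default image repository):
  #   docker images | grep k8s.gcr.io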
else
  echo "Not first boot"

  kubectl get pods --all-namespaces
fi


if [ -n "$DORESET" ]; then
  kubeadm reset
fi

# (re)initialize the cluster on first boot or when a reset was requested
if [ -n "${DORESET}" ] || [ -n "${FIRSTBOOT}" ]; then

  # start the cluster (pod CIDR and SCTP support are set via the config file)
  kubeadm init --config /root/config.yaml

  # set up kubectl credentials and config
  cd /root
  rm -rf .kube
  mkdir -p .kube
  cp -i /etc/kubernetes/admin.conf /root/.kube/config
  chown root:root /root/.kube/config

  # at this point we should be able to use kubectl
  kubectl get pods --all-namespaces
  # the DNS pods will be stuck in Pending state; they are waiting for a pod network to be installed

  # install flannel
  # kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
  kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/bc79dd1505b0c8681ece4de4c0d86c5cd2643275/Documentation/kube-flannel.yml

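  # Optional check that the flannel DaemonSet and pod came up (a sketch; resource
  # names may differ between flannel manifest revisions):
  #   kubectl -n kube-system get daemonset | grep flannel
  #   kubectl -n kube-system get pods | grep flannel
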
  # wait for all kube-system pods to reach the Running state
  NUMPODS=0
  while [ $NUMPODS -lt 8 ]; do
    sleep 5
    NUMPODS=$(kubectl get pods --all-namespaces | grep -c Running)
    echo "Waiting for kube-system pods to reach Running state ($NUMPODS / 8 so far)"
  done

  # since this is a single-node cluster, allow the master node to run pods
  kubectl taint nodes --all node-role.kubernetes.io/master-

  cd /root
  # install RBAC for Helm
  kubectl create -f rbac-config.yaml

  rm -rf .helm
  helm init --service-account tiller


  # add cert for accessing the helm repo
  cat <<EOF >/etc/ca-certificates/update.d/helm.crt
-----BEGIN CERTIFICATE-----
MIIESjCCAzKgAwIBAgIJAIU+AfULkw0PMA0GCSqGSIb3DQEBCwUAMIG5MQswCQYD
VQQGEwJVUzETMBEGA1UECAwKTmV3IEplcnNleTETMBEGA1UEBwwKQmVkbWluc3Rl
cjENMAsGA1UECgwEQVQmVDERMA8GA1UECwwIUmVzZWFyY2gxOTA3BgNVBAMMMCou
aGVsbS5yYW5jby1kZXYtdG9vbHMuZWFzdHVzLmNsb3VkYXBwLmF6dXJlLmNvbTEj
MCEGCSqGSIb3DQEJARYUcmljQHJlc2VhcmNoLmF0dC5jb20wHhcNMTkwMzIxMTU1
MzAwWhcNMjEwMzIwMTU1MzAwWjCBuTELMAkGA1UEBhMCVVMxEzARBgNVBAgMCk5l
dyBKZXJzZXkxEzARBgNVBAcMCkJlZG1pbnN0ZXIxDTALBgNVBAoMBEFUJlQxETAP
BgNVBAsMCFJlc2VhcmNoMTkwNwYDVQQDDDAqLmhlbG0ucmFuY28tZGV2LXRvb2xz
LmVhc3R1cy5jbG91ZGFwcC5henVyZS5jb20xIzAhBgkqhkiG9w0BCQEWFHJpY0By
ZXNlYXJjaC5hdHQuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
tguhSQx5Dk2w+qx2AOcFRz7IZBASEehK1Z4f5jz2KrRylGx6jjedCZASdm1b0ZEB
/ZNrKht1zsWDETa7x0DF+q0Z2blff+T+6+YrJWhNxYHgZiYVi9gTuNDzpn8VVn7f
+cQxcMguHo1JBDIotOLubJ4T3/oXMCPv9kRSLHcNjbEE2yTB3AqXu9dvrDXuUdeU
ot6RzxhKXxRCQXPS2/FDjSV9vr9h1dv5fIkFXihpYaag0XqvXcqgncvcOJ1SsLc3
DK+tyNknqG5SL8y2a7U4F7u+qGO2/3tnCO0ggYwa73hS0pQPY51EpRSckZqlfKEu
Ut0s3wlEFP1VaU0RfU3aIwIDAQABo1MwUTAdBgNVHQ4EFgQUYTpoVXZPXSR/rhjr
pu9PPhL7f9IwHwYDVR0jBBgwFoAUYTpoVXZPXSR/rhjrpu9PPhL7f9IwDwYDVR0T
AQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAUDLbiKVIW6W9qFXLtoyO7S2e
IOUSZ1F70pkfeYUqegsfFZ9njPtPqTzDfJVxYqH2V0vxxoAxXCYCpNyR6vYlYiEL
R+oyxuvauW/yCoiwKBPYa4fD/PBajJnEO1EfIwZvjFLIfw4GjaX59+zDS3Zl0jT/
w3uhPSsJAYXtDKLZ14btA27cM5mW4kmxVD8CRdUW0jr/cN3Hqe9uLSNWCNiDwma7
RnpK7NnOgXHyhZD/nVC0nY7OzbK7VHFJatSOjyuMxgWsFGahwYNxf3AWfPwUai0K
ne/fVFGZ6ifR9QdD0SuKIAEuqSyyP4BsQ92uEweU/gWKsnM6iNVmNFX8UOuU9A==
-----END CERTIFICATE-----
EOF

  # wait for the tiller pod to reach the Running state
  NUMPODS=0
  while [ $NUMPODS -lt 1 ]; do
    sleep 5
    NUMPODS=$(kubectl get pods --all-namespaces | grep Running | grep -c "tiller-deploy")
    echo "Waiting for the tiller-deploy pod to reach Running state ($NUMPODS / 1 so far)"
  done

  echo "All up"

  #reboot
fi
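
# Post-install verification ideas (a sketch, not executed by this script; exact output
# depends on the versions pinned above):
#   kubectl get nodes -o wide
#   kubectl get pods --all-namespaces
#   helm version    # reports both client and server once tiller is Running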