#!/bin/bash -x
################################################################################
-# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019,2020 AT&T Intellectual Property. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
################################################################################
-# first parameter: number of expected running pods
+# first parameter: number of expected running pods
# second parameter: namespace (all-namespaces means all namespaces)
# third parameter: [optional] keyword
wait_for_pods_running () {
elif [[ ${UBUNTU_RELEASE} == 18.* ]]; then
echo "Installing on Ubuntu $UBUNTU_RELEASE (Bionic Beaver)"
if [ ! -z "${DOCKERV}" ]; then
- DOCKERVERSION="${DOCKERV}-0ubuntu1~18.04.5"
+ DOCKERVERSION="${DOCKERV}-0ubuntu1~18.04.4"
fi
else
echo "Unsupported Ubuntu release ($UBUNTU_RELEASE) detected. Exit."
# install low latency kernel, docker.io, and kubernetes
apt-get update
-apt-get -y autoremove
-RES=$(apt-get install -y virt-what curl jq netcat 2>&1)
+RES=$(apt-get install -y virt-what curl jq netcat make ipset moreutils 2>&1)
if [[ $RES == */var/lib/dpkg/lock* ]]; then
echo "Fail to get dpkg lock. Wait for any other package installation"
echo "process to finish, then rerun this script"
apt-get install -y linux-image-4.15.0-45-lowlatency
fi
-if kubeadm version; then
- # remove existing Kubernetes installation
- echo "Removing existing Kubernetes installation, version $(kubeadm version)"
- kubeadm reset -f
- rm -rf ~/.kube
-fi
-
APTOPTS="--allow-downgrades --allow-change-held-packages --allow-unauthenticated --ignore-hold "
+
+# remove infrastructure stack if present
+# note: removal order matters — kubeadm is processed (and 'kubeadm reset' run)
+# before docker.io is removed, while the container runtime is still available
+for PKG in kubeadm docker.io; do
+ # third whitespace-squeezed field of the matching 'dpkg --list' row is the
+ # installed version string; empty when the package is not installed.
+ # NOTE(review): 'grep ${PKG}' matches substrings, so unrelated rows that
+ # contain the package name could yield multiple versions — confirm.
+ INSTALLED_VERSION=$(dpkg --list |grep ${PKG} |tr -s " " |cut -f3 -d ' ')
+ if [ ! -z ${INSTALLED_VERSION} ]; then
+ if [ "${PKG}" == "kubeadm" ]; then
+ # tear down any existing cluster state and wipe the local kubeconfig
+ # before removing the full Kubernetes package set
+ kubeadm reset -f
+ rm -rf ~/.kube
+ apt-get -y $APTOPTS remove kubeadm kubelet kubectl kubernetes-cni
+ else
+ apt-get -y $APTOPTS remove "${PKG}"
+ fi
+ fi
+done
+# purge dependencies orphaned by the removals above
+apt-get -y autoremove
+
+# install docker
if [ -z ${DOCKERVERSION} ]; then
apt-get install -y $APTOPTS docker.io
else
kind: KubeProxyConfiguration
mode: ipvs
EOF
- elif [[ ${KUBEV} == 1.16.* ]]; then
+ elif [[ ${KUBEV} == 1.15.* ]] || [[ ${KUBEV} == 1.16.* ]] || [[ ${KUBEV} == 1.18.* ]]; then
cat <<EOF >/root/config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kubernetesVersion: v${KUBEV}
mkdir -p .kube
cp -i /etc/kubernetes/admin.conf /root/.kube/config
chown root:root /root/.kube/config
+ export KUBECONFIG=/root/.kube/config
+ echo "KUBECONFIG=${KUBECONFIG}" >> /etc/environment
# at this point we should be able to use kubectl
kubectl get pods --all-namespaces
# install flannel
- if [[ ${KUBEV} == 1.16.* ]]; then
- kubectl apply -f "https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml"
- else
- kubectl apply -f "https://raw.githubusercontent.com/coreos/flannel/bc79dd1505b0c8681ece4de4c0d86c5cd2643275/Documentation/kube-flannel.yml"
- fi
+ kubectl apply -f "https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml"
# waiting for all 8 kube-system pods to be in running state
# (at this point, minions have not joined yet)
# if running a single node cluster, need to enable master node to run pods
kubectl taint nodes --all node-role.kubernetes.io/master-
- cd /root
- # install RBAC for Helm
- kubectl create -f rbac-config.yaml
-
# install Helm
HELMV=$(cat /opt/config/helm_version.txt)
HELMVERSION=${HELMV}
+ if [ ! -e helm-v${HELMVERSION}-linux-amd64.tar.gz ]; then
+ wget https://get.helm.sh/helm-v${HELMVERSION}-linux-amd64.tar.gz
+ fi
cd /root && rm -rf Helm && mkdir Helm && cd Helm
- wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELMVERSION}-linux-amd64.tar.gz
- tar -xvf helm-v${HELMVERSION}-linux-amd64.tar.gz
+ tar -xvf ../helm-v${HELMVERSION}-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin/helm
+ cd /root
+ # create the RBAC config (Tiller service account) — required by Helm v2 only
+ if [[ ${HELMVERSION} == 2.* ]]; then
+ kubectl create -f rbac-config.yaml
+ fi
+
rm -rf /root/.helm
if [[ ${KUBEV} == 1.16.* ]]; then
# helm init uses API extensions/v1beta1 which is deprecated by Kubernetes
# 1.16.0. Until upstream (helm) provides a fix, this is the work-around.
- helm init --service-account tiller --override spec.selector.matchLabels.'name'='tiller',spec.selector.matchLabels.'app'='helm' --output yaml > helm-init.yaml
- sed 's@apiVersion: extensions/v1beta1@apiVersion: apps/v1@' ./helm-init.yaml > helm-init-patched.yaml
- kubectl apply -f ./helm-init-patched.yaml
+ if [[ ${HELMVERSION} == 2.* ]]; then
+ helm init --service-account tiller --override spec.selector.matchLabels.'name'='tiller',spec.selector.matchLabels.'app'='helm' --output yaml > /tmp/helm-init.yaml
+ sed 's@apiVersion: extensions/v1beta1@apiVersion: apps/v1@' /tmp/helm-init.yaml > /tmp/helm-init-patched.yaml
+ kubectl apply -f /tmp/helm-init-patched.yaml
+ fi
else
- helm init --service-account tiller
+ if [[ ${HELMVERSION} == 2.* ]]; then
+ helm init --service-account tiller
+ fi
+ fi
+ if [[ ${HELMVERSION} == 2.* ]]; then
+ helm init -c
+ export HELM_HOME="$(pwd)/.helm"
+ echo "HELM_HOME=${HELM_HOME}" >> /etc/environment
fi
- helm init -c
- export HELM_HOME="/root/.helm"
# wait until Helm responds; with Helm v2 this requires the Tiller pod to be up
- wait_for_pods_running 1 kube-system tiller-deploy
while ! helm version; do
echo "Waiting for Helm to be ready"
sleep 15