Update recipes and k8s 1node script 72/1772/1
author wrider <lji@research.att.com>
Tue, 26 Nov 2019 04:23:58 +0000 (23:23 -0500)
committer wrider <lji@research.att.com>
Tue, 26 Nov 2019 04:24:07 +0000 (23:24 -0500)
Change-Id: I71602f47fe61a016967f7131962af911d33a44d8
Signed-off-by: wrider <lji@research.att.com>
RECIPE_EXAMPLE/RIC_AUX_RECIPE_EXAMPLE
RECIPE_EXAMPLE/RIC_INFRA_AUX_RECIPE_EXAMPLE [new file with mode: 0644]
RECIPE_EXAMPLE/RIC_INFRA_RECIPE_EXAMPLE
RECIPE_EXAMPLE/RIC_PLATFORM_RECIPE_EXAMPLE
ric-infra/00-Kubernetes/bin/gen-cloud-init.sh
ric-infra/00-Kubernetes/heat/scripts/k8s_vm_install.sh

diff --git a/RECIPE_EXAMPLE/RIC_AUX_RECIPE_EXAMPLE b/RECIPE_EXAMPLE/RIC_AUX_RECIPE_EXAMPLE
index e83c421..977295e 100644
--- a/RECIPE_EXAMPLE/RIC_AUX_RECIPE_EXAMPLE
+++ b/RECIPE_EXAMPLE/RIC_AUX_RECIPE_EXAMPLE
@@ -69,9 +69,9 @@ global:
   # docker images. This includes all images from dockerhub and
   # any other repository that hosts images for ONAP components.
   repository: nexus3.onap.org:10001
-  repositoryCred:
-    user: docker
-    password: docker
+  #repositoryCred:
+  #  user: docker
+  #  password: docker
 
   # readiness check - temporary repo until images migrated to nexus3
   readinessRepository: oomk8s
@@ -83,13 +83,13 @@ global:
   pullPolicy: Always
 
 
-  portalHostName: "portal.ric.org"
-  cookieDomain: "ric.org"
+  portalHostName: "portal.ric.o-ran-sc.org"
+  cookieDomain: "o-ran-sc.org"
   # default mount path root directory referenced
   # by persistent volumes and log files
   persistence:
     mountPath: /dockerdata-nfs
-    enableDefaultStorageclass: false
+    enableDefaultStorageclass: true
     parameters: {}
     storageclassProvisioner: kubernetes.io/no-provisioner
     volumeReclaimPolicy: Retain
@@ -141,14 +141,16 @@ portal-app:
   image: onap/portal-app:2.5.0
 
 persistence:
-  enabled: true
+  enabled: false
 
 dashboard:
   # Override the name using the following option
   # nameOverride:
+  repositoryOverride: nexus3.o-ran-sc.org:10004
+
   image:
-    name: ric-dashboard
-    tag: 1.2.4
+    name: o-ran-sc/ric-dashboard
+    tag: 1.3.0
   caasingress:
     aux:
       url:
@@ -179,7 +181,7 @@ ves:
   onapRepository: "nexus3.onap.org:10001"
 
   image:
-    name: org.onap.dcaegen2.collectors.ves.vescollector
+    name: onap/org.onap.dcaegen2.collectors.ves.vescollector
     tag: 1.4.5
 
 mrsub:
@@ -233,7 +235,7 @@ logstash:
 extsvcaux:
   ricip:
     # The ip address of the ric cluster ingress controller
-    <IP_ADDRESS of the RIC cluster>
+    10.0.2.100
   auxip:
     # The ip address of the aux cluster ingress controller
-    <IP_ADDRESS of the AUX cluster>
+    10.0.2.101
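
A quick post-deploy sanity check for this recipe (a sketch; assumes kubectl access and that the aux components land in the ricaux namespace as in the other recipes): with enableDefaultStorageclass flipped to true, a default StorageClass should exist, and the dashboard image should resolve to the overridden o-ran-sc registry path.

    # look for a StorageClass marked "(default)"
    kubectl get storageclass
    # dashboard image should be nexus3.o-ran-sc.org:10004/o-ran-sc/ric-dashboard:1.3.0
    kubectl get pods -n ricaux -o jsonpath='{range .items[*]}{.spec.containers[*].image}{"\n"}{end}'
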
diff --git a/RECIPE_EXAMPLE/RIC_INFRA_AUX_RECIPE_EXAMPLE b/RECIPE_EXAMPLE/RIC_INFRA_AUX_RECIPE_EXAMPLE
new file mode 100644
index 0000000..91ca0b1
--- /dev/null
+++ b/RECIPE_EXAMPLE/RIC_INFRA_AUX_RECIPE_EXAMPLE
@@ -0,0 +1,141 @@
+################################################################################
+#   Copyright (c) 2019 AT&T Intellectual Property.                             #
+#   Copyright (c) 2019 Nokia.                                                  #
+#                                                                              #
+#   Licensed under the Apache License, Version 2.0 (the "License");            #
+#   you may not use this file except in compliance with the License.           #
+#   You may obtain a copy of the License at                                    #
+#                                                                              #
+#       http://www.apache.org/licenses/LICENSE-2.0                             #
+#                                                                              #
+#   Unless required by applicable law or agreed to in writing, software        #
+#   distributed under the License is distributed on an "AS IS" BASIS,          #
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+#   See the License for the specific language governing permissions and        #
+#   limitations under the License.                                             #
+###############################################################################
+
+#-------------------------------------------------------------------------
+# Global common setting
+#-------------------------------------------------------------------------
+global:
+  releasePrefix: r1
+  namespace:
+    xapp: ricxapp
+  
+    infra: ricinfra
+    platform: ricplt
+    aux: ricaux
+  # Docker registry from which RIC platform components pull the images
+  repository: nexus3.o-ran-sc.org:10004/o-ran-sc
+
+  # Name of the K8S docker credential that is onboarded by 20-credential 
+  repositoryCred: docker-reg-cred
+
+  # Docker image pull policy
+  imagePullPolicy: Always
+
+  # Helm repo that will be used by xApp manager
+  helmRepository: "http://10.0.2.100:32080/helm"
+
+  # Certificate of the helm repo
+  helmRepositoryCert: xapp-mgr-certs
+
+  # Name of the K8S secret that contains the credential of the helm repo
+  helmRepositoryCred: xapp-mgr-creds
+
+
+  # Endpoint of k8s API server
+  k8sAPIHost: https://kubernetes.default.svc.cluster.local/
+
+  # The ingress URL definitions for the ingress controller in four namespaces
+  ingressurl:
+    ric: ric-entry
+    aux: aux-entry
+    dashboard: dashboard-entry
+
+  tillers:
+    ricxapp:
+      name: ricxapp
+      nameSpace: ricxapp
+      deployNameSpace: ricinfra
+      image:
+        tillerTLSSecrets:
+          repository: nexus3.o-ran-sc.org:10004/o-ran-sc
+          name: it-dep-secret
+          tag: 0.0.2
+        tiller:
+          repository: gcr.io
+          name: kubernetes-helm/tiller
+          tag: v2.12.3
+      secret:
+        create: true
+        tillerSecretName: secret-tiller-ricxapp
+        helmSecretName: secret-helm-client-ricxapp
+      tls:
+       authenticate: true
+       verify: true
+      serviceAccount:
+        name: tiller
+        role:
+        - apiGroups: [""]
+          resources: ["pods", "configmaps", "services"]
+          verbs: ["get", "list", "create", "delete"]
+        - apiGroups: ["extensions", "apps"]
+          resources: ["deployments"]
+          verbs: ["get", "list", "create", "delete"]
+      port: 44134
+
+#-------------------------------------------------------------------------
+# Infrastructure
+#-------------------------------------------------------------------------
+
+
+credential:
+  repositoryCredential:
+    user: docker
+    password: docker
+  
+  
+  helmrepoCredential:
+    user: helm
+    password: helm
+
+  helmCertificate: |2
+    -----BEGIN CERTIFICATE-----
+    <CERT FOR HELM>
+    -----END CERTIFICATE-----
+
+k8s:
+  enable: false
+
+chartmuseum:
+  enable: false
+  repositoryOverride: "docker.io"
+  image:
+    name: chartmuseum/chartmuseum
+    tag: v0.8.2
+  job:
+    image:
+      name: alpine
+      tag: latest
+  storagesize: 2Gi
+  datapath: /opt/data/chartmuseum-data
+
+
+elfkp:
+  enable: true
+
+kong:
+  proxy:
+    http:
+      containerPort: 32080
+    tls:
+      containerPort: 32443
+  image:
+    repository: kong
+    tag: 1.3
+  ingressController:
+    image:
+      repository: kong-docker-kubernetes-ingress-controller.bintray.io/kong-ingress-controller
+      tag: 0.6.0
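
The tillers block above stands up a dedicated, TLS-enabled Tiller for the ricxapp namespace, deployed into ricinfra. A minimal probe (a sketch; the secret and namespace names come from this recipe, but extracting the client cert/key from secret-helm-client-ricxapp and the local file paths are assumptions):

    # the xApp Tiller should be running in its deploy namespace
    kubectl get pods -n ricinfra -l name=tiller
    # talk to it over mutual TLS with the client credentials from the helm secret
    helm version --tiller-namespace ricinfra --tls \
      --tls-cert ./helm-client.crt --tls-key ./helm-client.key
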
diff --git a/RECIPE_EXAMPLE/RIC_INFRA_RECIPE_EXAMPLE b/RECIPE_EXAMPLE/RIC_INFRA_RECIPE_EXAMPLE
index efeaa77..60c22b2 100644
--- a/RECIPE_EXAMPLE/RIC_INFRA_RECIPE_EXAMPLE
+++ b/RECIPE_EXAMPLE/RIC_INFRA_RECIPE_EXAMPLE
@@ -27,7 +27,7 @@ global:
     platform: ricplt
     aux: ricaux
   # Docker registry from which RIC platform components pull the images
-  repository: nexus3.o-ran-sc.org:10004
+  repository: nexus3.o-ran-sc.org:10004/o-ran-sc
 
   # Name of the K8S docker credential that is onboarded by 20-credential 
   repositoryCred: docker-reg-cred
@@ -36,7 +36,7 @@ global:
   imagePullPolicy: Always
 
   # Helm repo that will be used by xApp manager
-  helmRepository: "http://aux-entry/helm"
+  helmRepository: "http://10.0.2.100:32080/helm"
 
   # Certificate of the helm repo
   helmRepositoryCert: xapp-mgr-certs
@@ -61,7 +61,7 @@ global:
       deployNameSpace: ricinfra
       image:
         tillerTLSSecrets:
-          repository: nexus3.o-ran-sc.org:10004
+          repository: nexus3.o-ran-sc.org:10004/o-ran-sc
           name: it-dep-secret
           tag: 0.0.2
         tiller:
@@ -93,13 +93,13 @@ global:
 
 credential:
   repositoryCredential:
-    user: <DOCKER USER NAME>
-    password: <DOCKER PASSWORD>
+    user: docker
+    password: docker
   
   
   helmrepoCredential:
-    user: <HELM USER NAME>
-    password: <HELM PASSWORD>
+    user: helm
+    password: helm
 
   helmCertificate: |2
     -----BEGIN CERTIFICATE-----
@@ -110,7 +110,7 @@ k8s:
   enable: false
 
 chartmuseum:
-  enable: false
+  enable: true
   repositoryOverride: "docker.io"
   image:
     name: chartmuseum/chartmuseum
@@ -123,14 +123,6 @@ chartmuseum:
   datapath: /opt/data/chartmuseum-data
 
 
-esreader:
-  dataVolSize: 100Mi
-  storageClassName: local-storage
-  #storageClassName: ric-storage-class
-
-  pizpub:
-    enabled: false
-
 elfkp:
   enable: false
 
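
With chartmuseum switched on and the xApp manager's Helm repo pointed at the Kong-proxied node address, the repo index should be reachable at that URL (a sketch; assumes the 10.0.2.100 address and the helm/helm credentials set in this recipe):

    # the xApp Helm repo should serve its index through the ingress
    curl -u helm:helm http://10.0.2.100:32080/helm/index.yaml
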
diff --git a/RECIPE_EXAMPLE/RIC_PLATFORM_RECIPE_EXAMPLE b/RECIPE_EXAMPLE/RIC_PLATFORM_RECIPE_EXAMPLE
index bf71500..ddc012d 100644
--- a/RECIPE_EXAMPLE/RIC_PLATFORM_RECIPE_EXAMPLE
+++ b/RECIPE_EXAMPLE/RIC_PLATFORM_RECIPE_EXAMPLE
@@ -26,7 +26,7 @@ global:
     platform: ricplt
     aux: ricaux
   # Docker registry from which RIC platform components pull the images
-  repository: nexus3.o-ran-sc.org:10004
+  repository: nexus3.o-ran-sc.org:10004/o-ran-sc
 
   # Name of the K8S docker credential that is onboarded by 20-credential 
   repositoryCred: docker-reg-cred
@@ -59,7 +59,7 @@ global:
       deployNameSpace: ricinfra
       image:
         tillerTLSSecrets:
-          repository: nexus3.o-ran-sc.org:10004
+          repository: nexus3.o-ran-sc.org:10004/o-ran-sc
           name: it-dep-secret
           tag: 0.0.2
         tiller:
@@ -94,7 +94,7 @@ a1mediator:
   # repositoryOverride:
   image:
     name: ric-plt-a1
-    tag: 0.10.3
+    tag: 1.0.4
   rmr_timeout_config:
     rcv_retry_interval_ms: 500
     rcv_retry_times: 20
@@ -108,7 +108,7 @@ appmgr:
       name: it-dep-init
       tag: 0.0.1
     name: ric-plt-appmgr
-    tag: 0.1.9
+    tag: 0.2.0
 
 
 
@@ -120,7 +120,7 @@ dbaas:
     iterminationGracePeriodSeconds: 0
     image:
       name: ric-plt-dbaas
-      tag: 0.1.0
+      tag: 0.2.2
 
 # E2 Manager
 e2mgr:
@@ -128,7 +128,7 @@ e2mgr:
   # repositoryOverride:
   image:
     name: ric-plt-e2mgr
-    tag: 2.0.7
+    tag: 3.0.1
   env:
     RIC_ID: "bbbccc-abcd0e/20"
   privilegedmode: false
@@ -140,7 +140,7 @@ e2term:
   # repositoryOverride:
   image:
     name: ric-plt-e2
-    tag: 2.0.7
+    tag: 3.0.1
   env:
     print: "1"
   privilegedmode: false
@@ -150,16 +150,13 @@ e2term:
   storageClassName: local-storage
   #storageClassName: ric-storage-class
 
-  pizpub:
-    enabled: false
-
 # Routing Manager
 rtmgr:
   # Use the following option to override the docker registry value
   # repositoryOverride:
   image:
     name: ric-plt-rtmgr
-    tag: 0.3.3
+    tag: 0.3.9
   loglevel: DEBUG
 
 # Subscription Manager
@@ -168,7 +165,7 @@ submgr:
   # repositoryOverride:
   image:
     name: ric-plt-submgr
-    tag: 0.10.0
+    tag: 0.10.7
 
 # VESPA Manager
 vespamgr:
@@ -176,14 +173,14 @@ vespamgr:
   # repositoryOverride:
   image:
     name: ric-plt-vespamgr
-    tag: 0.0.5
+    tag: 0.0.8
   prometheusurl: "http://rec-prometheus-server.default"
 
 # RAN Resource Monitor
 rsm:
   image:
-    name: ric-plt-rsm
-    tag: 2.0.6
+    name: ric-plt-resource-status-manager
+    tag: 3.0.1
 
 # Jaeger Adapter
 jaegeradapter:
@@ -198,7 +195,7 @@ jaegeradapter:
 extsvcplt:
   auxip:
     # The ip address of the aux cluster ingress controller
-    <IP_ADDRESS of the RIC cluster>
+    10.0.2.101
   ricip:
     # The ip address of the ric cluster ingress controller
-    <IP_ADDRESS of the RIC cluster>
+    10.0.2.100
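
Before deploying with the bumped tags, it is worth spot-checking that they exist in the registry (a sketch; assumes anonymous pulls from the read-only 10004 port are allowed):

    # spot-check two of the new platform image tags
    docker pull nexus3.o-ran-sc.org:10004/o-ran-sc/ric-plt-a1:1.0.4
    docker pull nexus3.o-ran-sc.org:10004/o-ran-sc/ric-plt-e2mgr:3.0.1
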
diff --git a/ric-infra/00-Kubernetes/bin/gen-cloud-init.sh b/ric-infra/00-Kubernetes/bin/gen-cloud-init.sh
index 0d9899a..55b1bc3 100755
--- a/ric-infra/00-Kubernetes/bin/gen-cloud-init.sh
+++ b/ric-infra/00-Kubernetes/bin/gen-cloud-init.sh
@@ -142,7 +142,8 @@ sed -i "" -e "s/__stack_name__/\$(hostname)/g" "$filename"
 #echo "__cinder_volume_id__" > /opt/config/cinder_volume_id.txt
 
 # because cloud init user data has a 16kB limit, remove all comment lines to save space.
-sed -i "" -e '/^[ \t]*#/d' "$filename" 
+# except for the #! line
+sed -i "" -e '/^[ \t]*#[^!]/d' "$filename" 
 
 chmod +x "$filename"
 
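
The tightened sed expression now spares shebang lines while still stripping ordinary comments; note that a bare "#" line also survives, since the pattern requires a character after the "#". A quick demonstration using the same expression in its portable, non-in-place form:

    printf '#!/bin/bash\n  # a comment\necho hello\n' | sed -e '/^[ \t]*#[^!]/d'
    # prints the #! line and "echo hello"; the comment line is removed
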
diff --git a/ric-infra/00-Kubernetes/heat/scripts/k8s_vm_install.sh b/ric-infra/00-Kubernetes/heat/scripts/k8s_vm_install.sh
index 50c73f7..2653f38 100644
--- a/ric-infra/00-Kubernetes/heat/scripts/k8s_vm_install.sh
+++ b/ric-infra/00-Kubernetes/heat/scripts/k8s_vm_install.sh
@@ -61,8 +61,8 @@ echo "__host_private_ip_addr__ $(hostname)" >> /etc/hosts
 printenv
 
 IPV6IF=""
-#IPV6IF="ens4"
 
+rm -rf /opt/config
 mkdir -p /opt/config
 echo "__docker_version__" > /opt/config/docker_version.txt
 echo "__k8s_version__" > /opt/config/k8s_version.txt
@@ -148,6 +148,7 @@ echo "APT::Acquire::Retries \"3\";" > /etc/apt/apt.conf.d/80-retries
 
 # install low latency kernel, docker.io, and kubernetes
 apt-get update
+apt-get -y autoremove
 RES=$(apt-get install -y virt-what curl jq netcat 2>&1)
 if [[ $RES == */var/lib/dpkg/lock* ]]; then
   echo "Fail to get dpkg lock.  Wait for any other package installation"
@@ -161,24 +162,44 @@ if ! echo $(virt-what) | grep "virtualbox"; then
   apt-get install -y linux-image-4.15.0-45-lowlatency
 fi
 
+if kubeadm version; then
+  # remove existing Kubernetes installation
+  echo "Removing existing Kubernetes installation, version $(kubeadm version)"
+  kubeadm reset -f
+  rm -rf ~/.kube
+fi
 
+APTOPTS="--allow-downgrades --allow-change-held-packages --allow-unauthenticated --ignore-hold "
 if [ -z ${DOCKERVERSION} ]; then
-  apt-get install -y --allow-change-held-packages --allow-unauthenticated --ignore-hold docker.io
+  apt-get install -y $APTOPTS docker.io
 else
-  apt-get install -y --allow-change-held-packages --allow-unauthenticated --ignore-hold docker.io=${DOCKERVERSION}
+  apt-get install -y $APTOPTS docker.io=${DOCKERVERSION}
 fi
+cat > /etc/docker/daemon.json <<EOF
+{
+  "exec-opts": ["native.cgroupdriver=systemd"],
+  "log-driver": "json-file",
+  "log-opts": {
+    "max-size": "100m"
+  },
+  "storage-driver": "overlay2"
+}
+EOF
+mkdir -p /etc/systemd/system/docker.service.d
 systemctl enable docker.service
+systemctl daemon-reload
+systemctl restart docker
 
 if [ -z ${CNIVERSION} ]; then
-  apt-get install -y --allow-change-held-packages --allow-unauthenticated --ignore-hold kubernetes-cni
+  apt-get install -y $APTOPTS kubernetes-cni
 else
-  apt-get install -y --allow-change-held-packages --allow-unauthenticated --ignore-hold kubernetes-cni=${CNIVERSION}
+  apt-get install -y $APTOPTS kubernetes-cni=${CNIVERSION}
 fi
 
 if [ -z ${KUBEVERSION} ]; then
-  apt-get install -y --allow-change-held-packages --allow-unauthenticated --ignore-hold kubeadm kubelet kubectl
+  apt-get install -y $APTOPTS kubeadm kubelet kubectl
 else
-  apt-get install -y --allow-change-held-packages --allow-unauthenticated --ignore-hold kubeadm=${KUBEVERSION} kubelet=${KUBEVERSION} kubectl=${KUBEVERSION}
+  apt-get install -y $APTOPTS kubeadm=${KUBEVERSION} kubelet=${KUBEVERSION} kubectl=${KUBEVERSION}
 fi
 
 apt-mark hold docker.io kubernetes-cni kubelet kubeadm kubectl
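
The new daemon.json pins Docker to the systemd cgroup driver, which must match what kubelet expects, and the restart makes it take effect before kubeadm runs. A quick check (a sketch):

    # verify Docker picked up the settings from /etc/docker/daemon.json
    docker info 2>/dev/null | grep -i 'cgroup driver'
    # expected: Cgroup Driver: systemd
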
@@ -288,9 +309,9 @@ EOF
 
   # install flannel
   if [[ ${KUBEV} == 1.16.* ]]; then
-    kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
+    kubectl apply -f "https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml"
   else
-    kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/bc79dd1505b0c8681ece4de4c0d86c5cd2643275/Documentation/kube-flannel.yml
+    kubectl apply -f "https://raw.githubusercontent.com/coreos/flannel/bc79dd1505b0c8681ece4de4c0d86c5cd2643275/Documentation/kube-flannel.yml"
   fi
 
   # waiting for all 8 kube-system pods to be in running state
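
After the flannel manifest is applied, its daemonset pods in kube-system have to reach Running before the pod-count wait can succeed; a direct check (a sketch, assuming the manifest's usual app=flannel label):

    kubectl get pods -n kube-system -l app=flannel -o wide
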
@@ -307,9 +328,7 @@ EOF
   # install Helm
   HELMV=$(cat /opt/config/helm_version.txt)
   HELMVERSION=${HELMV}
-  cd /root
-  mkdir Helm
-  cd Helm
+  cd /root && rm -rf Helm && mkdir Helm && cd Helm
   wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELMVERSION}-linux-amd64.tar.gz
   tar -xvf helm-v${HELMVERSION}-linux-amd64.tar.gz
   mv linux-amd64/helm /usr/local/bin/helm
@@ -318,10 +337,13 @@ EOF
   if [[ ${KUBEV} == 1.16.* ]]; then
    # helm init uses API extensions/v1beta1 which is deprecated by Kubernetes
     # 1.16.0.  Until upstream (helm) provides a fix, this is the work-around.
-    helm init --service-account tiller --override spec.selector.matchLabels.'name'='tiller',spec.selector.matchLabels.'app'='helm' --output yaml | sed 's@apiVersion: extensions/v1beta1@apiVersion: apps/v1@' | kubectl apply -f -
+    helm init --service-account tiller --override spec.selector.matchLabels.'name'='tiller',spec.selector.matchLabels.'app'='helm' --output yaml > helm-init.yaml
+    sed 's@apiVersion: extensions/v1beta1@apiVersion: apps/v1@' ./helm-init.yaml > helm-init-patched.yaml
+    kubectl apply -f ./helm-init-patched.yaml
   else
     helm init --service-account tiller
   fi
+  helm init -c
   export HELM_HOME="/root/.helm"
 
   # waiting for tiller pod to be in running state
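
helm init -c (client-only) initializes the local Helm configuration; on the 1.16 path the server side was installed separately via the patched manifest. Tiller readiness can be confirmed directly once the wait completes (a sketch):

    # tiller should reach Running in kube-system, then client and server versions match
    kubectl get pods -n kube-system -l app=helm,name=tiller
    helm version --short
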