# docker images. This includes all images from dockerhub and
# any other repository that hosts images for ONAP components.
repository: nexus3.onap.org:10001
- repositoryCred:
- user: docker
- password: docker
+ #repositoryCred:
+ # user: docker
+ # password: docker
# readiness check - temporary repo until images migrated to nexus3
readinessRepository: oomk8s
pullPolicy: Always
- portalHostName: "portal.ric.org"
- cookieDomain: "ric.org"
+ portalHostName: "portal.ric.o-ran-sc.org"
+ cookieDomain: "o-ran-sc.org"
# default mount path root directory referenced
# by persistent volumes and log files
persistence:
mountPath: /dockerdata-nfs
- enableDefaultStorageclass: false
+ enableDefaultStorageclass: true
parameters: {}
storageclassProvisioner: kubernetes.io/no-provisioner
volumeReclaimPolicy: Retain
image: onap/portal-app:2.5.0
persistence:
- enabled: true
+ enabled: false
dashboard:
# Override the name using the following option
# nameOverride:
+ repositoryOverride: nexus3.o-ran-sc.org:10004
+
image:
- name: ric-dashboard
- tag: 1.2.4
+ name: o-ran-sc/ric-dashboard
+ tag: 1.3.0
caasingress:
aux:
url:
onapRepository: "nexus3.onap.org:10001"
image:
- name: org.onap.dcaegen2.collectors.ves.vescollector
+ name: onap/org.onap.dcaegen2.collectors.ves.vescollector
tag: 1.4.5
mrsub:
extsvcaux:
ricip:
# The ip address of the ric cluster ingress controller
- <IP_ADDRESS of the RIC cluster>
+ 10.0.2.100
auxip:
# The ip address of the aux cluster ingress controller
- <IP_ADDRESS of the AUX cluster>
+ 10.0.2.101
--- /dev/null
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+###############################################################################
+
+#-------------------------------------------------------------------------
+# Global common setting
+#-------------------------------------------------------------------------
+global:
+ releasePrefix: r1
+ namespace:
+ xapp: ricxapp
+
+ infra: ricinfra
+ platform: ricplt
+ aux: ricaux
+ # Docker registry from which RIC platform components pull the images
+ repository: nexus3.o-ran-sc.org:10004/o-ran-sc
+
+ # Name of the K8S docker credential that is onboarded by 20-credential
+ repositoryCred: docker-reg-cred
+
+ # Docker image pull policy
+ imagePullPolicy: Always
+
+ # Helm repo that will be used by xApp manager
+  helmRepository: "http://10.0.2.100:32080/helm"
+
+ # Certificate of the helm repo
+ helmRepositoryCert: xapp-mgr-certs
+
+ # Name of the K8S secret that contains the credential of the helm repo
+ helmRepositoryCred: xapp-mgr-creds
+
+
+ # Endpoint of k8s API server
+ k8sAPIHost: https://kubernetes.default.svc.cluster.local/
+
+ # The ingress URL definitions for the ingress controller in four namespaces
+ ingressurl:
+ ric: ric-entry
+ aux: aux-entry
+ dashboard: dashboard-entry
+
+ tillers:
+ ricxapp:
+ name: ricxapp
+ nameSpace: ricxapp
+ deployNameSpace: ricinfra
+ image:
+ tillerTLSSecrets:
+ repository: nexus3.o-ran-sc.org:10004/o-ran-sc
+ name: it-dep-secret
+ tag: 0.0.2
+ tiller:
+ repository: gcr.io
+ name: kubernetes-helm/tiller
+ tag: v2.12.3
+ secret:
+ create: true
+ tillerSecretName: secret-tiller-ricxapp
+ helmSecretName: secret-helm-client-ricxapp
+ tls:
+ authenticate: true
+ verify: true
+ serviceAccount:
+ name: tiller
+ role:
+ - apiGroups: [""]
+ resources: ["pods", "configmaps", "services"]
+ verbs: ["get", "list", "create", "delete"]
+ - apiGroups: ["extensions", "apps"]
+ resources: ["deployments"]
+ verbs: ["get", "list", "create", "delete"]
+ port: 44134
+
+#-------------------------------------------------------------------------
+# Infrastructure
+#-------------------------------------------------------------------------
+
+
+credential:
+ repositoryCredential:
+ user: docker
+ password: docker
+
+
+ helmrepoCredential:
+ user: helm
+ password: helm
+
+ helmCertificate: |2
+ -----BEGIN CERTIFICATE-----
+ <CERT FOR HELM>
+ -----END CERTIFICATE-----
+
+k8s:
+ enable: false
+
+chartmuseum:
+ enable: false
+ repositoryOverride: "docker.io"
+ image:
+ name: chartmuseum/chartmuseum
+ tag: v0.8.2
+ job:
+ image:
+ name: alpine
+ tag: latest
+ storagesize: 2Gi
+ datapath: /opt/data/chartmuseum-data
+
+
+elfkp:
+ enable: true
+
+kong:
+ proxy:
+ http:
+ containerPort: 32080
+ tls:
+ containerPort: 32443
+ image:
+ repository: kong
+    tag: "1.3"
+ ingressController:
+ image:
+ repository: kong-docker-kubernetes-ingress-controller.bintray.io/kong-ingress-controller
+ tag: 0.6.0
platform: ricplt
aux: ricaux
# Docker registry from which RIC platform components pull the images
- repository: nexus3.o-ran-sc.org:10004
+ repository: nexus3.o-ran-sc.org:10004/o-ran-sc
# Name of the K8S docker credential that is onboarded by 20-credential
repositoryCred: docker-reg-cred
imagePullPolicy: Always
# Helm repo that will be used by xApp manager
- helmRepository: "http://aux-entry/helm"
+ helmRepository: "http://10.0.2.100:32080/helm"
# Certificate of the helm repo
helmRepositoryCert: xapp-mgr-certs
deployNameSpace: ricinfra
image:
tillerTLSSecrets:
- repository: nexus3.o-ran-sc.org:10004
+ repository: nexus3.o-ran-sc.org:10004/o-ran-sc
name: it-dep-secret
tag: 0.0.2
tiller:
credential:
repositoryCredential:
- user: <DOCKER USER NAME>
- password: <DOCKER PASSWORD>
+ user: docker
+ password: docker
helmrepoCredential:
- user: <HELM USER NAME>
- password: <HELM PASSWORD>
+ user: helm
+ password: helm
helmCertificate: |2
-----BEGIN CERTIFICATE-----
enable: false
chartmuseum:
- enable: false
+ enable: true
repositoryOverride: "docker.io"
image:
name: chartmuseum/chartmuseum
datapath: /opt/data/chartmuseum-data
-esreader:
- dataVolSize: 100Mi
- storageClassName: local-storage
- #storageClassName: ric-storage-class
-
- pizpub:
- enabled: false
-
elfkp:
enable: false
platform: ricplt
aux: ricaux
# Docker registry from which RIC platform components pull the images
- repository: nexus3.o-ran-sc.org:10004
+ repository: nexus3.o-ran-sc.org:10004/o-ran-sc
# Name of the K8S docker credential that is onboarded by 20-credential
repositoryCred: docker-reg-cred
deployNameSpace: ricinfra
image:
tillerTLSSecrets:
- repository: nexus3.o-ran-sc.org:10004
+ repository: nexus3.o-ran-sc.org:10004/o-ran-sc
name: it-dep-secret
tag: 0.0.2
tiller:
# repositoryOverride:
image:
name: ric-plt-a1
- tag: 0.10.3
+ tag: 1.0.4
rmr_timeout_config:
rcv_retry_interval_ms: 500
rcv_retry_times: 20
name: it-dep-init
tag: 0.0.1
name: ric-plt-appmgr
- tag: 0.1.9
+ tag: 0.2.0
iterminationGracePeriodSeconds: 0
image:
name: ric-plt-dbaas
- tag: 0.1.0
+ tag: 0.2.2
# E2 Manager
e2mgr:
# repositoryOverride:
image:
name: ric-plt-e2mgr
- tag: 2.0.7
+ tag: 3.0.1
env:
RIC_ID: "bbbccc-abcd0e/20"
privilegedmode: false
# repositoryOverride:
image:
name: ric-plt-e2
- tag: 2.0.7
+ tag: 3.0.1
env:
print: "1"
privilegedmode: false
storageClassName: local-storage
#storageClassName: ric-storage-class
- pizpub:
- enabled: false
-
# Routing Manager
rtmgr:
# Use the following option to override the docker registry value
# repositoryOverride:
image:
name: ric-plt-rtmgr
- tag: 0.3.3
+ tag: 0.3.9
loglevel: DEBUG
# Subscription Manager
# repositoryOverride:
image:
name: ric-plt-submgr
- tag: 0.10.0
+ tag: 0.10.7
# VESPA Manager
vespamgr:
# repositoryOverride:
image:
name: ric-plt-vespamgr
- tag: 0.0.5
+ tag: 0.0.8
prometheusurl: "http://rec-prometheus-server.default"
# RAN Resource Monitor
rsm:
image:
- name: ric-plt-rsm
- tag: 2.0.6
+ name: ric-plt-resource-status-manager
+ tag: 3.0.1
# Jaeger Adapter
jaegeradapter:
extsvcplt:
auxip:
# The ip address of the aux cluster ingress controller
- <IP_ADDRESS of the RIC cluster>
+ 10.0.2.101
ricip:
# The ip address of the ric cluster ingress controller
- <IP_ADDRESS of the RIC cluster>
+ 10.0.2.100
#echo "__cinder_volume_id__" > /opt/config/cinder_volume_id.txt
# because cloud init user data has a 16kB limit, remove all comment lines to save space.
-sed -i "" -e '/^[ \t]*#/d' "$filename"
+# except for the #! line
+sed -i -e '/^[ \t]*#[^!]/d' "$filename"
chmod +x "$filename"
printenv
IPV6IF=""
-#IPV6IF="ens4"
+rm -rf /opt/config
mkdir -p /opt/config
echo "__docker_version__" > /opt/config/docker_version.txt
echo "__k8s_version__" > /opt/config/k8s_version.txt
# install low latency kernel, docker.io, and kubernetes
apt-get update
+apt-get -y autoremove
RES=$(apt-get install -y virt-what curl jq netcat 2>&1)
if [[ $RES == */var/lib/dpkg/lock* ]]; then
echo "Fail to get dpkg lock. Wait for any other package installation"
apt-get install -y linux-image-4.15.0-45-lowlatency
fi
+if kubeadm version; then
+ # remove existing Kubernetes installation
+ echo "Removing existing Kubernetes installation, version $(kubeadm version)"
+ kubeadm reset -f
+ rm -rf ~/.kube
+fi
+APTOPTS="--allow-downgrades --allow-change-held-packages --allow-unauthenticated --ignore-hold "
if [ -z ${DOCKERVERSION} ]; then
- apt-get install -y --allow-change-held-packages --allow-unauthenticated --ignore-hold docker.io
+ apt-get install -y $APTOPTS docker.io
else
- apt-get install -y --allow-change-held-packages --allow-unauthenticated --ignore-hold docker.io=${DOCKERVERSION}
+ apt-get install -y $APTOPTS docker.io=${DOCKERVERSION}
fi
+cat > /etc/docker/daemon.json <<EOF
+{
+ "exec-opts": ["native.cgroupdriver=systemd"],
+ "log-driver": "json-file",
+ "log-opts": {
+ "max-size": "100m"
+ },
+ "storage-driver": "overlay2"
+}
+EOF
+mkdir -p /etc/systemd/system/docker.service.d
systemctl enable docker.service
+systemctl daemon-reload
+systemctl restart docker
if [ -z ${CNIVERSION} ]; then
- apt-get install -y --allow-change-held-packages --allow-unauthenticated --ignore-hold kubernetes-cni
+ apt-get install -y $APTOPTS kubernetes-cni
else
- apt-get install -y --allow-change-held-packages --allow-unauthenticated --ignore-hold kubernetes-cni=${CNIVERSION}
+ apt-get install -y $APTOPTS kubernetes-cni=${CNIVERSION}
fi
if [ -z ${KUBEVERSION} ]; then
- apt-get install -y --allow-change-held-packages --allow-unauthenticated --ignore-hold kubeadm kubelet kubectl
+ apt-get install -y $APTOPTS kubeadm kubelet kubectl
else
- apt-get install -y --allow-change-held-packages --allow-unauthenticated --ignore-hold kubeadm=${KUBEVERSION} kubelet=${KUBEVERSION} kubectl=${KUBEVERSION}
+ apt-get install -y $APTOPTS kubeadm=${KUBEVERSION} kubelet=${KUBEVERSION} kubectl=${KUBEVERSION}
fi
apt-mark hold docker.io kubernetes-cni kubelet kubeadm kubectl
# install flannel
if [[ ${KUBEV} == 1.16.* ]]; then
- kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
+ kubectl apply -f "https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml"
else
- kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/bc79dd1505b0c8681ece4de4c0d86c5cd2643275/Documentation/kube-flannel.yml
+ kubectl apply -f "https://raw.githubusercontent.com/coreos/flannel/bc79dd1505b0c8681ece4de4c0d86c5cd2643275/Documentation/kube-flannel.yml"
fi
# waiting for all 8 kube-system pods to be in running state
# install Helm
HELMV=$(cat /opt/config/helm_version.txt)
HELMVERSION=${HELMV}
- cd /root
- mkdir Helm
- cd Helm
+ cd /root && rm -rf Helm && mkdir Helm && cd Helm
wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELMVERSION}-linux-amd64.tar.gz
tar -xvf helm-v${HELMVERSION}-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin/helm
if [[ ${KUBEV} == 1.16.* ]]; then
# helm init uses API extensions/v1beta1 which is depreciated by Kubernetes
# 1.16.0. Until upstream (helm) provides a fix, this is the work-around.
- helm init --service-account tiller --override spec.selector.matchLabels.'name'='tiller',spec.selector.matchLabels.'app'='helm' --output yaml | sed 's@apiVersion: extensions/v1beta1@apiVersion: apps/v1@' | kubectl apply -f -
+ helm init --service-account tiller --override spec.selector.matchLabels.'name'='tiller',spec.selector.matchLabels.'app'='helm' --output yaml > helm-init.yaml
+ sed 's@apiVersion: extensions/v1beta1@apiVersion: apps/v1@' ./helm-init.yaml > helm-init-patched.yaml
+ kubectl apply -f ./helm-init-patched.yaml
else
helm init --service-account tiller
fi
+ helm init -c
export HELM_HOME="/root/.helm"
# waiting for tiller pod to be in running state