--- /dev/null
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+###############################################################################
+
+#-------------------------------------------------------------------------
+# Global common setting
+#-------------------------------------------------------------------------
+global:
+ # Docker registry from which RIC platform components pull the images
+ repository: nexus3.o-ran-sc.org:10004
+
+ # Name of the K8S docker credential that is onboarded by 20-credential
+ repositoryCred: docker-reg-cred
+
+ # Docker image pull policy
+ imagePullPolicy: Always
+
+ # Helm repo that will be used by xApp manager
+ helmRepository: nexus.ricinfra.local
+
+ # Certificate of the helm repo
+ helmRepositoryCert: xapp-mgr-certs
+
+ # Name of the K8S secret that contains the credential of the helm repo
+ helmRepositoryCred: xapp-mgr-creds
+
+ # The name of the tiller that xApp helm client talks to
+ helmRepositoryTiller: tiller-deploy
+
+ # The namespace of the xApp helm tiller
+ helmRepositoryTillerNamespace: kube-system
+
+ # The port the xApp helm tiller is listening to
+ helmRepositoryTillerPort: 44134
+
+
+
+
+#-------------------------------------------------------------------------
+# Auxiliary Functions
+#-------------------------------------------------------------------------
+dashboard:
+ image:
+ name:: ric-dash-be
+ tag: 1.0.0-SNAPSHOT
+
--- /dev/null
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+###############################################################################
+
+#-------------------------------------------------------------------------
+# Global common setting
+#-------------------------------------------------------------------------
+global:
+ # Docker registry from which RIC platform components pull the images
+ repository: nexus3.o-ran-sc.org:10004
+
+ # Name of the K8S docker credential that is onboarded by 20-credential
+ repositoryCred: docker-reg-cred
+
+ # Docker image pull policy
+ imagePullPolicy: Always
+
+ # Helm repo that will be used by xApp manager
+ helmRepository: nexus.ricinfra.local
+
+ # Certificate of the helm repo
+ helmRepositoryCert: xapp-mgr-certs
+
+ # Name of the K8S secret that contains the credential of the helm repo
+ helmRepositoryCred: xapp-mgr-creds
+
+ # The name of the tiller that xApp helm client talks to
+ helmRepositoryTiller: tiller-deploy
+
+ # The namespace of the xApp helm tiller
+ helmRepositoryTillerNamespace: kube-system
+
+ # The port the xApp helm tiller is listening to
+ helmRepositoryTillerPort: 44134
+
+#-------------------------------------------------------------------------
+# Infrastructure
+#-------------------------------------------------------------------------
+
+cluster:
+ deployK8S: false
+ deployNexus: false
+ useLocalHelmRepo: false
+ useLocalDockerRegistry: false
+
+
+repositoryCredential:
+ user: "docker"
+ password: "docker"
+
+
+helmrepoCredential:
+ user: "helm"
+ password: "docker"
+
+
+adminPassword: admin123
+
+datapath: /tmp/nexus3-data/
imagePullPolicy: Always
# Helm repo that will be used by xApp manager
- helmRepository: nexus.ricinfra.local
+ helmRepository: helm.ricinfra.local
# Certificate of the helm repo
helmRepositoryCert: xapp-mgr-certs
# The port the xApp helm tiller is listening to
helmRepositoryTillerPort: 44134
-#-------------------------------------------------------------------------
-# Infrastructure
-#-------------------------------------------------------------------------
-
-
-
-
-#-------------------------------------------------------------------------
-# Auxiliary Functions
-#-------------------------------------------------------------------------
-dashboard:
- image:
- name:: ric-dash-be
- tag: 1.0.0-SNAPSHOT
#-------------------------------------------------------------------------
# Platform Components
# xApp Manager
appmgr:
image:
- name: xapp-manager
- tag: latest
+ name: ric-plt-appmgr
+ tag: 0.1.3
# DBAAS
dbaas:
backend:
image:
- name: redis-standalone
- tag: latest
+ name: ric-plt-dbaas
+ tag: 0.1.0
# E2 Manager
e2mgr:
--- /dev/null
+#!/bin/bash
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+
+# This script deploys RIC auxiliary function components automatically
+
+OVERRIDEYAML=$1
+
+
+
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
+
+source $DIR/../etc/ric-infra.conf
+
+export RICINFRA_RELEASE_NAME=$ric_infra_helm_release_name
+
+export RICINFRA_NAMESPACE=$ric_infra_namespace
+
+export DEPLOY_K8S=$deployK8S
+
+export DEPLOY_NEXUS=$deployNexus
+
+export USE_LOCAL_HELM_REPO=$useLocalHelmRepo
+
+export USE_LOCAL_DOCKER_REGISTRY=$useLocalDockerRegistry
+
+
+
+
+
+source $DIR/../etc/ric-platform.conf
+
+export RICPLT_RELEASE_NAME=$ric_platform_helm_release_name
+
+export RICPLT_NAMESPACE=$ric_platform_namespace
+
+
+
+
+if [ -z "$OVERRIDEYAML" ];then
+ echo "****************************************************************************************************************"
+ echo " WARNING "
+ echo "****************************************************************************************************************"
+ echo "Deploying RIC infrastructure components without deployment recipe. Default configurations are used."
+ echo "****************************************************************************************************************"
+
+else
+
+ export DEPLOY_K8S=$(cat $OVERRIDEYAML | grep deployK8S | awk '{print $2}')
+ export DEPLOY_NEXUS=$(cat $OVERRIDEYAML | grep deployNexus | awk '{print $2}')
+ export USE_LOCAL_HELM_REPO=$(cat $OVERRIDEYAML | grep useLocalHelmRepo | awk '{print $2}')
+ export USE_LOCAL_DOCKER_REGISTRY=$(cat $OVERRIDEYAML | grep useLocalDockerRegistry | awk '{print $2}')
+fi
+
+
+
+if $USE_LOCAL_HELM_REPO && [ "$DEPLOY_NEXUS" != "true" ];then
+ echo "****************************************************************************************************************"
+ echo " ERROR "
+ echo "****************************************************************************************************************"
+ echo "Local helm repo cannot be used if Nexus is not installed. Please change your configurations in the deployment recipe."
+ echo "****************************************************************************************************************"
+ exit 1
+fi
+
+
+if $USE_LOCAL_DOCKER_REGISTRY && [ "$DEPLOY_NEXUS" != "true" ];then
+ echo "****************************************************************************************************************"
+ echo " ERROR "
+ echo "****************************************************************************************************************"
+ echo "Local docker registry cannot be used if Nexus is not installed. Please change your configurations in the deployment recipe."
+ echo "****************************************************************************************************************"
+ exit 1
+fi
+
+
+if $DEPLOY_K8S; then
+ echo "Deploying K8S. Please make sure that you configure files in ./ric-infra/00-Kubernetes/etc correctly."
+ . ../ric-infra/00-Kubernetes/bin/install
+fi
+
+
+if $DEPLOY_NEXUS; then
+ echo "Deploying Nexus."
+ . ../ric-infra/10-Nexus/bin/install $OVERRIDEYAML
+fi
+
+
+
--- /dev/null
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+
+# ------------------- RIC AUX ---------------------------------------
+
+# release name helm will use to deploy RIC infrastructure components
+ric_infra_helm_release_name=r0
+
+# namespace that helm will use to deploy RIC infrastructure components
+ric_infra_namespace=ricinfra
+
+# deploy the Kubernetes cluster
+deployK8S=false
+
+# deploy a nexus repository manager
+deployNexus=true
+
+# use the local helm repo
+useLocalHelmRepo=true
+
+# use the local docker registry
+useLocalDockerRegistry=false
apiVersion: v1
appVersion: "1.0"
-description: Helm chart for the RIC Dashboard web app
+description: Helm Chart for RIC Dashboard
name: dashboard
version: 1.1.0
+icon: https://gerrit.o-ran-sc.org/r/gitweb?p=portal/ric-dashboard.git;a=blob;f=webapp-frontend/src/assets/at_t.png;h=3cced1d5ce4668fbf3b33064aaaa6920bc8130b6;hb=HEAD
--- /dev/null
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+
+# Defines a config map for mounting as file application.properties
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "dashboard.fullname" . }}-appconfig
+data:
+ # this key becomes the filename when mounted
+ application.properties: |
+ # appconfig.yaml
+ server.port = {{ .Values.dashboard.server.port }}
+ a1med.url = {{ .Values.dashboard.properties.a1med.url }}
+ anrxapp.url = {{ .Values.dashboard.properties.anrxapp.url }}
+ e2mgr.url = {{ .Values.dashboard.properties.e2mgr.url }}
+ xappmgr.url = {{ .Values.dashboard.properties.xappmgr.url }}
imagePullPolicy: {{ include "common.pullPolicy" . }}
ports:
- name: http
- containerPort: 8080
+ containerPort: {{ .Values.dashboard.server.port }}
protocol: TCP
- env:
- - name: SPRING_APPLICATION_JSON
- valueFrom:
- configMapKeyRef:
- name: dashboard-configmap
- key: springApplication.json
+ volumeMounts:
+ - name: application-config
+ mountPath: /maven/application.properties
+ subPath: application.properties
+ readOnly: true
livenessProbe:
httpGet:
path: /
httpGet:
path: /
port: http
+ volumes:
+ - name: application-config
+ configMap:
+ name: {{ include "dashboard.fullname" . }}-appconfig
spec:
type: {{ .Values.dashboard.service.type }}
ports:
- - port: {{ .Values.dashboard.service.port }}
+ - port: {{ .Values.dashboard.server.port }}
+ nodePort: {{ .Values.dashboard.service.port }}
targetPort: http
protocol: TCP
selector:
# limitations under the License. #
################################################################################
-# Default values for dashboard.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
+# Deployment values for dashboard.
-replicaCount: 1
repository: "nexus3.o-ran-sc.org:10004"
imagePullPolicy: IfNotPresent
repositoryCred: docker-reg-cred
dashboard:
image:
name: ric-dash-be
- tag: 1.0.0-SNAPSHOT
-
-
+ tag: 1.0.4
+ replicaCount: 1
+ server:
+ # Tomcat listens here
+ port: 8080
service:
type: NodePort
- port: 80
-
+ port: 30080
+ name: dashboard-service
+ # config URLs must be specified at deployment
+ properties:
+ a1med:
+ url: http://values-yaml-default-A1-URL
+ anrxapp:
+ url: http://values-yaml-default-ANR-URL
+ e2mgr:
+ url: http://values-yaml-default-E2-URL
+ xappmgr:
+ url: http://values-yaml-default-MGR-URL
--- /dev/null
+### Introduction
+
+This directory contains configurations, templates, and scripts for deploying a Kubernetes cluster for RIC and other AUX functions.
+
+Two methods of deployment are supported:
+- Single node Kubernetes cluster deployment:
+ - A cloud init script that installs the docker-kubernetes-helm stack onto a VM launched using cloud Ubuntu 16.04 image.
+ - The same script can be run on a newly launched VM using cloud Ubuntu 16.04 image to install the same infrastructure software stack.
+- Multi-node, dual-cluster deployment:
+ - Using OpenStack Heat Orchestration Template, calling OpenStack stack creation API to create two sets of VMs, one for RIC cluster and the other for AUX cluster.
+ - Installing docker-kubernetes-helm stack on each of the VMs.
+ - Configuring each set of VMs into a Kubernetes cluster.
+ - Configuring well-known host name resolutions.
+
+### Directory Structure
+- bin
+ - deploy-stack.sh
+ - gen-cloud-init.sh
+ - gen-ric-heat-yaml.sh
+ - install
+ - undeploy-stack.sh
+- etc
+ - env.rc
+ - infra.rc
+ - openstack.rc
+- heat
+ - env
+ - aux.env
+ - ric.env
+ - parts
+ - part-1-v6.yaml
+ - part-1.yaml
+ - part-2-v6.yaml
+ - part-2.yaml
+ - part-3-v6.yaml
+ - part-3.yaml
+ - scripts
+ - k8s_vm_aux_install.sh
+ - k8s_vm_custom_repos.sh
+ - k8s_vm_init.sh
+ - k8s_vm_init_serv.sh
+ - k8s_vm_install.sh
+
+
+### Configuration
+All configurations are under etc directory
+- env.rc
+ - This file contains configurations for Gerrit, Helm, and Docker registry that will be used for hosting artifacts for the deployment.
+- infra.rc
+ - This file contains configurations for the infrastructure software stack, e.g. versions of docker, Kubernetes, and Helm software to be installed.
+ - Normally there is no need to modify this file.
+- openstack.rc
+ - This file contains configurations for the local OpenStack instance that will be used for deploying the Heat stacks.
+
+
+### Deploying 1-node Kubernetes
+
+1. Must complete the local configuration in etc/env.rc file.
+2. cd bin
+3. ./gen-cloud-init.sh
+4. The generated cloud init file is named k8s-1node-cloud-init.sh
+5. Use the generated k8s-1node-cloud-init.sh script:
+ a. At VM launch time, paste in the contents of the k8s-1node-cloud-init.sh file to the "Customization script" window of the "Configuration" step, when using Horizon dashboard to launch a new VM.
+ b. Copy the k8s-1node-cloud-init.sh file to a newly launched cloud image Ubuntu 16.04 VM. Run the script in a "sudo -i" shell.
+6. After the execution of the script is completed, run "kubectl get pods --all-namespaces" to check.
+
+### Deploying Dual Kubernetes Cluster
+1. Must complete the local configuration in etc/env.rc and etc/openstack.rc files.
+2. cd bin
+3. ./install
+4. After the execution is completed, go to WORKDIR_ric and WORKDIR_aux to see the file that contains the IP addresses of the VMs.
+5. ssh into the -mst VMs (master nodes) of the clusters, run "kubectl get pods --all-namespaces" to check.
+
--- /dev/null
+#!/bin/bash
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+
+set -e
+
+stack_name="ric"
+full_deletion=false
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
+set -a
+RCS="$(find $DIR/../etc -type f -maxdepth 1)"
+for RC in $RCS; do
+ echo "reading in values in $RC"
+ source $RC
+done
+set +a
+
+
+if [ -z "$__RUNRICENV_GERRIT_HOST__" ]; then
+ export __RUNRICENV_GERRIT_HOST__=$gerrithost
+fi
+if [ -z "$__RUNRICENV_GERRIT_IP__" ]; then
+ export __RUNRICENV_GERRIT_IP__=$gerritip
+fi
+if [ -z "$__RUNRICENV_DOCKER_HOST__" ]; then
+ export __RUNRICENV_DOCKER_HOST__=$dockerregistry
+fi
+if [ -z "$__RUNRICENV_DOCKER_IP__" ]; then
+ export __RUNRICENV_DOCKER_IP__=$dockerip
+fi
+if [ -z "$__RUNRICENV_DOCKER_PORT__" ]; then
+ export __RUNRICENV_DOCKER_PORT__=$dockerport
+fi
+if [ -z "$__RUNRICENV_DOCKER_USER__" ]; then
+ export __RUNRICENV_DOCKER_USER__=$dockeruser
+fi
+if [ -z "$__RUNRICENV_DOCKER_PASS__" ]; then
+ export __RUNRICENV_DOCKER_PASS__=$dockerpassword
+fi
+if [ -z "$__RUNRICENV_DOCKER_CERT__" ]; then
+ export __RUNRICENV_DOCKER_CERT__=$dockercert
+fi
+if [ -z "$__RUNRICENV_HELMREPO_HOST__" ]; then
+ export __RUNRICENV_HELMREPO_HOST__=$helmrepo
+fi
+if [ -z "$__RUNRICENV_HELMREPO_PORT__" ]; then
+ export __RUNRICENV_HELMREPO_PORT__=$helmport
+fi
+if [ -z "$__RUNRICENV_HELMREPO_IP__" ]; then
+ export __RUNRICENV_HELMREPO_IP__=$helmip
+fi
+if [ -z "$__RUNRICENV_HELMREPO_USER__" ]; then
+ export __RUNRICENV_HELMREPO_USER__=$helmuser
+fi
+if [ -z "$__RUNRICENV_HELMREPO_PASS__" ]; then
+ export __RUNRICENV_HELMREPO_PASS__=$helmpassword
+fi
+if [ -z "$__RUNRICENV_HELMREPO_CERT__" ]; then
+ export __RUNRICENV_HELMREPO_CERT__=$helmcert
+fi
+
+
+if [ -z "$WORKSPACE" ]; then
+ export WORKSPACE=`git rev-parse --show-toplevel`
+fi
+HEAT_DIR="$WORKSPACE/ric-infra/00-Kubernetes/heat"
+BIN_DIR="$WORKSPACE/ric-infra/00-Kubernetes/bin"
+ETC_DIR="$WORKSPACE/ric-infra/00-Kubernetes/etc"
+ENV_DIR="$WORKSPACE/ric-infra/00-Kubernetes/heat/env"
+
+
+cd $BIN_DIR
+
+
+openstack --version > /dev/null
+if [ $? -eq 0 ]; then
+ echo "OK openstack CLI installed"
+else
+ echo "Must run in an envirnment with openstack cli"
+ exit 1
+fi
+
+if [ -z "$OS_USERNAME" ]; then
+ echo "Must source the Openstack RC file for the target installation tenant"
+ exit 1
+fi
+
+
+usage() {
+ echo "Usage: $0 [ -n <number of VMs {2-15}> ][ -s <stack name> ]<env> <ssh_keypair> <template>" 1>&2;
+ echo "n: Set the number of VMs that will be installed. " 1>&2;
+ echo "s: Set the name to be used for stack. This name will be used for naming of resources" 1>&2;
+ echo "d: Dryrun, only generating templates, no calling OpenStack API" 1>&2;
+ echo "6: When enabled, VMs will have an IPv6 interface." 1>&2;
+
+ exit 1;
+}
+
+
+dryrun='false'
+v6='false'
+while getopts ":n:w:s:6d" o; do
+ case "${o}" in
+ n)
+ if [[ ${OPTARG} =~ ^[0-9]+$ ]];then
+ if [ ${OPTARG} -ge 1 -a ${OPTARG} -le 15 ]; then
+ vm_num=${OPTARG}
+ else
+ usage
+ fi
+ else
+ usage
+ fi
+ ;;
+ s)
+ if [[ ! ${OPTARG} =~ ^[0-9]+$ ]];then
+ stack_name=${OPTARG}
+ else
+ usage
+ fi
+ ;;
+ w)
+ WORKDIR_NAME=${OPTARG}
+ ;;
+ 6)
+ v6=true
+ ;;
+ d)
+ dryrun=true
+ ;;
+ *)
+ usage
+ ;;
+ esac
+done
+shift $((OPTIND-1))
+
+if [ "$#" -lt 2 ]; then
+ usage
+fi
+
+ENV_FILE=$1
+if [ ! -f $ENV_FILE ]; then
+ echo ENV file does not exist or was not given
+ exit 1
+fi
+shift 1
+
+SSH_KEY=$1
+if [ ! -s $SSH_KEY ]; then
+ echo SSH Keypair file does not exist or was not given
+ exit 1
+fi
+shift 1
+
+if [ -z "$vm_num" ]; then
+ TMPL_FILE=$1
+ if [ ! -f $TMPL_FILE ]; then
+ echo Heat template file does not exist or was not given
+ exit 1
+ fi
+ shift 1
+fi
+
+# Prints all commands to output that are executed by the terminal
+set -x
+
+if [ -z "$WORKDIR_NAME" ]; then
+ WORKDIR_NAME="workdir-$(date +%Y%m%d%H%M%S)"
+fi
+WORKDIR="$BIN_DIR/$WORKDIR_NAME"
+rm -rf "$WORKDIR"
+mkdir -p "$WORKDIR"
+
+# get the openstack rc file env variable values in env file
+envsubst < $ENV_FILE > "$WORKDIR/$(basename $ENV_FILE)"
+ENV_FILE="$WORKDIR/$(basename $ENV_FILE)"
+
+# prepare (localize) all scripts to be installed to the cluster VMs
+SCRIPTS=$(ls -1 $HEAT_DIR/scripts/*)
+for SCRIPT in $SCRIPTS; do
+ envsubst '${__RUNRICENV_GERRIT_HOST__}
+ ${__RUNRICENV_GERRIT_IP__}
+ ${__RUNRICENV_DOCKER_HOST__}
+ ${__RUNRICENV_DOCKER_IP__}
+ ${__RUNRICENV_DOCKER_PORT__}
+ ${__RUNRICENV_DOCKER_USER__}
+ ${__RUNRICENV_DOCKER_PASS__}
+ ${__RUNRICENV_DOCKER_CERT__}
+ ${__RUNRICENV_HELMREPO_HOST__}
+ ${__RUNRICENV_HELMREPO_PORT__}
+ ${__RUNRICENV_HELMREPO_IP__}
+ ${__RUNRICENV_HELMREPO_CERT__}
+ ${__RUNRICENV_HELMREPO_USER__}
+ ${__RUNRICENV_HELMREPO_PASS__} '< $SCRIPT > "$WORKDIR/$(basename $SCRIPT)"
+done
+
+# generate a heat template with the specified number of VMs and IPv6 option
+if [ ! -z "$vm_num" ]; then
+ CURDIR=$(pwd)
+ if [ -z "$v6" ]; then
+ ./gen-ric-heat-yaml.sh -n $vm_num > "$WORKDIR/k8s-${vm_num}VMs.yaml"
+ TMPL_FILE="$WORKDIR/k8s-${vm_num}VMs.yaml"
+ else
+ ./gen-ric-heat-yaml.sh -6 -n $vm_num > "$WORKDIR/k8s-${vm_num}VMs-v6.yaml"
+ TMPL_FILE="$WORKDIR/k8s-${vm_num}VMs-v6.yaml"
+ fi
+fi
+
+if [ "$dryrun" == "true" ]; then
+ exit 0
+fi
+
+
+for n in $(seq 1 5); do
+ echo "${n} of 5 attempts to deploy the stack $stack_name"
+ FAILED='false'
+ if [ ! -z "$(openstack stack list |grep -w $stack_name)" ]; then
+ openstack stack delete $stack_name;
+ while [ "DELETE_IN_PROGRESS" == "$(openstack stack show -c stack_status -f value $stack_name)" ]; do
+ echo "Waiting for stack $stack_name deletion to complete"
+ sleep 5
+ done
+ fi
+
+ # create a stack with the template and env files
+ if ! openstack stack create -t $TMPL_FILE -e $ENV_FILE $stack_name; then
+ FAILED='true'
+ break
+ fi
+
+ # wait for OpenStack stack creation completes
+ while [ "CREATE_IN_PROGRESS" == "$(openstack stack show -c stack_status -f value $stack_name)" ]; do
+ sleep 20
+ done
+
+ STATUS=$(openstack stack show -c stack_status -f value $stack_name)
+ echo $STATUS
+ if [ "CREATE_COMPLETE" != "$STATUS" ]; then
+ echo "OpenSatck stack creation failed"
+ FAILED='true'
+ break;
+ fi
+
+ # wait till the Master node to become alive
+ for i in $(seq 1 30); do
+ sleep 30
+ K8S_MST_IP=$(openstack stack output show $stack_name k8s_mst_vm_ip -c output_value -f value)
+ timeout 1 ping -c 1 "$K8S_MST_IP" && break
+ done
+
+ timeout 1 ping -c 1 "$K8S_MST_IP" && break
+
+ echo Error: OpenStack infrastructure issue: unable to reach master node "$K8S_MST_IP"
+ FAILED='true'
+ sleep 10
+done
+
+if ! timeout 1 ping -c 1 "$K8S_MST_IP"; then
+ echo "Master node not reachable, stack creation failed, exit"
+ exit 2
+fi
+
+
+K8S_MASTER_HOSTNAME="${stack_name}-k8s-mst"
+echo "$K8S_MASTER_HOSTNAME $K8S_MST_IP" > ./ips-${stack_name}
+while ! nc -z $K8S_MST_IP 29999; do
+ echo "Wait for Master node $K8S_MST_IP to be ready"
+ sleep 5
+done
+
+set +e
+
+unset JOINCMD
+while [[ -z $JOINCMD ]]; do
+ sleep 15
+ JOINCMD=$(ssh -i $SSH_KEY ubuntu@$K8S_MST_IP -q -o "StrictHostKeyChecking no" sudo kubeadm token create --print-join-command)
+done
+
+for i in $(seq 1 99); do
+ IP_NAME=k8s_$(printf "%02d" "$i")_vm_ip
+ K8S_MINION_IP=$(openstack stack output show $stack_name $IP_NAME -c output_value -f value)
+ if [ -z $K8S_MINION_IP ]; then
+ break
+ fi
+ K8S_MINION_HOSTNAME=${stack_name}-k8s-$(printf "%02d" "$i")
+ echo "$K8S_MINION_HOSTNAME $K8S_MINION_IP" >> ./ips-${stack_name}
+
+ #while ! nc -z $K8S_MINION_IP 29999; do
+ # echo "Wait for minion node $K8S_MINION_IP to be ready"
+ # sleep 5
+ #done
+ echo "Joining $K8S_MINION_HOSTNAME [$K8S_MINION_IP] to cluster master $K8S_MST_IP with command $JOINCMD"
+ while ! ssh -i $SSH_KEY -q -o "StrictHostKeyChecking no" ubuntu@$K8S_MINION_IP sudo $JOINCMD; do
+ echo "Retry join command in 10 seconds"
+ sleep 10
+ done
+done
+
+export __IPS_${stack_name}__="$(cat ${WORKDIR}/ips-${stack_name})"
--- /dev/null
+#!/bin/bash
+#
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+
+
+# the following script replaces templates in a script with env variables defined in etc folder
+# when running without specifying a script, the default is to use the heat/scripts/k8s-vm-install.sh,
+# the result which is a script that can be used as cloud-init script and the initial installation
+# script that turns a newly launched VM into a single node k8s cluster with Helm.
+
+usage() {
+ echo "Usage: $0 <template file>" 1>&2;
+ echo " If the template file is supplied, the template file is processed;" 1>&2;
+ echo " Otherwise the k8s_vm_install.sh file under heat/script is used as template." 1>&2;
+ exit 1;
+}
+
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
+set -a
+RCS="$(find $DIR/../etc -type f -maxdepth 1)"
+for RC in $RCS; do
+ echo "reading in values in $RC"
+ source $RC
+done
+set +a
+
+
+if [ -z "$WORKSPACE" ]; then
+ export WORKSPACE=`git rev-parse --show-toplevel`
+fi
+HEAT_DIR="$WORKSPACE/ric-infra/00-Kubernetes/heat"
+BIN_DIR="$WORKSPACE/ric-infra/00-Kubernetes/bin"
+ETC_DIR="$WORKSPACE/ric-infra/00-Kubernetes/etc"
+ENV_DIR="$WORKSPACE/ric-infra/00-Kubernetes/heat/env"
+
+if [ -z "$1" ]; then
+ TMPL="${HEAT_DIR}/scripts/k8s_vm_install.sh"
+else
+ TMPL="$1"
+fi
+
+
+if [ -z "$__RUNRICENV_GERRIT_HOST__" ]; then
+ export __RUNRICENV_GERRIT_HOST__=$gerrithost
+fi
+if [ -z "$__RUNRICENV_GERRIT_IP__" ]; then
+ export __RUNRICENV_GERRIT_IP__=$gerritip
+fi
+if [ -z "$__RUNRICENV_DOCKER_HOST__" ]; then
+ export __RUNRICENV_DOCKER_HOST__=$dockerregistry
+fi
+if [ -z "$__RUNRICENV_DOCKER_IP__" ]; then
+ export __RUNRICENV_DOCKER_IP__=$dockerip
+fi
+if [ -z "$__RUNRICENV_DOCKER_PORT__" ]; then
+ export __RUNRICENV_DOCKER_PORT__=$dockerport
+fi
+if [ -z "$__RUNRICENV_DOCKER_USER__" ]; then
+ export __RUNRICENV_DOCKER_USER__=$dockeruser
+fi
+if [ -z "$__RUNRICENV_DOCKER_PASS__" ]; then
+ export __RUNRICENV_DOCKER_PASS__=$dockerpassword
+fi
+if [ -z "$__RUNRICENV_DOCKER_CERT__" ]; then
+ export __RUNRICENV_DOCKER_CERT__=$dockercert
+fi
+if [ -z "$__RUNRICENV_HELMREPO_HOST__" ]; then
+ export __RUNRICENV_HELMREPO_HOST__=$helmrepo
+fi
+if [ -z "$__RUNRICENV_HELMREPO_PORT__" ]; then
+ export __RUNRICENV_HELMREPO_PORT__=$helmport
+fi
+if [ -z "$__RUNRICENV_HELMREPO_IP__" ]; then
+ export __RUNRICENV_HELMREPO_IP__=$helmip
+fi
+if [ -z "$__RUNRICENV_HELMREPO_USER__" ]; then
+ export __RUNRICENV_HELMREPO_USER__=$helmuser
+fi
+if [ -z "$__RUNRICENV_HELMREPO_PASS__" ]; then
+ export __RUNRICENV_HELMREPO_PASS__=$helmpassword
+fi
+if [ -z "$__RUNRICENV_HELMREPO_CERT__" ]; then
+ export __RUNRICENV_HELMREPO_CERT__=$helmcert
+fi
+
+
+filename=$(basename -- "$TMPL")
+extension="${filename##*.}"
+filename="${filename%.*}"
+
+envsubst '${__RUNRICENV_GERRIT_HOST__}
+ ${__RUNRICENV_GERRIT_IP__}
+ ${__RUNRICENV_DOCKER_HOST__}
+ ${__RUNRICENV_DOCKER_IP__}
+ ${__RUNRICENV_DOCKER_PORT__}
+ ${__RUNRICENV_DOCKER_USER__}
+ ${__RUNRICENV_DOCKER_PASS__}
+ ${__RUNRICENV_DOCKER_CERT__}
+ ${__RUNRICENV_HELMREPO_HOST__}
+ ${__RUNRICENV_HELMREPO_PORT__}
+ ${__RUNRICENV_HELMREPO_IP__}
+ ${__RUNRICENV_HELMREPO_CERT__}
+ ${__RUNRICENV_HELMREPO_USER__}
+ ${__RUNRICENV_HELMREPO_PASS__}' < "$TMPL" > "$filename"
+
+# fill values that are supplied by Heat stack deployment process as much as we can
+sed -e "s/__docker_version__/${INFRA_DOCKER_VERSION}/g" "$filename" > tmp && mv tmp "$filename"
+sed -e "s/__k8s_version__/${INFRA_K8S_VERSION}/g" "$filename" > tmp && mv tmp "$filename"
+sed -e "s/__k8s_cni_version__/${INFRA_CNI_VERSION}/g" "$filename" > tmp && mv tmp "$filename"
+sed -e "s/__helm_version__/${INFRA_HELM_VERSION}/g" "$filename" > tmp && mv tmp "$filename"
+sed -e "s/__k8s_mst_private_ip_addr__/\$(hostname -I)/g" "$filename" > tmp && mv tmp "$filename"
+sed -e "s/__host_private_ip_addr__/\$(hostname -I)/g" "$filename" > tmp && mv tmp "$filename"
+#sed -e "s/__k8s_mst_floating_ip_addr__/\$(ec2metadata --public-ipv4)/g" "$filename" > tmp && mv tmp "$filename"
+sed -e "s/__k8s_mst_floating_ip_addr__/\$(curl ifconfig.co)/g" "$filename" > tmp && mv tmp "$filename"
+sed -e "s/__stack_name__/\$(hostname)/g" "$filename" > tmp && mv tmp "$filename"
+#echo "__mtu__" > /opt/config/mtu.txt
+#echo "__cinder_volume_id__" > /opt/config/cinder_volume_id.txt
+
+
+chmod +x "$filename"
+if [ -z "$1" ]; then
+ mv "$filename" k8s-1node-cloud-init.sh
+ # reboot VM to load the new kernel.
+ echo 'if [ "$(uname -r)" != "4.15.0-45-lowlatency" ]; then reboot; fi' >> k8s-1node-cloud-init.sh
+fi
--- /dev/null
+#!/bin/bash
+#
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+
+
+usage() {
+ echo "Usage: $0 [ -n <number of VMs {1-99}> ][ -6 ]" 1>&2;
+
+ echo "n: Set the number of VMs that will be installed. This number must be between 1 and 99; " 1>&2;
+ echo "6: When set, VMs will be configured with an IPv6 interface" 1>&2;
+ exit 1;
+}
+
+unset V6
+while getopts ":n:6" o; do
+ case "${o}" in
+ n)
+ if [[ ${OPTARG} =~ ^[0-9]+$ ]];then
+ if [ ${OPTARG} -ge 1 -a ${OPTARG} -le 15 ]; then
+ vm_num=${OPTARG}
+ else
+ usage
+ fi
+ else
+ usage
+ fi
+ ;;
+ 6)
+ V6='-v6'
+ ;;
+ *)
+ usage
+ ;;
+ esac
+done
+shift $((OPTIND-1))
+
+NUM_K8S_VMS=$(($vm_num - 1))
+unset SEQ
+if [ "$NUM_K8S_VMS" -gt "0" ]; then
+ SEQ=$(seq -f %02g $NUM_K8S_VMS)
+fi
+
+if [ -z "$WORKSPACE" ]; then
+ export WORKSPACE=`git rev-parse --show-toplevel`
+fi
+PARTS_DIR=$WORKSPACE/ric-infra/00-Kubernetes/heat/parts
+
+cat <<EOF
+#
+# Generated by scripts/gen-onap-oom-yaml.sh; MANUAL CHANGES WILL BE LOST
+#
+EOF
+
+cat $PARTS_DIR/part-1${V6}.yaml
+
+
+# the first node is master
+#for VM_NUM in mst $(seq -f %02g $NUM_K8S_VMS); do
+for VM_NUM in mst $SEQ; do
+ VM_TYPE=k8s HOST_LABEL=compute VM_NUM=$VM_NUM envsubst < $PARTS_DIR/part-2${V6}.yaml
+done
+
+
+cat $PARTS_DIR/part-3${V6}.yaml
+
+#for VM_NUM in mst $(seq -f %02g $NUM_K8S_VMS); do
+for VM_NUM in mst $SEQ; do
+ K8S_VM_NAME=k8s_$VM_NUM
+ cat <<EOF
+ ${K8S_VM_NAME}_vm_ip:
+ description: The IP address of the ${K8S_VM_NAME} instance
+ value: { get_attr: [${K8S_VM_NAME}_floating_ip, floating_ip_address] }
+
+EOF
+done
# limitations under the License. #
################################################################################
-# This is a temporary script that today deploy one node K8S cluster
-./install-1node-k8s
+# 1. Edit the ../etc/env.rc file for local deployment's Gerrit, Nexus repos, Helm repo
+# parameters
+# 2. Update the ../etc/openstack.rc file for OpenStack installation parameters
+# 3. Run from an environment with OpenStack CLI access
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
+set -a
+RCS="$(find $DIR/../etc -type f -maxdepth 1)"
+for RC in $RCS; do
+ echo "reading in values in $RC"
+ source $RC
+done
+set +a
+
+if [ -z "$WORKSPACE" ]; then
+ export WORKSPACE=`git rev-parse --show-toplevel`
+fi
+HEAT_DIR="$WORKSPACE/ric-infra/00-Kubernetes/heat"
+BIN_DIR="$WORKSPACE/ric-infra/00-Kubernetes/bin"
+
+
+stackname=aux
+
+for stackname in ric aux; do
+ WORKDIR_NAME="WORKDIR_${stackname}"
+ WORKDIR="${BIN_DIR}/${WORKDIR_NAME}"
+ echo ./deploy-stack.sh -w "$WORKDIR_NAME" -s "$stackname" -n 2 -6 "../heat/env/${stackname}.env" ${SSH_KEY}
+done
+
+exit
+# set up cross cluster hostname resolution for well-known host names
+RIC_MST_IP=$(head -1 ${WORKDIR}/ips-ric | cut -f2 -d' ')
+AUX_MST_IP=$(head -1 ${WORKDIR}/ips-aux | cut -f2 -d' ')
+for IP in $(cut -f2 -d ' ' ips-ric); do
+ REMOTE_CMD="sudo sh -c \"echo '"$AUX_MST_IP" ves.aux.local' >> /etc/hosts; \
+ echo '"$AUX_MST_IP" es.aux.local' >> /etc/hosts\""
+ ssh -i $SSH_KEY -q -o "StrictHostKeyChecking no" ubuntu@$IP "$REMOTE_CMD"
+done
+for IP in $(cut -f2 -d ' ' ips-aux); do
+ REMOTE_CMD="sudo sh -c \"echo '"$RIC_MST_IP" a1.aux.local' >> /etc/hosts\""
+ ssh -i $SSH_KEY -q -o "StrictHostKeyChecking no" ubuntu@$IP "$REMOTE_CMD"
+done
+
+++ /dev/null
-#!/bin/bash
-
-################################################################################
-# Copyright (c) 2019 AT&T Intellectual Property. #
-# Copyright (c) 2019 Nokia. #
-# #
-# Licensed under the Apache License, Version 2.0 (the "License"); #
-# you may not use this file except in compliance with the License. #
-# You may obtain a copy of the License at #
-# #
-# http://www.apache.org/licenses/LICENSE-2.0 #
-# #
-# Unless required by applicable law or agreed to in writing, software #
-# distributed under the License is distributed on an "AS IS" BASIS, #
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
-# See the License for the specific language governing permissions and #
-# limitations under the License. #
-################################################################################
-
-# The intention for this script is to stand up a dev testing k8s environment
-# that is ready for RIC installation for individual developer/team's API and functional
-# testing needs.
-# The integration team will maintain the synchronization of software infrastructure
-# stack (software, versions and configurations) between this iscript and what is
-# provided for the E2E validation testing. Due to resource and other differences, this
-# environment is not intended for any testing related to performance, resilience,
-# robustness, etc.
-
-# This script installs docker host, a one-node k8s cluster, and Helm for CoDev.
-# This script assumes that it will be executed on an Ubuntu 16.04 VM.
-# It is best to be run as the cloud-init script at the VM launch time, or from a
-# "sudo -i" shell post-launch on a newly launched VM.
-#
-
-set -x
-
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
-source $DIR/../etc/k8s-1node
-
-
-if [ -z "$__RUNRICENV_GERRIT_HOST__" ]; then
- export __RUNRICENV_GERRIT_HOST__=$gerrithost
-fi
-if [ -z "$__RUNRICENV_GERRIT_IP__" ]; then
- export __RUNRICENV_GERRIT_IP__=$gerritip
-fi
-if [ -z "$__RUNRICENV_DOCKER_HOST__" ]; then
- export __RUNRICENV_DOCKER_HOST__=$dockerregistry
-fi
-if [ -z "$__RUNRICENV_DOCKER_IP__" ]; then
- export __RUNRICENV_DOCKER_IP__=$dockerip
-fi
-if [ -z "$__RUNRICENV_DOCKER_PORT__" ]; then
- export __RUNRICENV_DOCKER_PORT__=$dockerport
-fi
-if [ -z "$__RUNRICENV_DOCKER_USER__" ]; then
- export __RUNRICENV_DOCKER_USER__=$dockeruser
-fi
-if [ -z "$__RUNRICENV_DOCKER_PASS__" ]; then
- export __RUNRICENV_DOCKER_PASS__=$dockerpassword
-fi
-if [ -z "$__RUNRICENV_HELMREPO_HOST__" ]; then
- export __RUNRICENV_HELMREPO_HOST__=$helmrepo
-fi
-if [ -z "$__RUNRICENV_HELMREPO_PORT__" ]; then
- export __RUNRICENV_HELMREPO_PORT__=$helmport
-fi
-if [ -z "$__RUNRICENV_HELMREPO_IP__" ]; then
- export __RUNRICENV_HELMREPO_IP__=$helmip
-fi
-if [ -z "$__RUNRICENV_HELMREPO_USER__" ]; then
- export __RUNRICENV_HELMREPO_USER__=$helmuser
-fi
-if [ -z "$__RUNRICENV_HELMREPO_PASS__" ]; then
- export __RUNRICENV_HELMREPO_PASS__=$helmpassword
-fi
-
-
-
-
-# for RIC R0 we keep 1.13
-export KUBEV="1.13.3"
-export KUBECNIV="0.6.0"
-export DOCKERV="18.06.1"
-
-# for new 1.14 release
-#export KUBEVERSION="1.14.0"
-#export KUBECNIVERSION="0.7.0"
-#export DOCKEFV="18.06.1"
-
-export HELMV="2.12.3"
-
-unset FIRSTBOOT
-unset DORESET
-
-while getopts ":r" opt; do
- case ${opt} in
- r )
- DORESET='YES'
- ;;
- \? )
- echo "Usage: $0 [-r]"
- exit
- ;;
- esac
-done
-
-
-if [ ! -e /var/tmp/firstboot4setupk8s ]; then
- echo "First time"
- FIRSTBOOT='YES'
- touch /var/tmp/firstboot4setupk8s
-
- modprobe -- ip_vs
- modprobe -- ip_vs_rr
- modprobe -- ip_vs_wrr
- modprobe -- ip_vs_sh
- modprobe -- nf_conntrack_ipv4
-
- # disable swap
- SWAPFILES=$(grep swap /etc/fstab | sed '/^#/ d' |cut -f1 -d' ')
- if [ ! -z $SWAPFILES ]; then
- for SWAPFILE in $SWAPFILES
- do
- echo "disabling swap file $SWAPFILE"
- if [[ $SWAPFILE == UUID* ]]; then
- UUID=$(echo $SWAPFILE | cut -f2 -d'=')
- swapoff -U $UUID
- else
- swapoff $SWAPFILE
- fi
- # edit /etc/fstab file, remove line with /swapfile
- sed -i -e "/$SWAPFILE/d" /etc/fstab
- done
- fi
- # disable swap
- #swapoff /swapfile
- # edit /etc/fstab file, remove line with /swapfile
- #sed -i -e '/swapfile/d' /etc/fstab
-
-
- # add rancodev CI tool hostnames
- echo "${__RUNRICENV_GERRIT_IP__} ${__RUNRICENV_GERRIT_HOST__}" >> /etc/hosts
- echo "${__RUNRICENV_DOCKER_IP__} ${__RUNRICENV_DOCKER_HOST__}" >> /etc/hosts
- echo "${__RUNRICENV_HELMREPO_IP__} ${__RUNRICENV_HELMREPO_HOST__}" >> /etc/hosts
-
-
- # create kubenetes config file
- if [[ ${KUBEV} == 1.13.* ]]; then
- cat <<EOF >/root/config.yaml
-apiVersion: kubeadm.k8s.io/v1alpha3
-kubernetesVersion: v${KUBEV}
-kind: ClusterConfiguration
-apiServerExtraArgs:
- feature-gates: SCTPSupport=true
-networking:
- dnsDomain: cluster.local
- podSubnet: 10.244.0.0/16
- serviceSubnet: 10.96.0.0/12
-
----
-apiVersion: kubeproxy.config.k8s.io/v1alpha1
-kind: KubeProxyConfiguration
-mode: ipvs
-EOF
- elif [[ ${KUBEV} == 1.14.* ]]; then
- cat <<EOF >/root/config.yaml
-apiVersion: kubeadm.k8s.io/v1beta1
-kubernetesVersion: v${KUBEV}
-kind: ClusterConfiguration
-apiServerExtraArgs:
- feature-gates: SCTPSupport=true
-networking:
- dnsDomain: cluster.local
- podSubnet: 10.244.0.0/16
- serviceSubnet: 10.96.0.0/12
-
----
-apiVersion: kubeproxy.config.k8s.io/v1alpha1
-kind: KubeProxyConfiguration
-mode: ipvs
-EOF
- else
- echo "Unsupported Kubernetes version requested. Bail."
- exit
- fi
-
-
- # create a RBAC file for helm (tiller)
- cat <<EOF > /root/rbac-config.yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: tiller
- namespace: kube-system
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: tiller
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: cluster-admin
-subjects:
- - kind: ServiceAccount
- name: tiller
- namespace: kube-system
-EOF
-
-
- KUBEVERSION="${KUBEV}-00"
- CNIVERSION="${KUBECNIV}-00"
- DOCKERVERSION="${DOCKERV}-0ubuntu1.2~16.04.1"
- curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
- echo 'deb http://apt.kubernetes.io/ kubernetes-xenial main' > /etc/apt/sources.list.d/kubernetes.list
-
- # install low latency kernel, docker.io, and kubernetes
- apt-get update
- apt-get install -y linux-image-4.15.0-45-lowlatency docker.io=${DOCKERVERSION}
- apt-get install -y kubernetes-cni=${CNIVERSION}
- apt-get install -y --allow-unauthenticated kubeadm=${KUBEVERSION} kubelet=${KUBEVERSION} kubectl=${KUBEVERSION}
- apt-mark hold kubernetes-cni kubelet kubeadm kubectl
-
- # install Helm
- HELMVERSION=${HELMV}
- cd /root
- mkdir Helm
- cd Helm
- wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELMVERSION}-linux-amd64.tar.gz
- tar -xvf helm-v${HELMVERSION}-linux-amd64.tar.gz
- mv linux-amd64/helm /usr/local/bin/helm
-
-
- # add cert for accessing docker registry in Azure
- mkdir -p /etc/docker/certs.d/${__RUNRICENV_DOCKER_HOST__}:${__RUNRICENV_DOCKER_PORT__}
- cat <<EOF >/etc/docker/ca.crt
------BEGIN CERTIFICATE-----
-MIIEPjCCAyagAwIBAgIJAIwtTKgVAnvrMA0GCSqGSIb3DQEBCwUAMIGzMQswCQYD
-VQQGEwJVUzELMAkGA1UECAwCTkoxEzARBgNVBAcMCkJlZG1pbnN0ZXIxDTALBgNV
-BAoMBEFUJlQxETAPBgNVBAsMCFJlc2VhcmNoMTswOQYDVQQDDDIqLmRvY2tlci5y
-YW5jby1kZXYtdG9vbHMuZWFzdHVzLmNsb3VkYXBwLmF6dXJlLmNvbTEjMCEGCSqG
-SIb3DQEJARYUcmljQHJlc2VhcmNoLmF0dC5jb20wHhcNMTkwMTI0MjA0MzIzWhcN
-MjQwMTIzMjA0MzIzWjCBszELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5KMRMwEQYD
-VQQHDApCZWRtaW5zdGVyMQ0wCwYDVQQKDARBVCZUMREwDwYDVQQLDAhSZXNlYXJj
-aDE7MDkGA1UEAwwyKi5kb2NrZXIucmFuY28tZGV2LXRvb2xzLmVhc3R1cy5jbG91
-ZGFwcC5henVyZS5jb20xIzAhBgkqhkiG9w0BCQEWFHJpY0ByZXNlYXJjaC5hdHQu
-Y29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuAW1O52l9/1L+D7x
-Qv+782FjiELP9MWO0RCAL2JzV6Ioeo1IvuZl8jvXQVGlowanCdz1HZlMJjGm6Ybv
-60dVECRSMZeOxUQ0JCus6thxOhDiiCFT59m+MpdrRgHqwOzw+8B49ZwULv+lTIWt
-ETEQkSYTh4No9jCxnyVLKH9DyTbaW/xFK484u5f4bh7mI5uqDJapOCRvJXv8/J0E
-eMrkCVmk5qy0ii8I7O0oCNl61YvC5by9GCeuQhloJJc6gOjzKW8nK9JfUW8G34bC
-qnUj79EgwgtW/8F5SYAF5LI0USM0xXjyzlnPMbv5mikrbf0EZkZXdUreICUIzY53
-HRocCQIDAQABo1MwUTAdBgNVHQ4EFgQUm9NbNhZ3Zp1f50DIN4/4fvWQSNswHwYD
-VR0jBBgwFoAUm9NbNhZ3Zp1f50DIN4/4fvWQSNswDwYDVR0TAQH/BAUwAwEB/zAN
-BgkqhkiG9w0BAQsFAAOCAQEAkbuqbuMACRmzMXFKoSsMTLk/VRQDlKeubdP4lD2t
-Z+2dbhfbfiae9oMly7hPCDacoY0cmlBb2zZ8lgA7kVvuw0xwX8mLGYfOaNG9ENe5
-XxFP8MuaCySy1+v5CsNnh/WM3Oznc6MTv/0Nor2DeY0XHQtM5LWrqyKGZaVAKpMW
-5nHG8EPIZAOk8vj/ycg3ca3Wv3ne9/8rbrrxDJ3p4L70DOtz/JcQai10Spct4S0Z
-7yd4tQL+QSQCvmN7Qm9+i52bY0swYrUAhbNiEX3yJDryKjSCPirePcieGZmBRMxr
-7j28jxpa4g32TbWR/ZdxMYEkCVTFViTE23kZdNvahHKfdQ==
------END CERTIFICATE-----
-EOF
- cp /etc/docker/ca.crt /etc/docker/certs.d/${__RUNRICENV_DOCKER_HOST__}:${__RUNRICENV_DOCKER_PORT__}/ca.crt
- service docker restart
- systemctl enable docker.service
- docker login -u ${__RUNRICENV_DOCKER_USER__} -p ${__RUNRICENV_DOCKER_PASS__} ${__RUNRICENV_DOCKER_HOST__}:${__RUNRICENV_DOCKER_PORT__}
- docker pull ${__RUNRICENV_DOCKER_HOST__}:${__RUNRICENV_DOCKER_PORT__}/whoami:0.0.1
-
-
- # test access to k8s docker registry
- kubeadm config images pull
-else
- echo "Not first boot"
-
- kubectl get pods --all-namespaces
-fi
-
-
-if [ -n "$DORESET" ]; then
- kubeadm reset
-fi
-
-if [ -n ${DORESET+set} ] || [ -n ${FIRSTBOOT+set} ]; then
- # start cluster (make sure CIDR is enabled with the flag)
- kubeadm init --config /root/config.yaml
-
- # set up kubectl credential and config
- cd /root
- rm -rf .kube
- mkdir -p .kube
- cp -i /etc/kubernetes/admin.conf /root/.kube/config
- chown root:root /root/.kube/config
-
- # at this point we should be able to use kubectl
- kubectl get pods --all-namespaces
- # you will see the DNS pods stuck in pending state. They are waiting for some networking to be installed.
-
- # install flannel
- # kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
- kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/bc79dd1505b0c8681ece4de4c0d86c5cd2643275/Documentation/kube-flannel.yml
-
- # waiting for all kube-system pods to be in running state
- NUMPODS=0
- while [ $NUMPODS -lt 8 ]; do
- sleep 5
- OUTPUT=$(kubectl get pods --all-namespaces |grep Running)
- NUMPODS=$(echo "$OUTPUT" | wc -l)
- echo "Waiting for $NUMPODS / 8 kube-system pods reaching Running state"
- done
-
- # if running a single node cluster, need to enable master node to run pods
- kubectl taint nodes --all node-role.kubernetes.io/master-
-
- cd /root
- # install RBAC for Helm
- kubectl create -f rbac-config.yaml
-
- rm -rf .helm
- helm init --service-account tiller
-
-
- cat <<EOF >/etc/ca-certificates/update.d/helm.crt
------BEGIN CERTIFICATE-----
-MIIESjCCAzKgAwIBAgIJAIU+AfULkw0PMA0GCSqGSIb3DQEBCwUAMIG5MQswCQYD
-VQQGEwJVUzETMBEGA1UECAwKTmV3IEplcnNleTETMBEGA1UEBwwKQmVkbWluc3Rl
-cjENMAsGA1UECgwEQVQmVDERMA8GA1UECwwIUmVzZWFyY2gxOTA3BgNVBAMMMCou
-aGVsbS5yYW5jby1kZXYtdG9vbHMuZWFzdHVzLmNsb3VkYXBwLmF6dXJlLmNvbTEj
-MCEGCSqGSIb3DQEJARYUcmljQHJlc2VhcmNoLmF0dC5jb20wHhcNMTkwMzIxMTU1
-MzAwWhcNMjEwMzIwMTU1MzAwWjCBuTELMAkGA1UEBhMCVVMxEzARBgNVBAgMCk5l
-dyBKZXJzZXkxEzARBgNVBAcMCkJlZG1pbnN0ZXIxDTALBgNVBAoMBEFUJlQxETAP
-BgNVBAsMCFJlc2VhcmNoMTkwNwYDVQQDDDAqLmhlbG0ucmFuY28tZGV2LXRvb2xz
-LmVhc3R1cy5jbG91ZGFwcC5henVyZS5jb20xIzAhBgkqhkiG9w0BCQEWFHJpY0By
-ZXNlYXJjaC5hdHQuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
-tguhSQx5Dk2w+qx2AOcFRz7IZBASEehK1Z4f5jz2KrRylGx6jjedCZASdm1b0ZEB
-/ZNrKht1zsWDETa7x0DF+q0Z2blff+T+6+YrJWhNxYHgZiYVi9gTuNDzpn8VVn7f
-+cQxcMguHo1JBDIotOLubJ4T3/oXMCPv9kRSLHcNjbEE2yTB3AqXu9dvrDXuUdeU
-ot6RzxhKXxRCQXPS2/FDjSV9vr9h1dv5fIkFXihpYaag0XqvXcqgncvcOJ1SsLc3
-DK+tyNknqG5SL8y2a7U4F7u+qGO2/3tnCO0ggYwa73hS0pQPY51EpRSckZqlfKEu
-Ut0s3wlEFP1VaU0RfU3aIwIDAQABo1MwUTAdBgNVHQ4EFgQUYTpoVXZPXSR/rhjr
-pu9PPhL7f9IwHwYDVR0jBBgwFoAUYTpoVXZPXSR/rhjrpu9PPhL7f9IwDwYDVR0T
-AQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAUDLbiKVIW6W9qFXLtoyO7S2e
-IOUSZ1F70pkfeYUqegsfFZ9njPtPqTzDfJVxYqH2V0vxxoAxXCYCpNyR6vYlYiEL
-R+oyxuvauW/yCoiwKBPYa4fD/PBajJnEO1EfIwZvjFLIfw4GjaX59+zDS3Zl0jT/
-w3uhPSsJAYXtDKLZ14btA27cM5mW4kmxVD8CRdUW0jr/cN3Hqe9uLSNWCNiDwma7
-RnpK7NnOgXHyhZD/nVC0nY7OzbK7VHFJatSOjyuMxgWsFGahwYNxf3AWfPwUai0K
-ne/fVFGZ6ifR9QdD0SuKIAEuqSyyP4BsQ92uEweU/gWKsnM6iNVmNFX8UOuU9A==
------END CERTIFICATE-----
-EOF
-
- # waiting for tiller pod to be in running state
- NUMPODS=0
- while [ $NUMPODS -lt 1 ]; do
- sleep 5
- OUTPUT=$(kubectl get pods --all-namespaces |grep Running)
- NUMPODS=$(echo "$OUTPUT" | grep "tiller-deploy" | wc -l)
- echo "Waiting for $NUMPODS / 1 tiller-deploy pod reaching Running state"
- done
-
- echo "All up"
-
- #reboot
-fi
--- /dev/null
+#!/bin/bash
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+
+stack_name="ric"
+full_deletion=false
+
+#WORKSPACE=`/home/ubuntu/deploy-oom-onap/integration-master/integration`
+#echo $WORKSPACE
+
+if [ -z "$WORKSPACE" ]; then
+ export WORKSPACE=`pwd`
+fi
+
+
+openstack --version > /dev/null
+if [ $? -eq 0 ]; then
+ echo OK
+else
+ echo "Must run in an environment with openstack cli"
+ exit 1
+fi
+
+if [ -z "$OS_USERNAME" ]; then
+ echo "Must source the Openstack RC file for the target installation tenant"
+ exit 1
+fi
+
+
+usage() {
+ echo "Usage: $0 [ -s <stack name> ]" 1>&2;
+
+ echo "s: Set the name to be used for stack. This name will be used for naming of resources" 1>&2;
+ exit 1;
+}
+
+
+while getopts ":n:s:m:rq6" o; do
+ case "${o}" in
+ s)
+ if [[ ! ${OPTARG} =~ ^[0-9]+$ ]];then
+ stack_name=${OPTARG}
+ else
+ usage
+ fi
+ ;;
+ *)
+ usage
+ ;;
+ esac
+done
+shift $((OPTIND-1))
+
+if [ "$#" -gt 0 ]; then
+ usage
+fi
+
+
+openstack stack delete $stack_name
+
+exit 0
--- /dev/null
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+
+
+# customize the following repo info to local infrastructure
+# Gerrit code repo server
+gerrithost=""
+# Gerrit code repo server IP
+gerritip=""
+
+# Docker registry host name
+dockerregistry=""
+# Docker registry IP (if need to create local /etc/hosts entry)
+dockerip=""
+# Docker registry port
+dockerport=""
+# Docker registry user name
+dockeruser=""
+# Docker registry password
+dockerpassword=""
+# Docker registry CA certificate (if using self-signed cert)
+dockercert=''
+
+# Helm repo host name
+helmrepo=helm.ricinfra.local
+# Helm repo port
+helmport=""
+# Helm repo IP (if need to create local /etc/hosts entry)
+helmip=""
+# Helm repo user name
+helmuser=""
+# Helm repo password
+helmpassword=""
+# Helm repo CA certificate (if using self-signed cert)
+helmcert=''
--- /dev/null
+# modify below for RIC infrastructure (docker-k8s-helm) component versions
+INFRA_DOCKER_VERSION="18.06.1"
+INFRA_K8S_VERSION="1.13.3"
+INFRA_CNI_VERSION="0.6.0"
+INFRA_HELM_VERSION="2.12.3"
+
--- /dev/null
+# modify this section based on the deployment openstack instance's
+# OpenStack API access RC file
+OS_AUTH_URL=""
+OS_PROJECT_ID=""
+OS_PROJECT_NAME=""
+OS_USER_DOMAIN_NAME=""
+OS_USERNAME=""
+OS_PASSWORD=""
+OS_REGION_NAME=""
+OS_INTERFACE=""
+OS_IDENTITY_API_VERSION=""
+
+# ssh key pair used for accessing individual VMs
+# the private key file
+VM_SSH_KEY=""
+# the name of the ssh public key uploaded to the openstack instance
+OS_KEY_NAME=""
+
+# Ubuntu 16.04 VM image name used for launching VMs
+OS_IMAGE_NAME=""
+
+# the UUID of the public net (floating IP net) of the openstack instance
+OS_PUBLIC_NET_ID=""
+
+# the UUID of the public IPv6 net of the openstack instance
+OS_PUBLIC_V6NET_ID=""
+
+# host:port of the APT proxy
+OS_APT_PROXY=""
+# host:port of the Docker proxy
+OS_DOCKER_PROXY=""
+
--- /dev/null
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+
+# HEAT stack deploy env file
+# customize the following for local OpenStack parameters
+# ubuntu_1604_image: local OpenStack's image name for Ubuntu 1604
+# apt_proxy: host and port of the apt server
+# docker_proxy: host and port of the docker registry
+# k8s_vm_flavor: dimension of the VMs for the k8s cluster nodes
+# public_net_id: the UUID of the IPv4 network where VMs get their IPv4 floating IP addresses
+# public_v6net_id: the UUID of the IPv6 network where the VMs connect their IPv6 interfaces to
+# int_net_cidr: the address space of the internal network connecting all VMs of the stack
+# helm_override_yaml: the configurations for Helm deployments
+parameters:
+ ubuntu_1604_image: "${OS_IMAGE_NAME}"
+ apt_proxy: "${OS_APT_PROXY}"
+ docker_proxy: "${OS_DOCKER_PROXY}"
+ public_net_id: "${OS_PUBLIC_NET_ID}"
+ public_v6net_id: "${OS_PUBLIC_V6NET_ID}"
+ key_name: "${OS_KEY_NAME}"
+
+ k8s_vm_flavor: m1.large
+ int_net_cidr: 10.0.0.0/16
+
+ helm_deploy_delay: 4m
+
+ helm_override_yaml: >
+ global:
+ repository: __docker_proxy__
+ pullPolicy: IfNotPresent
--- /dev/null
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+
+# HEAT stack deploy env file
+# customize the following for local OpenStack parameters
+# ubuntu_1604_image: local OpenStack's image name for Ubuntu 1604
+# apt_proxy: host and port of the apt server
+# docker_proxy: host and port of the docker registry
+# k8s_vm_flavor: dimension of the VMs for the k8s cluster nodes
+# public_net_id: the UUID of the IPv4 network where VMs get their IPv4 floating IP addresses
+# public_v6net_id: the UUID of the IPv6 network where the VMs connect their IPv6 interfaces to
+# int_net_cidr: the address space of the internal network connecting all VMs of the stack
+# helm_override_yaml: the configurations for Helm deployments
+parameters:
+ ubuntu_1604_image: "${OS_IMAGE_NAME}"
+ apt_proxy: "${OS_APT_PROXY}"
+ docker_proxy: "${OS_DOCKER_PROXY}"
+ public_net_id: "${OS_PUBLIC_NET_ID}"
+ public_v6net_id: "${OS_PUBLIC_V6NET_ID}"
+ key_name: "${OS_KEY_NAME}"
+
+ docker_version: "${INFRA_DOCKER_VERSION}"
+ k8s_version: "${INFRA_K8S_VERSION}"
+ k8s_cni_version: "${INFRA_CNI_VERSION}"
+ helm_version: "${INFRA_HELM_VERSION}"
+
+ k8s_vm_flavor: m1.medium
+ int_net_cidr: 10.0.0.0/16
+
+ helm_deploy_delay: 4m
+
+ helm_override_yaml: >
+ global:
+ repository: __docker_proxy__
+ pullPolicy: IfNotPresent
--- /dev/null
+heat_template_version: 2015-10-15
+description: OpenStack HOT for RIC Kubernetes cluster
+
+parameters:
+ docker_proxy:
+ type: string
+ default: ""
+
+ apt_proxy:
+ type: string
+ default: ""
+
+ public_net_id:
+ type: string
+ description: The ID of the Public network for floating IP address allocation
+
+ public_v6net_id:
+ type: string
+ description: The ID of the IPv6 provider network for ipv6 interface IP address allocation
+
+ int_net_cidr:
+ type: string
+ description: CIDR of the OAM ONAP network
+
+ ubuntu_1604_image:
+ type: string
+ description: Name of the Ubuntu 16.04 image
+
+ k8s_vm_flavor:
+ type: string
+ description: VM flavor for k8s hosts
+
+ helm_override_yaml:
+ type: string
+ description: Content for helm_override.yaml
+
+ docker_manifest:
+ type: string
+ default: ""
+
+ key_name:
+ type: string
+ default: "id-oran-int"
+
+ docker_version:
+ type: string
+ default: "18.06.1"
+
+ k8s_version:
+ type: string
+ default: "1.13.3"
+
+ k8s_cni_version:
+ type: string
+ default: "0.6.0"
+
+ helm_version:
+ type: string
+ default: "2.12.3"
+
+ helm_deploy_delay:
+ type: string
+ default: "2m"
+
+ use_ramdisk:
+ type: string
+ description: Set to "true" if you want to use a RAM disk for /dockerdata-nfs/.
+ default: "false"
+
+ mtu:
+ type: number
+ default: 1500
+
+resources:
+ random-str:
+ type: OS::Heat::RandomString
+ properties:
+ length: 4
+
+ cinder_volume:
+ type: OS::Cinder::Volume
+ properties:
+ size: 5
+ name:
+ str_replace:
+ template: volume_rand
+ params:
+ rand: { get_resource: random-str }
+
+ # RIC security group
+ ric_sg:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: base_rand
+ params:
+ base: ric_sg
+ rand: { get_resource: random-str }
+ description: security group for RIC traffic
+ rules:
+ # All egress traffic
+ - direction: egress
+ ethertype: IPv4
+ - direction: egress
+ ethertype: IPv6
+ # ingress traffic
+ # ICMP
+ - protocol: icmp
+ ethertype: IPv4
+ - protocol: icmp
+ ethertype: IPv6
+ - protocol: udp
+ ethertype: IPv4
+ port_range_min: 1
+ port_range_max: 65535
+ - protocol: udp
+ ethertype: IPv6
+ port_range_min: 1
+ port_range_max: 65535
+ - protocol: tcp
+ ethertype: IPv4
+ port_range_min: 1
+ port_range_max: 65535
+ - protocol: tcp
+ ethertype: IPv6
+ port_range_min: 1
+ port_range_max: 65535
+ # additional IP Protocols to allow
+ # SCTP
+ - protocol: 132
+ ethertype: IPv4
+ - protocol: 132
+ ethertype: IPv6
+
+
+ # RIC internal network
+ int_network:
+ type: OS::Neutron::Net
+ properties:
+ name:
+ str_replace:
+ template: ric_network_rand
+ params:
+ rand: { get_resource: random-str }
+
+ int_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ name:
+ str_replace:
+ template: oam_network_rand
+ params:
+ rand: { get_resource: random-str }
+ network_id: { get_resource: int_network }
+ cidr: { get_param: int_net_cidr }
+ dns_nameservers: [ "8.8.8.8" ]
+
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ name:
+ list_join: ['-', [{ get_param: 'OS::stack_name' }, 'router']]
+ external_gateway_info:
+ network: { get_param: public_net_id }
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: int_subnet }
+
--- /dev/null
+heat_template_version: 2015-10-15
+description: OpenStack HOT for RIC Kubernetes cluster
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+
+parameters:
+ docker_proxy:
+ type: string
+ default: ""
+
+ apt_proxy:
+ type: string
+ default: ""
+
+ public_net_id:
+ type: string
+ description: The ID of the Public network for floating IP address allocation
+
+ public_v6net_id:
+ type: string
+ description: The ID of the IPv6 provider network for ipv6 interface IP address allocation
+
+ int_net_cidr:
+ type: string
+ description: CIDR of the OAM ONAP network
+
+ ubuntu_1604_image:
+ type: string
+ description: Name of the Ubuntu 16.04 image
+
+ k8s_vm_flavor:
+ type: string
+ description: VM flavor for k8s hosts
+
+ helm_override_yaml:
+ type: string
+ description: Content for helm_override.yaml
+
+ docker_manifest:
+ type: string
+ default: ""
+
+ key_name:
+ type: string
+ default: "id-oran-int"
+
+ docker_version:
+ type: string
+ default: "18.06.1"
+
+ k8s_version:
+ type: string
+ default: "1.13.3"
+
+ k8s_cni_version:
+ type: string
+ default: "0.6.0"
+
+ helm_version:
+ type: string
+ default: "2.12.3"
+
+ helm_deploy_delay:
+ type: string
+ default: "2m"
+
+ use_ramdisk:
+ type: string
+ description: Set to "true" if you want to use a RAM disk for /dockerdata-nfs/.
+ default: "false"
+
+ mtu:
+ type: number
+ default: 1500
+
+resources:
+ random-str:
+ type: OS::Heat::RandomString
+ properties:
+ length: 4
+
+ cinder_volume:
+ type: OS::Cinder::Volume
+ properties:
+ size: 5
+ name:
+ str_replace:
+ template: volume_rand
+ params:
+ rand: { get_resource: random-str }
+
+ # RIC security group
+ ric_sg:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ name:
+ str_replace:
+ template: base_rand
+ params:
+ base: ric_sg
+ rand: { get_resource: random-str }
+ description: security group for RIC traffic
+ rules:
+ # All egress traffic
+ - direction: egress
+ ethertype: IPv4
+ - direction: egress
+ ethertype: IPv6
+ # ingress traffic
+ # ICMP
+ - protocol: icmp
+ ethertype: IPv4
+ - protocol: icmp
+ ethertype: IPv6
+ - protocol: udp
+ ethertype: IPv4
+ port_range_min: 1
+ port_range_max: 65535
+ - protocol: udp
+ ethertype: IPv6
+ port_range_min: 1
+ port_range_max: 65535
+ - protocol: tcp
+ ethertype: IPv4
+ port_range_min: 1
+ port_range_max: 65535
+ - protocol: tcp
+ ethertype: IPv6
+ port_range_min: 1
+ port_range_max: 65535
+ # additional IP Protocols to allow
+ # SCTP
+ - protocol: 132
+ ethertype: IPv4
+ - protocol: 132
+ ethertype: IPv6
+
+
+ # RIC internal network
+ int_network:
+ type: OS::Neutron::Net
+ properties:
+ name:
+ str_replace:
+ template: ric_network_rand
+ params:
+ rand: { get_resource: random-str }
+
+ int_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ name:
+ str_replace:
+ template: oam_network_rand
+ params:
+ rand: { get_resource: random-str }
+ network_id: { get_resource: int_network }
+ cidr: { get_param: int_net_cidr }
+ dns_nameservers: [ "8.8.8.8" ]
+
+
+ router:
+ type: OS::Neutron::Router
+ properties:
+ name:
+ list_join: ['-', [{ get_param: 'OS::stack_name' }, 'router']]
+ external_gateway_info:
+ network: { get_param: public_net_id }
+
+ router_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: { get_resource: router }
+ subnet_id: { get_resource: int_subnet }
+
--- /dev/null
+ ${VM_TYPE}_${VM_NUM}_private_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_resource: int_network }
+ fixed_ips: [{"subnet": { get_resource: int_subnet }}]
+ security_groups:
+ - { get_resource: ric_sg }
+
+ ${VM_TYPE}_${VM_NUM}_ipv6_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_param: public_v6net_id }
+ security_groups:
+ - { get_resource: ric_sg }
+
+ ${VM_TYPE}_${VM_NUM}_floating_ip:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network_id: { get_param: public_net_id }
+ port_id: { get_resource: ${VM_TYPE}_${VM_NUM}_private_port }
+
+ ${VM_TYPE}_${VM_NUM}_vm_scripts:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ power_state:
+ mode: reboot
+ runcmd:
+ - [ /opt/k8s_vm_install.sh ]
+ write_files:
+ - path: /opt/k8s_vm_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __docker_version__: { get_param: docker_version }
+ __k8s_version__: { get_param: k8s_version }
+ __k8s_cni_version__: { get_param: k8s_cni_version }
+ __helm_version__: { get_param: helm_version }
+ __host_private_ip_addr__: { get_attr: [${VM_TYPE}_${VM_NUM}_floating_ip, fixed_ip_address] }
+ __host_floating_ip_addr__: { get_attr: [${VM_TYPE}_${VM_NUM}_floating_ip, floating_ip_address] }
+ __host_private_ipv6_addr__: { get_attr: [${VM_TYPE}_${VM_NUM}_ipv6_port, fixed_ips, 0, ip_address ] }
+ __k8s_mst_floating_ip_addr__: { get_attr: [${VM_TYPE}_mst_floating_ip, floating_ip_address] }
+ __k8s_mst_private_ip_addr__: { get_attr: [${VM_TYPE}_mst_floating_ip, fixed_ip_address] }
+ __k8s_mst_private_ipv6_addr__: { get_attr: [${VM_TYPE}_mst_ipv6_port, fixed_ips, 0, ip_address] }
+ __mtu__: { get_param: mtu }
+ __cinder_volume_id__: { get_resource: cinder_volume }
+ __stack_name__: { get_param: 'OS::stack_name' }
+ template:
+ get_file: k8s_vm_install.sh
+ - path: /opt/k8s_vm_init.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_private_ip_addr__: { get_attr: [${VM_TYPE}_${VM_NUM}_floating_ip, fixed_ip_address] }
+ __host_label__: '$HOST_LABEL'
+ template:
+ get_file: k8s_vm_init.sh
+ - path: /opt/k8s_vm_aux_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_label__: '$HOST_LABEL'
+ template:
+ get_file: k8s_vm_aux_install.sh
+
+ - path: /opt/k8s_vm_custom_repos.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_label__: '$HOST_LABEL'
+ template:
+ get_file: k8s_vm_custom_repos.sh
+
+ - path: /etc/init.d/k8s_vm_init_serv
+ permissions: '0755'
+ content:
+ get_file: k8s_vm_init_serv.sh
+
+
+ ${VM_TYPE}_${VM_NUM}_vm_config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_resource: ${VM_TYPE}_${VM_NUM}_vm_scripts }
+
+ ${VM_TYPE}_${VM_NUM}_vm:
+ type: OS::Nova::Server
+ properties:
+ name:
+ list_join: ['-', [ { get_param: 'OS::stack_name' }, '${VM_TYPE}', '${VM_NUM}' ] ]
+ image: { get_param: ubuntu_1604_image }
+ flavor: { get_param: ${VM_TYPE}_vm_flavor }
+ key_name: { get_param: key_name }
+ networks:
+ - port: { get_resource: ${VM_TYPE}_${VM_NUM}_private_port }
+ - port: { get_resource: ${VM_TYPE}_${VM_NUM}_ipv6_port }
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: ${VM_TYPE}_${VM_NUM}_vm_config }
+
--- /dev/null
+ ${VM_TYPE}_${VM_NUM}_private_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_resource: int_network }
+ fixed_ips: [{"subnet": { get_resource: int_subnet }}]
+ security_groups:
+ - { get_resource: ric_sg }
+
+ ${VM_TYPE}_${VM_NUM}_floating_ip:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network_id: { get_param: public_net_id }
+ port_id: { get_resource: ${VM_TYPE}_${VM_NUM}_private_port }
+
+ ${VM_TYPE}_${VM_NUM}_vm_scripts:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ power_state:
+ mode: reboot
+ runcmd:
+ - [ /opt/k8s_vm_install.sh ]
+ write_files:
+ - path: /opt/k8s_vm_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __docker_version__: { get_param: docker_version }
+ __k8s_version__: { get_param: k8s_version }
+ __k8s_cni_version__: { get_param: k8s_cni_version }
+ __helm_version__: { get_param: helm_version }
+ __host_private_ip_addr__: { get_attr: [${VM_TYPE}_${VM_NUM}_floating_ip, fixed_ip_address] }
+ __k8s_mst_floating_ip_addr__: { get_attr: [${VM_TYPE}_mst_floating_ip, floating_ip_address] }
+ __k8s_mst_private_ip_addr__: { get_attr: [${VM_TYPE}_mst_floating_ip, fixed_ip_address] }
+ __mtu__: { get_param: mtu }
+ __cinder_volume_id__: { get_resource: cinder_volume }
+ __stack_name__: { get_param: 'OS::stack_name' }
+ template:
+ get_file: k8s_vm_install.sh
+ - path: /opt/k8s_vm_init.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_private_ip_addr__: { get_attr: [${VM_TYPE}_${VM_NUM}_floating_ip, fixed_ip_address] }
+ __host_label__: '$HOST_LABEL'
+ template:
+ get_file: k8s_vm_init.sh
+ - path: /etc/init.d/k8s_vm_init_serv
+ permissions: '0755'
+ content:
+ get_file: k8s_vm_init_serv.sh
+
+ ${VM_TYPE}_${VM_NUM}_vm_config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_resource: ${VM_TYPE}_${VM_NUM}_vm_scripts }
+
+ ${VM_TYPE}_${VM_NUM}_vm:
+ type: OS::Nova::Server
+ properties:
+ name:
+ list_join: ['-', [ { get_param: 'OS::stack_name' }, '${VM_TYPE}', '${VM_NUM}' ] ]
+ image: { get_param: ubuntu_1604_image }
+ flavor: { get_param: ${VM_TYPE}_vm_flavor }
+ key_name: { get_param: key_name }
+ networks:
+ - port: { get_resource: ${VM_TYPE}_${VM_NUM}_private_port }
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: ${VM_TYPE}_${VM_NUM}_vm_config }
+
--- /dev/null
+outputs:
+ cinder_volume_id:
+ value: {get_resource: cinder_volume}
--- /dev/null
+
+outputs:
+ cinder_volume_id:
+ value: {get_resource: cinder_volume}
--- /dev/null
+# this script installs AUX infrastructure components
+
+# continue only on AUX cluster
+CINDER_V_ID=$(cat /opt/config/cinder_volume_id.txt)
+cat <<EOF > ./cinder_pv.yaml
+apiVersion: "v1"
+kind: "PersistentVolume"
+metadata:
+ name: "cinder-pv"
+spec:
+ capacity:
+ storage: "5Gi"
+ accessModes:
+ - "ReadWriteOnce"
+ cinder:
+ fsType: "ext3"
+ volumeID: "$CINDER_V_ID"
+EOF
+kubectl create -f ./cinder_pv.yaml
+
+
+# install the logging stack: Elasticsearch, Fluentd, and Kibana
+LOGGING_NS="logging"
+kubectl create namespace "${LOGGING_NS}"
+while ! helm repo add incubator "https://kubernetes-charts-incubator.storage.googleapis.com/"; do
+ sleep 10
+done
+helm repo update
+helm install incubator/elasticsearch \
+ --namespace "${LOGGING_NS}" \
+ --name elasticsearch \
+ --set image.tag=6.7.0 \
+ --set data.terminationGracePeriodSeconds=0 \
+ --set master.persistence.enabled=false \
+ --set data.persistence.enabled=false
+helm install stable/fluentd-elasticsearch \
+ --name fluentd \
+ --namespace "${LOGGING_NS}" \
+ --set elasticsearch.host=elasticsearch-client.${LOGGING_NS}.svc.cluster.local \
+ --set elasticsearch.port=9200
+helm install stable/kibana \
+ --name kibana \
+ --namespace "${LOGGING_NS}" \
+ --set env.ELASTICSEARCH_URL=http://elasticsearch-client.${LOGGING_NS}.svc.cluster.local:9200 \
+ --set env.ELASTICSEARCH_HOSTS=http://elasticsearch-client.${LOGGING_NS}.svc.cluster.local:9200 \
+ --set env.SERVER_BASEPATH=/api/v1/namespaces/${LOGGING_NS}/services/kibana/proxy
+ #--set image.tag=6.4.2 \
+
+KIBANA_POD_NAME=$(kubectl get pods --selector=app=kibana -n "${LOGGING_NS}" \
+ --output=jsonpath="{.items..metadata.name}")
+wait_for_pods_running 1 "${LOGGING_NS}" "${KIBANA_POD_NAME}"
+
+
+# install prometheus
+PROMETHEUS_NS="monitoring"
+OPERATOR_POD_NAME="prometheus-prometheus-operator-prometheus-0"
+ALERTMANAGER_POD_NAME="alertmanager-prometheus-operator-alertmanager-0"
+helm install stable/prometheus-operator --name prometheus-operator --namespace "${PROMETHEUS_NS}"
+wait_for_pods_running 1 "${PROMETHEUS_NS}" "${OPERATOR_POD_NAME}"
+
+GRAFANA_POD_NAME=$(kubectl get pods --selector=app=grafana -n "${PROMETHEUS_NS}" \
+ --output=jsonpath="{.items..metadata.name}")
+
+
+
+cat <<EOF > ./ingress_lm.yaml
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ name: ingress-lm
+ annotations:
+ nginx.ingress.kubernetes.io/rewrite-target: /
+spec:
+ rules:
+ - http:
+ paths:
+ - path: /kibana
+ backend:
+ serviceName: kibana
+ servicePort: 5601
+ - path: /operator
+ backend:
+ serviceName: prometheus-operator-prometheus
+ servicePort: 9090
+ - path: /alertmanager
+ backend:
+ serviceName: prometheus-operator-alertmanager
+ servicePort: 9093
+ - path: /grafana
+ backend:
+ serviceName: prometheus-operator-grafana
+ servicePort: 3000
+EOF
+kubectl apply -f ./ingress_lm.yaml
+
--- /dev/null
+# add rancodev CI tool hostnames
+if [ ! -z "${__RUNRICENV_GERRIT_IP__}" ]; then
+ echo "${__RUNRICENV_GERRIT_IP__} ${__RUNRICENV_GERRIT_HOST__}" >> /etc/hosts
+fi
+
+if [ ! -z "${__RUNRICENV_DOCKER_IP__}" ]; then
+ echo "${__RUNRICENV_DOCKER_IP__} ${__RUNRICENV_DOCKER_HOST__}" >> /etc/hosts
+
+ if [ ! -z "${__RUNRICENV_DOCKER_CERT__}" ]; then
+ mkdir -p /etc/docker/certs.d/${__RUNRICENV_DOCKER_HOST__}:${__RUNRICENV_DOCKER_PORT__}
+ cat <<EOF >/etc/docker/ca.crt
+${__RUNRICENV_DOCKER_CERT__}
+EOF
+ cp /etc/docker/ca.crt \
+ /etc/docker/certs.d/${__RUNRICENV_DOCKER_HOST__}:${__RUNRICENV_DOCKER_PORT__}/ca.crt
+ fi
+
+ service docker restart
+ systemctl enable docker.service
+ docker login -u ${__RUNRICENV_DOCKER_USER__} -p ${__RUNRICENV_DOCKER_PASS__} \
+ ${__RUNRICENV_DOCKER_HOST__}:${__RUNRICENV_DOCKER_PORT__}
+ docker pull ${__RUNRICENV_DOCKER_HOST__}:${__RUNRICENV_DOCKER_PORT__}/whoami:0.0.1
+fi
+
+
+if [ ! -z "${__RUNRICENV_HELMREPO_IP__}" ]; then
+ echo "${__RUNRICENV_HELMREPO_IP__} ${__RUNRICENV_HELMREPO_HOST__}" >> /etc/hosts
+ if [ ! -z "${__RUNRICENV_HELMREPO_CERT__}" ]; then
+ cat <<EOF >/etc/ca-certificates/update.d/helm.crt
+${__RUNRICENV_HELMREPO_CERT__}
+EOF
+ fi
+fi
+
+#!/bin/bash -x
################################################################################
# Copyright (c) 2019 AT&T Intellectual Property. #
# Copyright (c) 2019 Nokia. #
# limitations under the License. #
################################################################################
-
-# customize the following repo info to local infrastructure
-gerrithost=gerrit.o-ran-sc.org
-gerritip=35.165.179.212
-
-dockerregistry=nexus3.o-ran-sc.org
-dockerip=38.108.68.158
-dockerport=10004
-dockeruser=docker
-dockerpassword=docker
-
-helmrepo=helm.ricinfra.local
-helmport=30000
-helmip=''
-helmuser=helm
-helmpassword=helm
+echo "k8s_vm_init.sh"
--- /dev/null
+#!/bin/sh
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+### BEGIN INIT INFO
+# Provides: k8s_vm_init.sh
+# Required-Start: $remote_fs $syslog
+# Required-Stop: $remote_fs $syslog
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Start daemon at boot time
+# Description: Enable service provided by daemon.
+### END INIT INFO
+
+echo "k8s_vm_init_serv.sh"
+
+
+dir="/opt"
+cmd="./k8s_vm_init.sh"
+user="root"
+
+name=`basename $0`
+pid_file="/var/run/$name.pid"
+stdout_log="/var/log/$name.log"
+stderr_log="/var/log/$name.err"
+
+get_pid() {
+ cat "$pid_file"
+}
+
+is_running() {
+ [ -f "$pid_file" ] && ps `get_pid` > /dev/null 2>&1
+}
+
+case "$1" in
+ start)
+ if is_running; then
+ echo "Already started"
+ else
+ echo "Starting $name"
+ cd "$dir"
+ if [ -z "$user" ]; then
+ sudo $cmd >> "$stdout_log" 2>> "$stderr_log" &
+ else
+ sudo -u "$user" $cmd >> "$stdout_log" 2>> "$stderr_log" &
+ fi
+ echo $! > "$pid_file"
+ if ! is_running; then
+ echo "Unable to start, see $stdout_log and $stderr_log"
+ exit 1
+ fi
+ fi
+ ;;
+ stop)
+ if is_running; then
+ echo -n "Stopping $name.."
+ kill `get_pid`
+ for i in {1..10}
+ do
+ if ! is_running; then
+ break
+ fi
+
+ echo -n "."
+ sleep 1
+ done
+ echo
+
+ if is_running; then
+ echo "Not stopped; may still be shutting down or shutdown may have failed"
+ exit 1
+ else
+ echo "Stopped"
+ if [ -f "$pid_file" ]; then
+ rm "$pid_file"
+ fi
+ fi
+ else
+ echo "Not running"
+ fi
+ ;;
+ restart)
+ $0 stop
+ if is_running; then
+ echo "Unable to stop, will not attempt to start"
+ exit 1
+ fi
+ $0 start
+ ;;
+ status)
+ if is_running; then
+ echo "Running"
+ else
+ echo "Stopped"
+ exit 1
+ fi
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|restart|status}"
+ exit 1
+ ;;
+esac
+
+exit 0
--- /dev/null
+#!/bin/bash -x
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+
+
+# first parameter: number of expected running pods
+# second parameter: namespace (all-namespaces means all namespaces)
+# third parameter: [optional] keyword
+wait_for_pods_running () {
+ NS="$2"
+ CMD="kubectl get pods --all-namespaces "
+ if [ "$NS" != "all-namespaces" ]; then
+ CMD="kubectl get pods -n $2 "
+ fi
+ KEYWORD="Running"
+ if [ "$#" == "3" ]; then
+ KEYWORD="${3}.*Running"
+ fi
+
+ CMD2="$CMD | grep \"$KEYWORD\" | wc -l"
+ NUMPODS=$(eval "$CMD2")
+ echo "waiting for $NUMPODS/$1 pods running in namespace [$NS] with keyword [$KEYWORD]"
+ while [ $NUMPODS -lt $1 ]; do
+ sleep 5
+ NUMPODS=$(eval "$CMD2")
+ echo "> waiting for $NUMPODS/$1 pods running in namespace [$NS] with keyword [$KEYWORD]"
+ done
+}
+
+
+# first parameter: interface name
+start_ipv6_if () {
+ # enable ipv6 interface
+ # standard Ubuntu cloud image does not have dual interface configuration or ipv6
+ IPv6IF="$1"
+ if ifconfig -a $IPv6IF; then
+ echo "" >> /etc/network/interfaces.d/50-cloud-init.cfg
+ #echo "auto ${IPv6IF}" >> /etc/network/interfaces.d/50-cloud-init.cfg
+ echo "allow-hotplug ${IPv6IF}" >> /etc/network/interfaces.d/50-cloud-init.cfg
+ echo "iface ${IPv6IF} inet6 auto" >> /etc/network/interfaces.d/50-cloud-init.cfg
+ #dhclient -r $IPv6IF
+ #systemctl restart networking
+ ifconfig ${IPv6IF} up
+ fi
+}
+
+echo "k8s_vm_install.sh"
+set -x
+export DEBIAN_FRONTEND=noninteractive
+echo "__host_private_ip_addr__ $(hostname)" >> /etc/hosts
+printenv
+
+mkdir -p /opt/config
+echo "__docker_version__" > /opt/config/docker_version.txt
+echo "__k8s_version__" > /opt/config/k8s_version.txt
+echo "__k8s_cni_version__" > /opt/config/k8s_cni_version.txt
+echo "__helm_version__" > /opt/config/helm_version.txt
+echo "__host_private_ip_addr__" > /opt/config/host_private_ip_addr.txt
+echo "__k8s_mst_floating_ip_addr__" > /opt/config/k8s_mst_floating_ip_addr.txt
+echo "__k8s_mst_private_ip_addr__" > /opt/config/k8s_mst_private_ip_addr.txt
+echo "__mtu__" > /opt/config/mtu.txt
+echo "__cinder_volume_id__" > /opt/config/cinder_volume_id.txt
+echo "__stack_name__" > /opt/config/stack_name.txt
+
+ISAUX='false'
+if [[ $(cat /opt/config/stack_name.txt) == *aux* ]]; then
+ ISAUX='true'
+fi
+
+modprobe -- ip_vs
+modprobe -- ip_vs_rr
+modprobe -- ip_vs_wrr
+modprobe -- ip_vs_sh
+modprobe -- nf_conntrack_ipv4
+modprobe -- nf_conntrack_ipv6
+modprobe -- nf_conntrack_proto_sctp
+
+start_ipv6_if ens4
+
+# disable swap
+SWAPFILES=$(grep swap /etc/fstab | sed '/^#/ d' |cut -f1 -d' ')
+if [ ! -z "$SWAPFILES" ]; then
+ for SWAPFILE in $SWAPFILES
+ do
+ if [ ! -z $SWAPFILE ]; then
+ echo "disabling swap file $SWAPFILE"
+ if [[ $SWAPFILE == UUID* ]]; then
+ UUID=$(echo $SWAPFILE | cut -f2 -d'=')
+ swapoff -U $UUID
+ else
+ swapoff $SWAPFILE
+ fi
+ # edit /etc/fstab file, remove line with /swapfile
+ sed -i -e "/$SWAPFILE/d" /etc/fstab
+ fi
+ done
+fi
+# disable swap
+#swapoff /swapfile
+# edit /etc/fstab file, remove line with /swapfile
+#sed -i -e '/swapfile/d' /etc/fstab
+
+
+DOCKERV=$(cat /opt/config/docker_version.txt)
+KUBEV=$(cat /opt/config/k8s_version.txt)
+KUBECNIV=$(cat /opt/config/k8s_cni_version.txt)
+
+KUBEVERSION="${KUBEV}-00"
+CNIVERSION="${KUBECNIV}-00"
+DOCKERVERSION="${DOCKERV}-0ubuntu1.2~16.04.1"
+curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
+echo 'deb http://apt.kubernetes.io/ kubernetes-xenial main' > /etc/apt/sources.list.d/kubernetes.list
+
+# install low latency kernel, docker.io, and kubernetes
+apt-get update
+apt-get install -y linux-image-4.15.0-45-lowlatency curl jq netcat docker.io=${DOCKERVERSION}
+apt-get install -y kubernetes-cni=${CNIVERSION}
+apt-get install -y --allow-unauthenticated kubeadm=${KUBEVERSION} kubelet=${KUBEVERSION} kubectl=${KUBEVERSION}
+apt-mark hold kubernetes-cni kubelet kubeadm kubectl
+
+
+# test access to k8s docker registry
+kubeadm config images pull
+
+
+# non-master nodes have hostnames ending with -[0-9][0-9]
+if [[ $(hostname) == *-[0-9][0-9] ]]; then
+ echo "Done for non-master node"
+ echo "Starting an NC TCP server on port 29999 to indicate we are ready"
+ nc -l -p 29999 &
+else
+ # below are steps for initializating master node, only run on the master node.
+ # minion node join will be triggered from the caller of the stack creation as ssh command.
+
+
+ # create kubenetes config file
+ if [[ ${KUBEV} == 1.13.* ]]; then
+ cat <<EOF >/root/config.yaml
+apiVersion: kubeadm.k8s.io/v1alpha3
+kubernetesVersion: v${KUBEV}
+kind: ClusterConfiguration
+apiServerExtraArgs:
+ feature-gates: SCTPSupport=true
+networking:
+ dnsDomain: cluster.local
+ podSubnet: 10.244.0.0/16
+ serviceSubnet: 10.96.0.0/12
+
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+mode: ipvs
+EOF
+
+ elif [[ ${KUBEV} == 1.14.* ]]; then
+ cat <<EOF >/root/config.yaml
+apiVersion: kubeadm.k8s.io/v1beta1
+kubernetesVersion: v${KUBEV}
+kind: ClusterConfiguration
+apiServerExtraArgs:
+ feature-gates: SCTPSupport=true
+networking:
+ dnsDomain: cluster.local
+ podSubnet: 10.244.0.0/16
+ serviceSubnet: 10.96.0.0/12
+
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+kind: KubeProxyConfiguration
+mode: ipvs
+EOF
+
+ else
+ echo "Unsupported Kubernetes version requested. Bail."
+ exit 1
+ fi
+
+
+ # create a RBAC file for helm (tiller)
+ cat <<EOF > /root/rbac-config.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: tiller
+ namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: tiller
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+ - kind: ServiceAccount
+ name: tiller
+ namespace: kube-system
+EOF
+
+ # start cluster (make sure CIDR is enabled with the flag)
+ kubeadm init --config /root/config.yaml
+
+
+ # install Helm
+ HELMV=$(cat /opt/config/helm_version.txt)
+ HELMVERSION=${HELMV}
+ cd /root
+ mkdir Helm
+ cd Helm
+ wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELMVERSION}-linux-amd64.tar.gz
+ tar -xvf helm-v${HELMVERSION}-linux-amd64.tar.gz
+ mv linux-amd64/helm /usr/local/bin/helm
+
+ # set up kubectl credential and config
+ cd /root
+ rm -rf .kube
+ mkdir -p .kube
+ cp -i /etc/kubernetes/admin.conf /root/.kube/config
+ chown root:root /root/.kube/config
+
+ # at this point we should be able to use kubectl
+ kubectl get pods --all-namespaces
+
+ # install flannel
+ kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/bc79dd1505b0c8681ece4de4c0d86c5cd2643275/Documentation/kube-flannel.yml
+
+
+ # waiting for all 8 kube-system pods to be in running state
+ # (at this point, minions have not joined yet)
+ wait_for_pods_running 8 kube-system
+
+ # if running a single node cluster, need to enable master node to run pods
+ kubectl taint nodes --all node-role.kubernetes.io/master-
+
+ cd /root
+ # install RBAC for Helm
+ kubectl create -f rbac-config.yaml
+
+
+ rm -rf /root/.helm
+ helm init --service-account tiller
+ export HELM_HOME="/root/.helm"
+
+ # waiting for tiller pod to be in running state
+ wait_for_pods_running 1 kube-system tiller-deploy
+
+ while ! helm version; do
+ echo "Waiting for Helm to be ready"
+ sleep 15
+ done
+
+
+ echo "Starting an NC TCP server on port 29999 to indicate we are ready"
+ nc -l -p 29999 &
+
+ echo "Done with master node setup"
+fi
+
+
+# add rancodev CI tool hostnames
+if [[ ! -z "${__RUNRICENV_GERRIT_IP__}" && ! -z "${__RUNRICENV_GERRIT_HOST__}" ]]; then
+ echo "${__RUNRICENV_GERRIT_IP__} ${__RUNRICENV_GERRIT_HOST__}" >> /etc/hosts
+fi
+if [[ ! -z "${__RUNRICENV_DOCKER_IP__}" && ! -z "${__RUNRICENV_DOCKER_HOST__}" ]]; then
+ echo "${__RUNRICENV_DOCKER_IP__} ${__RUNRICENV_DOCKER_HOST__}" >> /etc/hosts
+fi
+if [[ ! -z "${__RUNRICENV_HELMREPO_IP__}" && ! -z "${__RUNRICENV_HELMREPO_HOST__}" ]]; then
+ echo "${__RUNRICENV_HELMREPO_IP__} ${__RUNRICENV_HELMREPO_HOST__}" >> /etc/hosts
+fi
+
+if [ ! -z "${__RUNRICENV_HELMREPO_CERT__}" ]; then
+ cat <<EOF >/etc/ca-certificates/update.d/helm.crt
+${__RUNRICENV_HELMREPO_CERT__}
+EOF
+fi
+
+# add cert for accessing docker registry in Azure
+if [ ! -z "${__RUNRICENV_DOCKER_CERT__}" ]; then
+ mkdir -p /etc/docker/certs.d/${__RUNRICENV_DOCKER_HOST__}:${__RUNRICENV_DOCKER_PORT__}
+ cat <<EOF >/etc/docker/ca.crt
+${__RUNRICENV_DOCKER_CERT__}
+EOF
+ cp /etc/docker/ca.crt /etc/docker/certs.d/${__RUNRICENV_DOCKER_HOST__}:${__RUNRICENV_DOCKER_PORT__}/ca.crt
+
+ service docker restart
+ systemctl enable docker.service
+ docker login -u ${__RUNRICENV_DOCKER_USER__} -p ${__RUNRICENV_DOCKER_PASS__} ${__RUNRICENV_DOCKER_HOST__}:${__RUNRICENV_DOCKER_PORT__}
+ docker pull ${__RUNRICENV_DOCKER_HOST__}:${__RUNRICENV_DOCKER_PORT__}/whoami:0.0.1
+fi
+
--- /dev/null
+#!/bin/bash
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+
+OVERRIDEYAML=$1
+
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
+source $DIR/../etc/nexus.conf
+
+if [ -z "$RICINFRA_RELEASE_NAME" ];then
+ RELEASE_NAME=$helm_release_name
+else
+ RELEASE_NAME=$RICINFRA_RELEASE_NAME
+fi
+if [ -z "$RICINFRA_NAMESPACE" ];then
+ NAMESPACE=$namespace
+else
+ NAMESPACE=$RICINFRA_NAMESPACE
+fi
+if [ -z "$INGRESS_PORT" ];then
+ INGRESS_PORT_NEXUS=$ingress_port
+else
+ INGRESS_PORT_NEXUS=$INGRESS_PORT
+fi
+
+if [ -z "$DEFAULT_NEXUS_ADMIN_PASSWORD" ];then
+ NEXUS_ADMIN_PASSWORD=$default_admin_password
+else
+ NEXUS_ADMIN_PASSWORD=$DEFAULT_NEXUS_ADMIN_PASSWORD
+fi
+
+
+
+HOSTPOSTFIX=$(cat $DIR/../helm/values.yaml | grep hostpostfix | awk '{print $2}')
+
+
+
+
+
+
+
+
+
+NEXUS_URL="http://nexus.$HOSTPOSTFIX:$INGRESS_PORT_NEXUS"
+
+
+
+
+
+
+if [ -z $OVERRIDEYAML ]; then
+
+ DOCKERPASSWORD=$(cat $DIR/../../20-Credential/helm/values.yaml | awk '/^.*repositoryCredential:.*/{getline; getline; print substr($2, 2, length($2)-2);}')
+ HELMPASSWORD=$(cat $DIR/../../20-Credential/helm/values.yaml | awk '/^.*helmrepoCredential:.*/{getline; getline; print substr($2, 2, length($2)-2);}')
+ ADMINPASSWORD=$(cat $DIR/../helm/values.yaml | awk '/^.*adminPassword:.*/{print $2}')
+
+else
+
+
+
+ DOCKERPASSWORD=$(cat $OVERRIDEYAML | awk '/^.*repositoryCredential:.*/{getline; getline; print substr($2, 2, length($2)-2);}')
+
+
+
+ if [ -z $DOCKERPASSWORD ]; then
+ DOCKERPASSWORD=$(cat $DIR/../../20-Credential/helm/values.yaml | awk '/^.*repositoryCredential:.*/{getline; getline; print substr($2, 2, length($2)-2);}')
+ fi
+
+ HELMPASSWORD=$(cat $OVERRIDEYAML | awk '/^.*helmrepoCredential:.*/{getline; getline; print substr($2, 2, length($2)-2);}')
+
+ if [ -z $HELMPASSWORD ]; then
+ HELMPASSWORD=$(cat $DIR/../../20-Credential/helm/values.yaml | awk '/^.*helmrepoCredential:.*/{getline; getline; print substr($2, 2, length($2)-2);}')
+ fi
+
+ ADMINPASSWORD=$(cat $OVERRIDEYAML | awk '/^.*adminPassword:.*/{print $2;}')
+ if [ -z $ADMINPASSWORD ]; then
+ ADMINPASSWORD=$(cat $DIR/../helm/values.yaml | awk '/^.*adminPassword:.*/{print $2;}')
+ fi
+
+fi
+
+
+
+
+#echo $DOCKERPASSWORD, $HELMPASSWORD, $ADMINPASSWORD
+
+
+
+
+DOCKERPORT=$(cat $DIR/../helm/templates/deployment.yaml | awk '/.*- name: docker.*/{getline; print $2}')
+
+
+DOCKERREPOSCRIPT="{\"name\":\"docker_changepassword\",\
+ \"type\":\"groovy\",\
+ \"content\":\"security.securitySystem.changePassword('docker', '$DOCKERPASSWORD')\"}"
+
+
+
+
+# This line uses the default admin password
+STATUS=$(curl -s -o /dev/null -w "%{http_code}" -u admin:$NEXUS_ADMIN_PASSWORD -X POST -H "Content-Type: application/json" --data "$DOCKERREPOSCRIPT" http://nexus.$HOSTPOSTFIX:$INGRESS_PORT_NEXUS/service/rest/v1/script)
+
+
+if [ "${STATUS}" != "204" ];then
+ echo "> script upload failed!"
+fi
+
+STATUS=$(curl -s -o /dev/null -w "%{http_code}" -u admin:$NEXUS_ADMIN_PASSWORD -X POST -H 'Content-Type: text/plain' -H 'Accept: application/json' http://nexus.$HOSTPOSTFIX:$INGRESS_PORT_NEXUS/service/rest/v1/script/docker_changepassword/run)
+
+
+if [ "${STATUS}" == "200" ];then
+ echo "> docker password change succeeded!"
+else
+ echo "> docker password change failed!"
+fi
+
+
+STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X DELETE -u admin:$NEXUS_ADMIN_PASSWORD http://nexus.$HOSTPOSTFIX:$INGRESS_PORT_NEXUS/service/rest/v1/script/docker_changepassword)
+
+
+if [ "${STATUS}" != "204" ];then
+ echo "> script deletion failed!"
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+HELMREPOSCRIPT="{\"name\":\"helm_changepassword\",\
+ \"type\":\"groovy\",\
+ \"content\":\"security.securitySystem.changePassword('helm', '$HELMPASSWORD')\"}"
+
+
+
+# This line uses the default admin password
+STATUS=$(curl -s -o /dev/null -w "%{http_code}" -u admin:$NEXUS_ADMIN_PASSWORD -X POST -H "Content-Type: application/json" --data "$HELMREPOSCRIPT" http://nexus.$HOSTPOSTFIX:$INGRESS_PORT_NEXUS/service/rest/v1/script)
+
+
+if [ "${STATUS}" != "204" ];then
+ echo "> script upload failed!"
+fi
+
+STATUS=$(curl -s -o /dev/null -w "%{http_code}" -u admin:$NEXUS_ADMIN_PASSWORD -X POST -H 'Content-Type: text/plain' -H 'Accept: application/json' http://nexus.$HOSTPOSTFIX:$INGRESS_PORT_NEXUS/service/rest/v1/script/helm_changepassword/run)
+
+
+if [ "${STATUS}" == "200" ];then
+ echo "> helm password change succeeded!"
+else
+ echo "> helm password change failed!"
+fi
+
+
+STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X DELETE -u admin:$NEXUS_ADMIN_PASSWORD http://nexus.$HOSTPOSTFIX:$INGRESS_PORT_NEXUS/service/rest/v1/script/helm_changepassword)
+
+
+if [ "${STATUS}" != "204" ];then
+ echo "> script deletion failed!"
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+ADMINSCRIPT="{\"name\":\"admin_changepassword\",\
+ \"type\":\"groovy\",\
+ \"content\":\"security.securitySystem.changePassword('admin', '$ADMINPASSWORD')\"}"
+
+
+
+# This line uses the default admin password
+STATUS=$(curl -s -o /dev/null -w "%{http_code}" -u admin:$NEXUS_ADMIN_PASSWORD -X POST -H "Content-Type: application/json" --data "$ADMINSCRIPT" http://nexus.$HOSTPOSTFIX:$INGRESS_PORT_NEXUS/service/rest/v1/script)
+
+
+if [ "${STATUS}" != "204" ];then
+ echo "> script upload failed!"
+fi
+
+STATUS=$(curl -s -o /dev/null -w "%{http_code}" -u admin:$NEXUS_ADMIN_PASSWORD -X POST -H 'Content-Type: text/plain' -H 'Accept: application/json' http://nexus.$HOSTPOSTFIX:$INGRESS_PORT_NEXUS/service/rest/v1/script/admin_changepassword/run)
+
+
+if [ "${STATUS}" == "200" ];then
+ echo "> admin password change succeeded!"
+else
+ echo "> admin password change failed!"
+fi
+
+NEXUS_ADMIN_PASSWORD=$ADMINPASSWORD
+STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X DELETE -u admin:$NEXUS_ADMIN_PASSWORD http://nexus.$HOSTPOSTFIX:$INGRESS_PORT_NEXUS/service/rest/v1/script/admin_changepassword)
+
+
+if [ "${STATUS}" != "204" ];then
+ echo "> script deletion failed!"
+fi
+
--- /dev/null
+#!/bin/bash
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+
+OVERRIDEYAML=$1
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
+
+
+
+
+
+
+if [ -z $OVERRIDEYAML ]; then
+
+ DATAPATH=$(cat $DIR/../helm/values.yaml | awk '/^.*datapath:.*/{ print $2;}')
+
+else
+
+ DATAPATH=$(cat $OVERRIDEYAML | awk '/^.*datapath:.*/{ print $2;}')
+
+
+ if [ -z $DATAPATH ]; then
+ DATAPATH=$(cat $DIR/../helm/values.yaml | awk '/^.*datapath:.*/{ print $2;}')
+ fi
+
+fi
+
+if [ -e $DATAPATH ]; then
+ if [ ! -w $DATAPATH ]; then
+ echo "Error: you don't have write permission to directory $DATAPATH"
+ echo "Deployment terminated."
+ exit 1
+ fi
+
+ rm -rf $DATAPATH
+
+ mkdir -p $DATAPATH
+
+ tar -xf $DIR/../etc/conf.tar -C $DATAPATH
+
+ chmod -R a+rwx $DATAPATH
+else
+ mkdir -p $DATAPATH
+ if [ $? -eq 0 ]; then
+ tar -xf $DIR/../etc/conf.tar -C $DATAPATH
+ chmod -R a+rwx $DATAPATH
+ else
+ echo "Error: you don't have write permission to directory $DATAPATH"
+ echo "Deployment terminated."
+ exit 1
+ fi
+
+
+fi
+
+
echo "If nexus.$HOSTPOSTFIX is not resolved by your DNS server, please add an entry in your /etc/hosts file."
echo "****************************************************************************************************************"
+. ./deploy_nexus_data $OVERRIDEYAML
+
+
+
+NODENAME=$(kubectl get node | awk 'NR==2{print $1}')
+kubectl label --overwrite nodes $NODENAME nexus-node=enable
+
+
+
+
+
+
COMMON_CHART_VERSION=$(cat $DIR/../../../ric-platform/50-RIC-Platform/helm/common/Chart.yaml | grep version | awk '{print $2}')
helm package -d /tmp $DIR/../../../ric-platform/50-RIC-Platform/helm/common
helm install -f $OVERRIDEYAML --namespace "${NAMESPACE}" --name "${RELEASE_NAME}-nexus" $DIR/../helm
fi
-NEXUS_POD_NAME=$(kubectl get pod -n $NAMESPACE | grep nexus | grep ContainerCreating | awk '{print $1}')
+NEXUS_POD_NAME=$(kubectl get pod -n $NAMESPACE | grep nexus | grep -v "Terminating" | awk '{print $1}')
+
echo "Waiting Nexus to be ready."
echo " "
echo $IS_NEXUS_READY
-DOCKERPORT=$(cat $DIR/../helm/templates/deployment.yaml | awk '/.*- name: docker.*/{getline; print $2}')
-
-
-DOCKERREPOSCRIPT="{\"name\":\"create_docker_repo\",\
- \"type\":\"groovy\",\
- \"content\":\"repository.createDockerHosted('docker.snapshot',\
- $DOCKERPORT, null, 'default', false, true)\"}"
-
-echo $DOCKERREPOSCRIPT
-
-# This line uses the default admin password
-curl -u admin:admin123 -X POST -H "Content-Type: application/json" --data "$DOCKERREPOSCRIPT" http://nexus.$HOSTPOSTFIX:$INGRESS_PORT_NEXUS/service/rest/v1/script
-
-curl -u admin:admin123 -X POST -H 'Content-Type: text/plain' -H 'Accept: application/json' http://nexus.$HOSTPOSTFIX:$INGRESS_PORT_NEXUS/service/rest/v1/script/create_docker_repo/run
-
-
-
+. ./change_password $OVERRIDEYAML
# ingress port number of the K8S cluster
# It will be overridden by INGRESS_PORT
ingress_port=30000
+
+
+default_admin_password=admin123
- name: docker
containerPort: 10001
protocol: TCP
+ volumeMounts:
+ - name: nexus-config
+ mountPath: /nexus-data
+ volumes:
+ - name: nexus-config
+ persistentVolumeClaim:
+ claimName: nexus-claim
+
+
# livenessProbe:
# httpGet:
# path: /
--- /dev/null
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+ annotations:
+ nginx.ingress.kubernetes.io/rewrite-target: /repository/helm.local/
+ name: {{ include "nexus.fullname" . }}-helm
+ labels:
+ app: {{ template "nexus.name" . }}
+ chart: {{ template "nexus.chart" . }}
+ release: {{ .Release.Name }}
+ heritage: {{ .Release.Service }}
+spec:
+ tls:
+ - hosts:
+ - helm.{{ .Values.ingress.hostpostfix }}
+ secretName: {{ include "common.helmrepositorycert" . }}
+ rules:
+ - host: helm.{{ .Values.ingress.hostpostfix }}
+ http:
+ paths:
+ - backend:
+ serviceName: {{ include "nexus.fullname" . }}
+ servicePort: nexus
+ path: /
heritage: {{ .Release.Service }}
spec:
tls:
- - hosts:
- - docker.{{ .Values.ingress.hostpostfix }}
- secretName: {{ include "common.repositorycert" . }}
+ - hosts:
+ - docker.{{ .Values.ingress.hostpostfix }}
+ secretName: {{ include "common.repositorycert" . }}
rules:
- host: nexus.{{ .Values.ingress.hostpostfix }}
http:
--- /dev/null
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: nexus3-configuration
+spec:
+ capacity:
+ storage: {{ .Values.storagesize }}
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ storageClassName: nexus3-storage
+ local:
+ path: {{ .Values.datapath }}
+ nodeAffinity:
+ required:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: nexus-node
+ operator: In
+ values:
+ - enable
--- /dev/null
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+ name: nexus-claim
+spec:
+ accessModes:
+ - ReadWriteOnce
+ storageClassName: nexus3-storage
+ resources:
+ requests:
+ storage: {{ .Values.storagesize }}
containers:
- name: cert-copy
image: alpine
- command: ["cp", "-rL", "/var/run/certs/..data/tls.crt", "/var/run/certs-copy/"]
+ command: [ "/bin/sh","-c","cp -rL /var/run/dockercerts/..data/tls.crt /var/run/certs-copy/dockertls.crt && cp -rL /var/run/helmcerts/..data/tls.crt /var/run/certs-copy/helmtls.crt"]
# command: ["tail", "-f", "/dev/null"]
volumeMounts:
- - name: certs
- mountPath: /var/run/certs
+ - name: dockercerts
+ mountPath: /var/run/dockercerts
+ readOnly: true
+ - name: helmcerts
+ mountPath: /var/run/helmcerts
readOnly: true
- name: write-to-volume
mountPath: /var/run/certs-copy
volumes:
- - name: certs
+ - name: dockercerts
secret:
secretName: {{ include "common.repositorycert" . }}
+ - name: helmcerts
+ secret:
+ secretName: {{ include "common.helmrepositorycert" . }}
- name: write-to-volume
hostPath:
path: /tmp
--- /dev/null
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+
+apiVersion: v1
+kind: Secret
+type: kubernetes.io/tls
+metadata:
+ name: {{ include "common.helmrepositorycert" . }}
+ labels:
+ app: {{ template "nexus.name" . }}
+ chart: {{ template "nexus.chart" . }}
+ heritage: {{ .Release.Service }}
+ release: {{ .Release.Name }}
+ annotations:
+ "helm.sh/hook": "pre-install"
+ "helm.sh/hook-delete-policy": "before-hook-creation"
+data:
+{{ ( include "common.helmrepository.gen-certs" . ) | indent 2 }}
--- /dev/null
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: nexus3-storage
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: WaitForFirstConsumer
# repositoryCertOverride: ""
repositoryCert: docker-reg-certs
+# This is the name of K8S secret that contains the helm repository cert
+# You can override this by using
+# helmRepositoryCertOverride: ""
+helmRepositoryCert: xapp-mgr-certs
+
+
+# This is the admin password
+adminPassword: admin123
+
# This is designed to be deployed using local image
image:
repository: nexus-repository-helm-apt
ingress:
hostpostfix: ricinfra.local
+
+storagesize: 20Gi
+datapath: /tmp/nexus3-data/
--- /dev/null
+# Copyright (c) 2019 AT&T Intellectual Property.
+# Copyright (c) 2019 Nokia.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+FROM alpine
+
+MAINTAINER "RIC"
+
+LABEL name="Generic initcontainer image for RIC Platform components"
+
+RUN apk update
+
+# iproute2: required for e2 termination
+RUN apk add iproute2
+# openssl: required for certificate handling by init scripts
+RUN apk add openssl
+# kubectl: required for xapp/ricplt helm installers
+ADD https://storage.googleapis.com/kubernetes-release/release/v1.14.1/bin/linux/amd64/kubectl /bin/kubectl
+RUN chmod +x /bin/kubectl
+
+COPY bin/ricplt-init.sh /ricplt-init.sh
+
+#
+CMD /ricplt-init.sh
--- /dev/null
+#!/bin/sh
+# Copyright (c) 2019 AT&T Intellectual Property.
+# Copyright (c) 2019 Nokia.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# this is a placeholder script, intended
+# to be overridden by individual RIC
+# component charts.
+exit 0
if [ -z $OVERRIDEYAML ]; then
- helm install --namespace "${NAMESPACE}" --name "${RELEASE_NAME}-credential" $DIR/../helm
+ helm install --namespace "${NAMESPACE}" --name "${RELEASE_NAME}-${NAMESPACE}-credential" $DIR/../helm
else
- helm install -f $OVERRIDEYAML --namespace "${NAMESPACE}" --name "${RELEASE_NAME}-credential" $DIR/../helm
+ helm install -f $OVERRIDEYAML --namespace "${NAMESPACE}" --name "${RELEASE_NAME}-${NAMESPACE}-credential" $DIR/../helm
fi
NAMESPACE=$RICPLT_NAMESPACE
fi
- helm delete --purge "${RELEASE_NAME}-credential"
+helm delete --purge "${RELEASE_NAME}-${NAMESPACE}-credential"
-----BEGIN CERTIFICATE-----
-MIIDCTCCAfGgAwIBAgIBATANBgkqhkiG9w0BAQsFADAdMRswGQYDVQQDExJkb2Nr
-ZXItcmVnaXN0cnktY2EwHhcNMTkwNTE1MTQ1NzQzWhcNMjAwNTE0MTQ1NzQzWjAQ
-MQ4wDAYDVQQDEwVuZXh1czCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
-AKNNbC8/cil64YXmPYpN7mRaT5biVsZrOrFkXfX2AXcJAsLUL7E4t7keK4Ba/VwO
-pJKmgdjI4NxlTxe+zoOJVVz3+sMYz77UZTd1h/KDn1eBfozbqHcQ5lQksNDsL46d
-yG/JfYpEEzRha/QxEEdaaQLWMyOcf/SjWnscqMQ2cGvTaEiO8F1re2qrwfnrbqQm
-JYkIrmBbcGKMdg33edZpl3xbFc9eMfJGWuhaoC+Tk7Hj1EtV/O9KdPrvDYzO+h7P
-HVExKXU04h1f9ThAoeU9/o7EN266iuUiCVE6M0zekYSV4BuAkXqbUtmpbnYziAYo
-Cq2puiNUSirqXc6Bno12QykCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgWgMB0GA1Ud
-JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMCAGA1UdEQQZ
-MBeCFWRvY2tlci5yaWNpbmZyYS5sb2NhbDANBgkqhkiG9w0BAQsFAAOCAQEAg5aE
-yicj1VnG4G3JeCszj2hFFmg2eTnwkPsIlpeIMJY8zI/IZ2aQP5e0KPAM8IaKMnqq
-RPPusL2DF7Dl4A+ZBTkvfKtxIzwc7wnQGCfonMyM/MxyoBxXIJw+Qek42CpVbYYt
-qVZ3fz6VpCvF/QL6eGmVpMlvLIu15C9pxzEHPcUKpkM1mvKLRcxn2tt5daqjh9ao
-EvZHJt8kOh53c/FZHgA2+73I8Gx6BLUZPO0E+E1vC5K1FwtCl+Cmt6nX5jZEw1LK
-eNcb3oZhXrVnUoQ72HTJEWuPjydB7w1hxnzB5RYGZalM7XwDmOHwdGHlNxpmMbJa
-UZ4yL0N+nAUbLeUS2w==
+MIIDFzCCAf+gAwIBAgIRAPqRsyJWFaoXjwzmI+uQ5GkwDQYJKoZIhvcNAQELBQAw
+HTEbMBkGA1UEAxMSaGVsbS1yZXBvc2l0b3J5LWNhMB4XDTE5MDYyMjAzMDYxNVoX
+DTIwMDYyMTAzMDYxNVowEDEOMAwGA1UEAxMFbmV4dXMwggEiMA0GCSqGSIb3DQEB
+AQUAA4IBDwAwggEKAoIBAQDBExs2W0HvXyinLL/LOkgM4Dobe1OVVntQzCGB8tqD
+xI4ZcXGh8G4b3GMLVe8vs0c0IUUbPhy23AkDxGUT7whLbo1UwGa41htxY51zok4S
+20NN706XVs6E0tKqA1L5kzyxLEAKgHE/EefWIf6MHtRt5GfwFVdjdfZP+9L/ZpUL
+u1HKmiWw7fv0m/MrcAQA7lILpB4xeaM8rYWtBj8TGeoMlWLOtOcFKGpY9Kuty1n7
+cebHcz++qY+Vsrf8RlG4lGlit2IY06FE+5ihK24Zt9Ttwf3lsTf6xDwAEGmDxOAp
+R61gY9wX8ClLO+ns+nkkEiJ90104a4U+qg115tZCGfBBAgMBAAGjXzBdMA4GA1Ud
+DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T
+AQH/BAIwADAeBgNVHREEFzAVghNoZWxtLnJpY2luZnJhLmxvY2FsMA0GCSqGSIb3
+DQEBCwUAA4IBAQA2Mt29tQ/3ACBpmZSKLPj3jTpPulXQKgtH9of9X0XBQ0ZdfqTN
+Evyu+K5gY/FF6RhnkaCML4XoOnvJHQQBydwMrK9f6mcC26U8PWZn9+usihw5GJHp
+Hu3pc0PDHb87ha+qCeie9ssKitz8pbJarTh6z1Ht8OT9/nktXkgPnIYI9ycAsdwy
+uBww45UxDu8OD8TT0/08RzH56ruPpRSYDNRbkcT8FZV8C6KgEKG55e/xNAITQwaD
+LD4xYkAZqK7lcPzuIM1kZUQAOJyw72TicMl94r0mL7pZ2NOJUCkkVvkYXo97TUKB
+zomEUpolyZnVpdl4HG3v3Lwd2K3bCs2X+HTc
-----END CERTIFICATE-----
*/}}
{{- define "repository.secret" -}}
{{- $repo := include "common.repository" . }}
- {{- $cred := .Values.repositoryCred }}
+ {{- $cred := .Values.repositoryCredential }}
{{- $user := default "docker" $cred.user }}
{{- $password := default "docker" $cred.password }}
{{- $mail := default "@" $cred.mail }}
{{- define "helmrepo.secret.user" -}}
- {{- $user := default "helm" .Values.helmrepoCred.user -}}
+ {{- $user := default "helm" .Values.helmrepoCredential.user -}}
{{- printf "%s" $user |b64enc }}
{{- end -}}
{{- define "helmrepo.secret.password" -}}
- {{- $pass := default "helm" .Values.helmrepoCred.password -}}
+ {{- $pass := default "helm" .Values.helmrepoCredential.password -}}
{{- printf "%s" $pass |b64enc }}
{{- end -}}
# Values for setting up Kubernetes resources for accessing infrastructure such as docker registry
# helm repo, etc.
+repository: "nexus3.o-ran-sc.org:10004"
-repositoryCred:
+repositoryCredential:
user: "docker"
password: "docker"
-helmrepoCred:
+helmrepoCredential:
user: "helm"
password: "helm"
--- /dev/null
+NAME: xappmgr
+DBAAS_SERVICE_HOST: "r0-dbaas-redis-standalone"
+DBAAS_SERVICE_PORT: "6379"
+DBAAS_PORT_6379_TCP_ADDR: "r0-dbaas-redis-standalone"
+DBAAS_PORT_6379_TCP_PORT: "6379"
--- /dev/null
+"local":
+ # Port on which the xapp-manager REST services are provided
+ "host": __REST_PORT__
+"helm":
+ # Remote helm repo URL. UPDATE this as required.
+ "repo": __HELM_REPO__
+
+ # Repo name referred within the xapp-manager
+ "repo-name": __REPO_NAME__
+
+ # Tiller service details in the cluster. UPDATE this as required.
+ "tiller-service": __TILLER_SERVICE__
+ "tiller-namespace": __TILLER_NAMESPACE__
+ "tiller-port": __TILLER_PORT__
+ # helm username and password files
+ "helm-username-file": "/opt/ric/secret/helm_repo_username"
+ "helm-password-file": "/opt/ric/secret/helm_repo_password"
+ "retry": 1
+"xapp":
+ #Namespace to install xAPPs
+ "namespace": "ricxapp"
+ "tarDir": "/tmp"
+ "schema": "descriptors/schema.json"
+ "config": "config/config-file.json"
+ "tmpConfig": "/tmp/config-file.json"
+++ /dev/null
-"local":
- # Port on which the xapp-manager REST services are provided
- "host": __REST_PORT__
-"helm":
- # Remote helm repo URL. UPDATE this as required.
- "repo": __HELM_REPO__
-
- # Repo name referred within the xapp-manager
- "repo-name": __REPO_NAME__
-
- # Tiller service details in the cluster. UPDATE this as required.
- "tiller-service": __TILLER_SERVICE__
- "tiller-namespace": __TILLER_SERVICE__
- "tiller-port": __TILLER_PORT__
metadata:
name: {{ include "appmgr.fullname" . }}-appconfig
data:
- {{- $restport := default ":8080" (printf ":%.0f" .Values.appmgr.containerPort) | quote -}}
- {{- $helmrepo := include "common.helmrepository" . | quote -}}
- {{- $reponame := default "helm-repo" .Values.appmgr.reponame | quote -}}
- {{- $tiller := include "common.helmrepositorytiller" . | quote -}}
- {{- $tillernamespace := include "common.helmrepositorytillernamespace" . | quote -}}
- {{- $tillerport := include "common.helmrepositorytillerport" . | quote -}}
- {{- (.Files.Glob "resources/xapp-manager.yaml").AsConfig | replace "__HELM_REPO__" $helmrepo | replace "__REST_PORT__" $restport | replace "__REPO_NAME__" $reponame | replace "__TILLER_SERVICE__" $tiller | replace "__TILLER_NAMESPACE__" $tillernamespace | replace "__TILLER_PORT__" $tillerport | nindent 2 }}
+ appmgr.yaml: |
+ {{- $restport := default ":8080" (printf ":%.0f" .Values.appmgr.containerPort) | quote -}}
+ {{- $helmrepo := include "common.helmrepository" . | quote -}}
+ {{- $reponame := default "helm-repo" .Values.appmgr.reponame | quote -}}
+ {{- $tiller := include "common.helmrepositorytiller" . | quote -}}
+ {{- $tillernamespace := include "common.helmrepositorytillernamespace" . | quote -}}
+ {{- $tillerport := include "common.helmrepositorytillerport" . | quote -}}
+ {{- (.Files.Glob "resources/appmgr.yaml").AsConfig | replace "__HELM_REPO__" $helmrepo | replace "__REST_PORT__" $restport | replace "__REPO_NAME__" $reponame | replace "__TILLER_SERVICE__" $tiller | replace "__TILLER_NAMESPACE__" $tillernamespace | replace "__TILLER_PORT__" $tillerport | nindent 2 }}
metadata:
name: {{ include "appmgr.fullname" . }}-appenv
data:
- {{- with .Values.appmgr.appenv }}
- {{- toYaml . | nindent 2 }}
- {{- end }}
+ {{- (.Files.Get "resources/appenv.yaml") | nindent 2 }}
+
release: {{ .Release.Name }}
spec:
hostname: {{ .Chart.Name }}
+ hostAliases:
+ - ip: "135.207.143.86"
+ hostnames:
+ - "helm.ricinfra.local"
imagePullSecrets:
- name: {{ include "common.repositoryCred" . }}
containers:
protocol: TCP
volumeMounts:
- name: config-volume
- mountPath: {{ .Values.appmgr.appconfigpath }}
+ mountPath: {{ .Values.appmgr.appconfigpath }}/appmgr.yaml
+ subPath: appmgr.yaml
- name: secret-volume
mountPath: {{ .Values.appmgr.appsecretpath }}
- name: cert-volume
################################################################################
repository: "nexus3.o-ran-sc.org:10004"
+#repositoryOverride: "nexus3.o-ran-sc.org:10004"
imagePullPolicy: IfNotPresent
repositoryCred: docker-reg-cred
-
# Helm Repo for xApp
# By default a local helm repo is used. The global setting will override
# the default value. You can further override using
#helmRepositoryOverride: ""
-helmRepository: "snapshot.helm.local.ric.org"
+helmRepository: "https://helm.ricinfra.local:31000"
# Default secret name for Helm Repo credential. .Values.global.helmRepositoryCred will
# override the default value. You can further override using
appmgr:
image:
# xAppmanager Docker image name and tag
- name: xapp-manager
- tag: latest
+ name: ric-plt-appmgr
+ tag: 0.1.3
# This section describes xAppManager
replicaCount: 1
port: 8080
name: appmgr-service
protocol: "TCP"
- export: 30020
+ extport: 30020
# config
# Path referred in xapp-manager for retrieving configuration details
appconfigpath: /opt/ric/config
- # To be provided as env variables
- appenv:
- NAME: xappmgr
- #ENV1: "envvalue1"
- #ENV2: "envvalue2"
# secret
# Path referred in xapp-manager for retrieving helm repo secrets
*/}}
{{- define "common.helmrepositorytillerport" -}}
{{- if .Values.helmRepositoryTillerPortOverride -}}
- {{- printf "%s" .Values.helmRepositoryTillerPortOverride -}}
+ {{- printf "%.0f" .Values.helmRepositoryTillerPortOverride -}}
{{- else -}}
{{- if .Values.global -}}
{{- if .Values.global.helmRepositoryTillerPort -}}
- {{- printf "%s" .Values.global.helmRepositoryTillerPort -}}
+ {{- printf "%.0f" .Values.global.helmRepositoryTillerPort -}}
{{- else -}}
- {{- printf "%s" .Values.helmRepositoryTillerPort -}}
+ {{- printf "%.0f" .Values.helmRepositoryTillerPort -}}
{{- end -}}
{{- else -}}
- {{- printf "%s" .Values.helmRepositoryTillerPort -}}
+ {{- printf "%.0f" .Values.helmRepositoryTillerPort -}}
{{- end -}}
{{- end -}}
{{- end -}}
+
+{{/*
+Generate certificates for the helm repo
+*/}}
+{{- define "common.helmrepository.gen-certs" -}}
+{{- $altNames := list ( printf "helm.%s" .Values.ingress.hostpostfix ) -}}
+{{- $ca := genCA "helm-repository-ca" 365 -}}
+{{- $cert := genSignedCert ( include "nexus.name" . ) nil $altNames 365 $ca -}}
+tls.crt: {{ $cert.Cert | b64enc }}
+tls.key: {{ $cert.Key | b64enc }}
+{{- end -}}
+
release: {{ .Release.Name }}
spec:
hostname: {{ .Chart.Name }}
+ hostNetwork: true
+ dnsPolicy: ClusterFirstWithHostNet
imagePullSecrets:
- name: {{ include "common.repositoryCred" . }}
containers:
--- /dev/null
+{
+ "local": {
+ "host": ":8080"
+ },
+ "logger": {
+ "level": 3
+ },
+ "rmr": {
+ "protPort": "tcp:4560",
+ "maxSize": 2072,
+ "numWorkers": 1,
+ "txMessages": ["RIC_X2_LOAD_INFORMATION"],
+ "rxMessages": ["RIC_X2_LOAD_INFORMATION"]
+ },
+ "db": {
+ "namespace": "ricxapp",
+ "host": "dbaas",
+ "port": 6379
+ }
+}
--- /dev/null
+{
+ "definitions": {},
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "$id": "http://example.com/root.json",
+ "type": "object",
+ "title": "The Root Schema",
+ "required": [
+ "local",
+ "logger",
+ "rmr",
+ "db"
+ ],
+ "properties": {
+ "local": {
+ "$id": "#/properties/local",
+ "type": "object",
+ "title": "The Local Schema",
+ "required": [
+ "host"
+ ],
+ "properties": {
+ "host": {
+ "$id": "#/properties/local/properties/host",
+ "type": "string",
+ "title": "The Host Schema",
+ "default": "",
+ "examples": [
+ ":8080"
+ ],
+ "pattern": "^(.*)$"
+ }
+ }
+ },
+ "logger": {
+ "$id": "#/properties/logger",
+ "type": "object",
+ "title": "The Logger Schema",
+ "required": [
+ "level"
+ ],
+ "properties": {
+ "level": {
+ "$id": "#/properties/logger/properties/level",
+ "type": "integer",
+ "title": "The Level Schema",
+ "default": 0,
+ "examples": [
+ 3
+ ]
+ }
+ }
+ },
+ "rmr": {
+ "$id": "#/properties/rmr",
+ "type": "object",
+ "title": "The Rmr Schema",
+ "required": [
+ "protPort",
+ "maxSize",
+ "numWorkers",
+ "txMessages",
+ "rxMessages"
+ ],
+ "properties": {
+ "protPort": {
+ "$id": "#/properties/rmr/properties/protPort",
+ "type": "string",
+ "title": "The Protport Schema",
+ "default": "",
+ "examples": [
+ "tcp:4560"
+ ],
+ "pattern": "^(.*)$"
+ },
+ "maxSize": {
+ "$id": "#/properties/rmr/properties/maxSize",
+ "type": "integer",
+ "title": "The Maxsize Schema",
+ "default": 0,
+ "examples": [
+ 2072
+ ]
+ },
+ "numWorkers": {
+ "$id": "#/properties/rmr/properties/numWorkers",
+ "type": "integer",
+ "title": "The Numworkers Schema",
+ "default": 0,
+ "examples": [
+ 1
+ ]
+ },
+ "txMessages": {
+ "$id": "#/properties/rmr/properties/txMessages",
+ "type": "array",
+ "title": "The Txmessages Schema"
+ },
+ "rxMessages": {
+ "$id": "#/properties/rmr/properties/rxMessages",
+ "type": "array",
+ "title": "The Rxmessages Schema"
+ }
+ }
+ },
+ "db": {
+ "$id": "#/properties/db",
+ "type": "object",
+ "title": "The Db Schema",
+ "required": [
+ "host",
+ "namespace",
+ "port"
+ ],
+ "properties": {
+ "host": {
+ "$id": "#/properties/db/properties/host",
+ "type": "string",
+ "title": "The Host Schema",
+ "default": "",
+ "examples": [
+ "dbaas"
+ ],
+ "pattern": "^(.*)$"
+ },
+ "namespace": {
+ "$id": "#/properties/db/properties/namespace",
+ "type": "string",
+ "title": "The Namespace Schema",
+ "default": "",
+ "examples": [
+ "ricxapp"
+ ],
+ "pattern": "^(.*)$"
+ },
+ "port": {
+ "$id": "#/properties/db/properties/port",
+ "type": "integer",
+ "title": "The Port Schema",
+ "default": 0,
+ "examples": [
+ 6379
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
-################################################################################
-# Copyright (c) 2019 AT&T Intellectual Property. #
-# Copyright (c) 2019 Nokia. #
-# #
-# Licensed under the Apache License, Version 2.0 (the "License"); #
-# you may not use this file except in compliance with the License. #
-# You may obtain a copy of the License at #
-# #
-# http://www.apache.org/licenses/LICENSE-2.0 #
-# #
-# Unless required by applicable law or agreed to in writing, software #
-# distributed under the License is distributed on an "AS IS" BASIS, #
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
-# See the License for the specific language governing permissions and #
-# limitations under the License. #
-################################################################################
+#
+# ==================================================================================
+# Copyright (c) 2019 Nokia
+# ==================================================================================
+#
+
+{{ if (eq .Values.ricapp.appconfig.override "") }}
apiVersion: v1
kind: ConfigMap
metadata:
- name: {{ .Release.Name }}-appconfig
+ name: {{ if .Values.ricapp.appconfig.override }}{{ .Values.ricapp.appconfig.override }}{{ else }}{{ .Release.Name }}-appconfig{{ end }}
data:
- {{- with .Values.ricapp.appconfig }}
- {{- toYaml . | nindent 2 }}
- {{- end }}
-
+{{ (.Files.Glob "config/*").AsConfig | indent 2 }}
+{{ end }}
-################################################################################
-# Copyright (c) 2019 AT&T Intellectual Property. #
-# Copyright (c) 2019 Nokia. #
-# #
-# Licensed under the Apache License, Version 2.0 (the "License"); #
-# you may not use this file except in compliance with the License. #
-# You may obtain a copy of the License at #
-# #
-# http://www.apache.org/licenses/LICENSE-2.0 #
-# #
-# Unless required by applicable law or agreed to in writing, software #
-# distributed under the License is distributed on an "AS IS" BASIS, #
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
-# See the License for the specific language governing permissions and #
-# limitations under the License. #
-################################################################################
+#
+# ==================================================================================
+# Copyright (c) 2019 Nokia
+# ==================================================================================
+#
+
apiVersion: apps/v1
kind: Deployment
metadata:
app.kubernetes.io/name: {{ include "ricapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
- imagePullSecrets:
- - name: {{ .Release.Name }}-docker-registry-key
+ hostname: {{ .Values.ricapp.hostname }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.global.repository }}/{{ .Values.ricapp.image.name }}:{{ .Values.ricapp.image.tag }}"
imagePullPolicy: {{ .Values.global.image.pullPolicy }}
- # enable the next two elements if wanting to test pod bypassing its own entrypoint
- #command: ["sh"]
- #args:
- # - -c
- # - "while sleep 2; do echo thinking; done"
ports:
- name: http
- containerPort: 8080
+ containerPort: {{ .Values.ricapp.service.port }}
+ protocol: TCP
+ - name: rmr
+ containerPort: {{ .Values.ricapp.service.rmrPort }}
protocol: TCP
volumeMounts:
- name: config-volume
- mountPath: {{ .Values.ricapp.appconfigpath }}
- - name: secret-volume
- mountPath: {{ .Values.ricapp.appsecretpath }}
- envFrom:
- - configMapRef:
- name: {{ .Release.Name }}-appenv
+ mountPath: {{ .Values.ricapp.appconfig.path }}
+ - name: cert-volume
+ mountPath: {{ .Values.ricapp.cert.path }}
livenessProbe:
- exec:
- command:
- - /bin/bash
- - -c
- - ps -ef | grep {{ .Values.ricapp.livenessprocessname }}| grep -v "grep"
- initialDelaySeconds: 120
- periodSeconds: 30
+ httpGet:
+ path: {{ .Values.ricapp.probes.healthAliveCheckEndpoint }}
+ port: 8080
+ initialDelaySeconds: 5
+ periodSeconds: 15
readinessProbe:
httpGet:
- path: /
- port: http
+ path: {{ .Values.ricapp.probes.healthReadyCheckEndpoint }}
+ port: 8080
+ initialDelaySeconds: 5
+ periodSeconds: 15
restartPolicy: Always
resources:
- {{- toYaml .Values.resources | nindent 12 }}
+ {{- toYaml .Values.ricapp.resources | nindent 12 }}
securityContext:
# ubuntu
- runAsUser: 1000
- allowPrivilegeEscalation: false
- {{- with .Values.nodeSelector }}
+ #runAsUser: 1000
+ #allowPrivilegeEscalation: false
+ {{- with .Values.ricapp.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
- {{- with .Values.affinity }}
+ {{- with .Values.ricapp.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
- {{- with .Values.tolerations }}
+ {{- with .Values.ricapp.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: config-volume
configMap:
- name: {{ .Release.Name }}-appconfig
- - name: secret-volume
- secret:
- secretName: {{ .Release.Name }}-appsecret
+ name: {{ if .Values.ricapp.appconfig.override }}{{ .Values.ricapp.appconfig.override }}{{ else }}{{ .Release.Name }}-appconfig{{ end }}
+ - name: cert-volume
+ emptyDir: {}
# limitations under the License. #
################################################################################
-{{- if .Values.ricapp.enabled }}
+{{ if .Values.ricapp.service.enabled }}
apiVersion: v1
kind: Service
metadata:
- name: {{ include "ricapp.fullname" . }}
+ name: {{ include "ricapp.name" . }}
labels:
app.kubernetes.io/name: {{ include "ricapp.name" . }}
helm.sh/chart: {{ include "ricapp.chart" . }}
selector:
app.kubernetes.io/name: {{ include "ricapp.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
-{{ end }}
+{{ end }}
\ No newline at end of file
global:
# modify this section to point to your local testing settings
- repository: "snapshot.docker.ranco-dev-tools.eastus.cloudapp.azure.com:10001"
+ repository: "${__RUNRICENV_DOCKER_HOST__}:${__RUNRICENV_DOCKER_PORT__}"
repositoryCred:
- user: docker
- password: docker
+ user: "${__RUNRICENV_DOCKER_USER__}"
+ password: "${__RUNRICENV_DOCKER_PASS__}"
image:
pullPolicy: IfNotPresent
ricplt:
# This section is reserved for values imported from RIC Platform charts
-
ricapp:
- # This section is for xapp. Templates to be resolved from xApp descriptor
+ # This section is for xapp. Templates to be resolved from xApp descriptor
replicaCount: 1
- xappname: &anchor-xappname xapp-std
+ name: xapp-std
+ fullname: xapp-std
+ nameOverride: ""
+ fullnameOverride: ""
- # the name of the process that indicates the liveness of the component
- liveness-process-name: xapp-std
+ # The name of the process that indicates the liveness of the component
+ livenessprocessname: xapp-std
image:
- name: whoami
- tag: 0.0.1
-
- nameOverride: ""
- fullnameOverride: ""
+ name: xapp-std
+ tag: latest
service:
- enabled: false
+ enabled: true
+ type: ClusterIP
+ name: xapp-std-service
+ port: 8080
+ rmrPort: 4560
+ probes:
+ healthAliveCheckEndpoint: ric/v1/health/alive
+ healthReadyCheckEndpoint: ric/v1/health/ready
- # to be provided as property file
- appconfigpath: /opt/etc/xapp
appconfig:
- # to be present as file propfile1 under appconfigpath
- propfile1: |
- prop1.v1="propvalue1.1"
- prop1.v2="propvalue1.2"
- # to be present as file propfile2 under appconfigpath
- propfile2: "prop2"
+ path: /opt/ric/config
+ override: ""
- # to be provided as env variables
- appenv:
- NAME: *anchor-xappname
- ENV1: "envvalue1"
- ENV2: "envvalue2"
+ cert:
+ path: /opt/ric/certificates
+ object: {}
- # secret
- appsecretpath: /opt/etc/kube
- appsecret:
- # to be present as files under path appsecretpath
- username: myusername
- password: mypassword
-
+ hostname: xapp-std
resources: {}
- # limits:
- # cpu: 100m
- # memory: 128Mi
- # requests:
- # cpu: 100m
- # memory: 128Mi
nodeSelector: {}
--- /dev/null
+#!/bin/bash
+################################################################################
+# Copyright (c) 2019 AT&T Intellectual Property. #
+# Copyright (c) 2019 Nokia. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+
+if (( $# != 1 )); then
+ echo "Missing parameters: <xapp-name>"
+ exit
+fi
+
+source ./scripts/ric_env.sh
+if [ -z $__RICENV_SET__ ]; then
+ echo "Edit your ric_env.sh for first!"
+ exit
+fi
+
+# Update the local values
+RESULT_DIR=./generated
+rm -rf $RESULT_DIR && mkdir -p $RESULT_DIR && cp -rf ./helm $RESULT_DIR
+
+FILELIST=$(find ./helm \( -name "*.tpl" -o -name "*.yaml" \))
+for f in $FILELIST; do
+ envsubst '${__RUNRICENV_DOCKER_HOST__} ${__RUNRICENV_DOCKER_PORT__}' < $f > "$RESULT_DIR/$f";
+done
+
+# Rename the helm chart folder
+mv $RESULT_DIR/helm/xapp-std $RESULT_DIR/helm/$1
+find $RESULT_DIR/helm/$1 -type f | xargs sed -i -e "s/xapp-std/$1/g"
+
+# Push to helm chart repo
+helm package generated/helm/$1 | awk '{ print $NF }' | xargs mv -t $__RUNRICENV_HELMREPO_DIR__
+helm repo index $__RUNRICENV_HELMREPO_DIR__
+helm repo update
+#!/bin/bash
################################################################################
# Copyright (c) 2019 AT&T Intellectual Property. #
# Copyright (c) 2019 Nokia. #
# limitations under the License. #
################################################################################
-# Provides configuration parameters which spring expects as a JSON string
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: dashboard-configmap
-data:
- springApplication.json: |
- {
- "a1med": {
- "basepath": "http://0.1.2.3:45"
- },
- "e2mgr": {
- "basepath": "http://1.2.3.4:56"
- },
- "xappmgr": {
- "basepath": "http://2.3.4.5:67"
- }
- }
+# customize the following repo info to local environment
+export __RICENV_SET__='true'
+export __RUNRICENV_DOCKER_HOST__='192.168.0.6'
+export __RUNRICENV_DOCKER_PORT__='5000'
+export __RUNRICENV_HELMREPO_DIR__='/var/www/html/charts/'
\ No newline at end of file