chartmuseum:
- repositoryOverride: "registry.hub.docker.com"
+ repositoryOverride: "docker.io"
image:
name: chartmuseum/chartmuseum
tag: v0.8.2
+ job:
+ image:
+ name: alpine
+ tag: latest
storagesize: 2Gi
- datapath: /tmp/chartmuseum-data/
# limitations under the License. #
################################################################################
-OVERRIDEYAML=$1
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
-
-
-
-
-
-
-if [ -z $OVERRIDEYAML ]; then
-
- DATAPATH=$(cat $DIR/../helm/chartmuseum/values.yaml | awk '/^.*datapath:.*/{ print $2;}')
-
-else
- DATAPATH=$(cat $OVERRIDEYAML | awk '/^chartmuseum:.*/{ getline; while ( match($0, / .*/) || (!$0)){ if(getline == 0){break;}; if (match($0, /^ .*datapath:.*/)){print $2}} }')
-
-
- if [ -z $DATAPATH ]; then
- DATAPATH=$(cat $DIR/../helm/chartmuseum/values.yaml | awk '/^.*datapath:.*/{ print $2;}')
- fi
-
-fi
-
- rm -rf $DATAPATH
-
- mkdir -p $DATAPATH
-
-
- chmod -R a+rwx $DATAPATH
+# This script cleans up the PV data for the aux cluster
+rm -rf /opt/data/dashboard-data
+rm -rf /opt/data/chartmuseum-data
COMMON_OVERRIDE=$RIC_COMMON_OVERRIDE
fi
+
+
+NODENAME=$(kubectl get node | awk '{print $1}')
+LABELFOUND=false
+for f in $NODENAME; do
+ LABEL=$(kubectl describe node $f | grep local-storage)
+ if [ ! -z "$LABEL" ]; then
+ LABELFOUND=true
+ fi
+done
+
+if [ ! -z $OVERRIDEYAML ]; then
+ FOUND_STORAGECLASS=$(grep storageclass $OVERRIDEYAML)
+fi
+
+
+
+if ! $LABELFOUND && [ -z "$FOUND_STORAGECLASS" ]; then
+ echo "***********************************************************************************************"
+ echo "* ERROR!!!!!!!!!!!!! *"
+ echo "***********************************************************************************************"
+  echo "* Node label \"local-storage=enable\" is not found on any of the cluster nodes.                *"
+ echo "* Please pick a node and label it using the following command. *"
+ echo "* kubectl label --overwrite nodes <YOUR_NODE_NAME> local-storage=enable *"
+ echo "***********************************************************************************************"
+
+ exit 1
+fi
+
+
+
+
+if [ -z "$FOUND_STORAGECLASS" ] && $LABELFOUND; then
+
+ DATAPATH=$(cat $DIR/../helm/dashboard/values.yaml | grep datapath | awk '{ print $2}' )
+
+
+ if [ ! -z $OVERRIDEYAML ]; then
+ DATAPATHOVERRIDE=$(cat $OVERRIDEYAML | grep datapath | awk '{ print $2}' )
+ fi
+
+ if [ ! -z "$DATAPATHOVERRIDE" ]; then
+ DATAPATH=$DATAPATHOVERRIDE
+ fi
+
+
+ echo "***********************************************************************************************"
+ echo "* WARNING!!!!!!!!!!!!! *"
+ echo "***********************************************************************************************"
+  echo "* Dashboard will use local storage. Please make sure that directory                          *"
+ echo "* $DATAPATH *"
+ echo "* exists on the selected cluster node, and contains the proper files. *"
+ echo "***********************************************************************************************"
+
+
+fi
+
+
+
+
+
+
+
RICAUX_COMPONENTS="dashboard ves message-router"
echo "Deploying RIC AUX components [$RICAUX_COMPONENTS]"
-# Remove this persistent volume when cloud storage is available
+{{- if not .Values.dashboard.storageclass }}
+
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: storageclass-{{ include "common.name.dashboard" . }}
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: WaitForFirstConsumer
+
+---
+
apiVersion: v1
kind: PersistentVolume
metadata:
required:
nodeSelectorTerms:
- matchExpressions:
- - key: dashboard-node
+ - key: local-storage
operator: In
values:
- enable
+{{- end -}}
+
spec:
accessModes:
- ReadWriteOnce
- storageClassName: storageclass-{{ include "common.name.dashboard" . }}
+ storageClassName: {{ .Values.dashboard.storageclass | default (printf "storageclass-%s" ( include "common.name.dashboard" . )) }}
resources:
requests:
storage: {{ .Values.dashboard.storagesize }}
+++ /dev/null
-kind: StorageClass
-apiVersion: storage.k8s.io/v1
-metadata:
- name: storageclass-{{ include "common.name.dashboard" . }}
-provisioner: kubernetes.io/no-provisioner
-volumeBindingMode: WaitForFirstConsumer
# The keys listed below MUST be configured in each deployment;
# this list does not include all keys recognized by the app.
+ # Specify a storage class to bypass the local storage definition.
+ # storageclass:
# persist user details as JSON to a persistent volume
storagesize: 1Mi
# in the container
userfile: /dashboard-data/users.json
# on the server
- datapath: /tmp/dashboard-data
+ datapath: /opt/data/dashboard-data
# The URL prefixes use K8S/Kong service names
a1med:
+NODENAME=$(kubectl get node | awk '{print $1}')
+LABELFOUND=false
+for f in $NODENAME; do
+ LABEL=$(kubectl describe node $f | grep local-storage)
+ if [ ! -z "$LABEL" ]; then
+ LABELFOUND=true
+ fi
+done
+
+if [ ! -z $OVERRIDEYAML ]; then
+ FOUND_STORAGECLASS=$(grep storageclass $OVERRIDEYAML)
+fi
+
+
+
+if ! $LABELFOUND && [ -z "$FOUND_STORAGECLASS" ]; then
+ echo "***********************************************************************************************"
+ echo "* ERROR!!!!!!!!!!!!! *"
+ echo "***********************************************************************************************"
+  echo "* Node label \"local-storage=enable\" is not found on any of the cluster nodes.                *"
+ echo "* Please pick a node and label it using the following command. *"
+ echo "* kubectl label --overwrite nodes <YOUR_NODE_NAME> local-storage=enable *"
+ echo "***********************************************************************************************"
+
+ exit 1
+fi
+
+
+
+
+if [ -z "$FOUND_STORAGECLASS" ] && $LABELFOUND; then
+
+ DATAPATH=$(cat $DIR/../helm/chartmuseum/values.yaml | grep datapath | awk '{ print $2}' )
+
+
+ if [ ! -z $OVERRIDEYAML ]; then
+ DATAPATHOVERRIDE=$(cat $OVERRIDEYAML | grep datapath | awk '{ print $2}' )
+ fi
+
+ if [ ! -z "$DATAPATHOVERRIDE" ]; then
+ DATAPATH=$DATAPATHOVERRIDE
+ fi
+
-NODENAME=$(kubectl get node | awk 'NR==2{print $1}')
-kubectl label --overwrite nodes $NODENAME helm-node=enable
+ echo "***********************************************************************************************"
+ echo "* WARNING!!!!!!!!!!!!! *"
+ echo "***********************************************************************************************"
+  echo "* Chartmuseum will use local storage. Please make sure that directory                       *"
+ echo "* $DATAPATH *"
+ echo "* exists on the selected cluster node, and contains the proper files. *"
+ echo "***********************************************************************************************"
-DIRTEMP=$DIR
-. "$DIR/clear_data_path"
-DIR=$DIRTEMP
+fi
RICINFRA_COMPONENTS="chartmuseum"
-mkdir -p /tmp/chartmuseum-data
echo "Deploying RIC infra components [$RICINFRA_COMPONENTS]"
echo "Helm Release Name: $RELEASE_NAME"
app: {{ include "common.namespace.infra" . }}-{{ include "common.name.chartmuseum" . }}
release: {{ .Release.Name }}
spec:
+ securityContext:
+ fsGroup: 0
+ runAsUser: 0
hostname: {{ .Chart.Name }}
imagePullSecrets:
- name: {{ include "common.repositoryCred" . }}
spec:
template:
spec:
+ imagePullSecrets:
+ - name: {{ include "common.repositoryCred" . }}
containers:
- name: cert-copy
- image: alpine
+ image: {{ include "common.repository" . }}/{{ .Values.chartmuseum.job.image.name }}:{{ .Values.chartmuseum.job.image.tag }}
+ imagePullPolicy: {{ include "common.pullPolicy" . }}
command: [ "/bin/sh","-c","cp -rL /var/run/helmcerts/..data/tls.crt /var/run/certs-copy/helmtls.crt"]
# command: ["tail", "-f", "/dev/null"]
volumeMounts:
+{{- if not .Values.chartmuseum.storageclass }}
+
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name: storageclass-{{ include "common.name.chartmuseum" . }}
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: WaitForFirstConsumer
+
+---
+
apiVersion: v1
kind: PersistentVolume
metadata:
required:
nodeSelectorTerms:
- matchExpressions:
- - key: helm-node
+ - key: local-storage
operator: In
values:
- enable
+{{- end -}}
+
spec:
accessModes:
- ReadWriteOnce
- storageClassName: storageclass-{{ include "common.name.chartmuseum" . }}
+ storageClassName: {{ .Values.chartmuseum.storageclass | default (printf "storageclass-%s" ( include "common.name.chartmuseum" . )) }}
resources:
requests:
storage: {{ .Values.chartmuseum.storagesize }}
+++ /dev/null
-kind: StorageClass
-apiVersion: storage.k8s.io/v1
-metadata:
- name: storageclass-{{ include "common.name.chartmuseum" . }}
-provisioner: kubernetes.io/no-provisioner
-volumeBindingMode: WaitForFirstConsumer
# Default values for nexus.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
-repository: "registry.hub.docker.com"
+repository: "docker.io"
imagePullPolicy: IfNotPresent
repositoryCred: docker-reg-cred
helmRepositoryCert: xapp-mgr-certs
chartmuseum:
+ repositoryOverride: "docker.io"
replicaCount: 1
+ job:
+ image:
+ name: alpine
+ tag: latest
+
# This is designed to be deployed using local image
image:
name: chartmuseum/chartmuseum
tag: v0.8.2
-
+# Specify a storage class to bypass the local storage definition.
+# storageclass:
storagesize: 2Gi
- datapath: /tmp/chartmuseum-data/
+ datapath: /opt/data/chartmuseum-data
# See the License for the specific language governing permissions and #
# limitations under the License. #
################################################################################
-
-
OVERRIDEYAML=$1
-
-
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
-
source $DIR/../etc/tiller.conf
if [ -z "$RICINFRA_RELEASE_NAME" ];then
- RELEASE_NAME=$helm_release_name
+ RELEASE_NAME=$helm_release_name
else
- RELEASE_NAME=$RICINFRA_RELEASE_NAME
+ RELEASE_NAME=$RICINFRA_RELEASE_NAME
fi
+
# Namespace configuration
if [ -z "$RICPLT_NAMESPACE" ];then
- PLT_NAMESPACE=$plt_namespace
+ PLT_NAMESPACE=$plt_namespace
else
- PLT_NAMESPACE=$RICPLT_NAMESPACE
+ PLT_NAMESPACE=$RICPLT_NAMESPACE
fi
+
if [ -z "$RICXAPP_NAMESPACE" ];then
- XAPP_NAMESPACE=$xapp_namespace
+ XAPP_NAMESPACE=$xapp_namespace
else
- XAPP_NAMESPACE=$RICXAPP_NAMESPACE
+ XAPP_NAMESPACE=$RICXAPP_NAMESPACE
fi
+
if [ -z "$RICAUX_NAMESPACE" ];then
- AUX_NAMESPACE=$aux_namespace
+ AUX_NAMESPACE=$aux_namespace
else
- AUX_NAMESPACE=$RICAUX_NAMESPACE
+ AUX_NAMESPACE=$RICAUX_NAMESPACE
fi
+
if [ -z "$RICINFRA_NAMESPACE" ];then
- INFRA_NAMESPACE=$infra_namespace
+ INFRA_NAMESPACE=$infra_namespace
else
- INFRA_NAMESPACE=$RICINFRA_NAMESPACE
+ INFRA_NAMESPACE=$RICINFRA_NAMESPACE
fi
if [ -z "$RIC_COMMON_OVERRIDE" ];then
- COMMON_OVERRIDE="--set global.namespace.platform=$PLT_NAMESPACE --set global.namespace.xapp=$XAPP_NAMESPACE --set global.namespace.aux=$AUX_NAMESPACE --set global.namespace.infra=$INFRA_NAMESPACE"
+ COMMON_OVERRIDE="--set global.namespace.platform=$PLT_NAMESPACE --set global.namespace.xapp=$XAPP_NAMESPACE --set global.namespace.aux=$AUX_NAMESPACE --set global.namespace.infra=$INFRA_NAMESPACE"
else
- COMMON_OVERRIDE=$RIC_COMMON_OVERRIDE
+ COMMON_OVERRIDE=$RIC_COMMON_OVERRIDE
fi
-
-
RICINFRA_COMPONENTS="xapp-tiller"
echo "Deploying RIC infra components [$RICINFRA_COMPONENTS]"