This patch fixes bugs in ONAP portal and AAF.
Signed-off-by: Zhe Huang <zhehuang@research.att.com>
Change-Id: Ic626e84eba05fcaac14e8f15ba4c95b8a1eac61e
AUX_COMMON_CHART_VERSION=$(cat $ROOT_DIR/../ric-common/Common-Template/helm/aux-common/Chart.yaml | grep version | awk '{print $2}')
helm package -d /tmp $ROOT_DIR/../ric-common/Common-Template/helm/aux-common
-cp /tmp/aux-common-$COMMON_CHART_VERSION.tgz $HELM_HOME/repository/local/
+cp /tmp/aux-common-$AUX_COMMON_CHART_VERSION.tgz $HELM_HOME/repository/local/
helm repo index $HELM_HOME/repository/local/
#-------------------------------------------------------------------------
common:
- releasePrefix: r3
+ releasePrefix: r4
# If a local docker registry is used, please specify it using the following option
# localregistry: nexus3.o-ran-sc.org:10004
image:
registry: nexus3.o-ran-sc.org:10004/o-ran-sc
name: ric-dashboard
- tag: 2.0.0
+ tag: 2.0.1
cipher:
enc:
key: AGLDdG4D04BKm2IxIWEr8o==
fi
+LABELFOUND=false
+for f in $NODENAME; do
+ LABEL=$(kubectl describe node $f | grep "portal-storage=enable")
+ if [ ! -z "$LABEL" ]; then
+ LABELFOUND=true
+ fi
+done
+
+if ! $LABELFOUND; then
+ echo "***********************************************************************************************"
+ echo "* ERROR!!!!!!!!!!!!! *"
+ echo "***********************************************************************************************"
+  echo "* Node label \"portal-storage=enable\" is not found in any of the cluster nodes.               *"
+  echo "* Please pick a node and label it using the following command.                               *"
+ echo "* kubectl label --overwrite nodes <YOUR_NODE_NAME> portal-storage=enable *"
+ echo "***********************************************************************************************"
+
+ exit 1
+fi
+
+
+
if ! kubectl get ns ${AUXNAMESPACE:-ricaux}> /dev/null 2>&1; then
kubectl create ns ${AUXNAMESPACE:-ricaux}
fi
kubectl create ns onap
fi
+HTTP response body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"jobs.batch \"ricaux-portal-db-config\" is forbidden: User \"system:serviceaccount:ricaux:default\" cannot get resource \"jobs/status\" in API group \"batch\" in the namespace \"ricaux\"","reason":"Forbidden","details":{"name":"ricaux-portal-db-config","group":"batch","kind":"jobs"},"code":403}
+
+
+HTTP response body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"pods is forbidden: User \"system:serviceaccount:onap:default\" cannot list resource \"pods\" in API group \"\" in the namespace \"onap\"","reason":"Forbidden","details":{"kind":"pods"},"code":403}
+
+
+
+echo Add cluster roles
+ cat >ricaux-role.yaml <<EOF
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: ricaux-system-default
+rules:
+ - apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["list"]
+ - apiGroups: ["batch"]
+ resources: ["jobs/status"]
+ verbs: ["get"]
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: ricaux-system-default
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ricaux-system-default
+subjects:
+ - kind: ServiceAccount
+ name: default
+ namespace: ${AUXNAMESPACE:-ricaux}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: onap-system-default
+rules:
+ - apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["list"]
+ - apiGroups: ["apps"]
+ resources: ["replicasets/status"]
+ verbs: ["get"]
+ - apiGroups: ["batch"]
+ resources: ["jobs/status"]
+ verbs: ["get"]
+ - apiGroups: ["apps"]
+ resources: ["deployments", "statefulsets"]
+ verbs: ["get"]
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: onap-system-default
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: onap-system-default
+subjects:
+ - kind: ServiceAccount
+ name: default
+ namespace: onap
+EOF
+kubectl apply -f ricaux-role.yaml
+rm ricaux-role.yaml
+
kubectl create configmap -n ${AUXNAMESPACE:-ricaux} aux-recipe --from-file=recipe=$OVERRIDEYAML
+
+echo "Clean up dockerdata-nfs directory"
+rm -rf /dockerdata-nfs
+
+
echo "Deploying AUX components [$COMPONENTS]"
path: /etc/localtime
- name: {{ include "common.fullname" . }}-data
persistentVolumeClaim:
- claimName: {{ include "common.namespace" . }}-aaf-sshsm-data
+ claimName: {{ .Release.Name }}-aaf-sshsm-data
imagePullSecrets:
- name: "{{ include "common.namespace" . }}-docker-registry-key"
- /root/job_complete.py
args:
- -j
- - "{{ include "common.namespace" . }}-aaf-sshsm-distcenter"
+ - "{{ .Release.Name }}-aaf-sshsm-distcenter"
env:
- name: NAMESPACE
valueFrom:
path: /etc/localtime
- name: {{ include "common.fullname" . }}-data
persistentVolumeClaim:
- claimName: {{ include "common.namespace" . }}-aaf-sshsm-data
+ claimName: {{ .Release.Name }}-aaf-sshsm-data
- name: {{ include "common.fullname" . }}-dbus
persistentVolumeClaim:
- claimName: {{ include "common.namespace" . }}-aaf-sshsm-dbus
+ claimName: {{ .Release.Name }}-aaf-sshsm-dbus
- name: {{ include "common.fullname" . }}-secrets
secret:
- secretName: {{ include "common.namespace" . }}-aaf-sshsm
+ secretName: {{ .Release.Name }}-aaf-sshsm
imagePullSecrets:
- name: "{{ include "common.namespace" . }}-docker-registry-key"
version: 0.1.0
repository: "file://./subcharts/certificate-manager"
condition: certificate-manager.enabled
+ - name: danm-networks
+ version: 1.0.0
+ repository: "file://./subcharts/danm-networks"
+ condition: danm-networks.enabled
{{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
{{- end }}
+ {{- if $.Values.global }}
+ {{- if $.Values.global.danm_networks }}
+ {{- $networklist := list }}
+ {{- range $network := $.Values.global.danm_networks }}
+ {{- if $network.tenants }}
+ {{- if $network.tenants.kong }}
+ {{- $networklist = append $networklist $network }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- if $networklist }}
+ danm.k8s.io/interfaces: |
+ [
+ {{- range $network := $networklist }}
+ {{- printf "\n {\"clusterNetwork\": \"%s\"" $network.name }}
+ {{- if $network.tenants.kong.ip }}
+ {{- printf ", \"ip\": \"%s\"" $network.tenants.kong.ip }}
+ {{- else }}
+ {{- printf ", \"ip\": \"dynamic\"" }}
+ {{- end }}
+ {{- if $network.tenants.kong.ip6 }}
+ {{- printf ", \"ip6\": \"%s\"" $network.tenants.kong.ip6 }}
+ {{- end }}
+ {{- if $network.tenants.kong.proutes }}
+ {{- printf ", \"proutes\": {" }}
+ {{- range $subnet, $gw := $network.tenants.kong.proutes }}
+ {{- if eq $subnet ( first ( keys $network.tenants.kong.proutes ))}}
+ {{- printf "\"%s\": \"%s\"" $subnet $gw }}
+ {{- else }}
+ {{- printf ", \"%s\": \"%s\"" $subnet $gw }}
+ {{- end }}
+ {{- end }}
+ {{- printf "}" }}
+ {{- end }}
+ {{- printf "}" }}
+ {{- end }}
+ ]
+ {{- end }}
+ {{- end }}
+ {{- end }}
labels:
{{- include "kong.metaLabels" . | nindent 8 }}
app.kubernetes.io/component: app
- /root/job_complete.py
args:
- --job-name
- - {{ include "common.namespace" . }}-portal-db-config
+ - {{ .Release.Name }}-portal-db-config
env:
- name: NAMESPACE
valueFrom:
-/* Create RIC Dashboard app */
-
INSERT IGNORE INTO `fn_app` (`app_id`, `app_name`, `app_image_url`, `app_description`, `app_notes`, `app_url`, `app_alternate_url`, `app_rest_endpoint`, `ml_app_name`, `ml_app_admin_id`, `mots_id`, `app_password`, `open`, `enabled`, `thumbnail`, `app_username`, `ueb_key`, `ueb_secret`, `ueb_topic_name`, `app_type`,`auth_central`,`auth_namespace`) VALUES
(12, 'RIC-Dashboard', '', NULL, NULL, '{{.Values.config.ricdashboardProtocol}}://{{.Values.config.ricdashboardHostName}}:{{.Values.config.ricdashboardPort}}', '','http://service-ricaux-dashboard-http:8080/api/v3', '', '', NULL, 'password', 'N', 'Y', NULL, 'Default', 'ueb_key', 'ueb_secret', 'ECOMP-PORTAL-OUTBOX', 1,'N',NULL);
-Subproject commit 91184df84b97b4d867f3324c5dc3e6a6a734134a
+Subproject commit 98d784eba3506478481d5a22edbee6ca0bdb68da