## Deploy O2 services with the Helm chart on the O-Cloud controller node (as the admin user)
```sh
-export NAMESPACE=orano2
+export NAMESPACE=oran-o2
kubectl create ns ${NAMESPACE}
cd /home/sysadmin/
source /etc/platform/openrc
cat <<EOF>ocloud-override.yaml
o2ims:
- imagePullSecrets: admin-orano2-registry-secret
+ imagePullSecrets: admin-oran-o2-registry-secret
+ serviceaccountname: admin-oran-o2
image:
repository: registry.local:9001/admin/o2imsdms
tag: 0.1.4
apiVersion: v1
appVersion: "1.0"
description: A Helm chart to deploy O2 Services
-name: orano2
+name: oran-o2
version: 0.1.0
kind: ServiceAccount
metadata:
name: {{ .Values.o2ims.serviceaccountname }}
- namespace: orano2
+ namespace: oran-o2
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
name: {{ .Values.o2ims.serviceaccountname }}
subjects:
- kind: ServiceAccount
- namespace: orano2
+ namespace: oran-o2
name: {{ .Values.o2ims.serviceaccountname }}
roleRef:
apiGroup: rbac.authorization.k8s.io
memory: 2Gi
global:
- namespace: orano2
+ namespace: oran-o2
o2ims:
- serviceaccountname: admin-orano2
+ serviceaccountname: admin-oran-o2
image:
repository: registry.local:9001/admin/o2imsdms
tag: 0.1.1
echo "source <(helm completion bash)" >> ~/.bashrc
OAM_IP=<INF OAM IP>
- NAMESPACE=orano2
+ NAMESPACE=oran-o2
TOKEN_DATA=<TOKEN_DATA from INF>
USER="admin-user"
.. code:: shell
- export NAMESPACE=orano2
+ export NAMESPACE=oran-o2
kubectl create ns ${NAMESPACE}
# default kube config location is ~/.kube/config
cat <<EOF>o2service-override.yaml
o2ims:
- imagePullSecrets: admin-orano2-registry-secret
+ serviceaccountname: admin-oran-o2
image:
repository: nexus3.o-ran-sc.org:10004/o-ran-sc/pti-o2imsdms
tag: 1.0.0