To work around the git repo access issue, the cgts-client and distcloud-client repositories are cloned into temp before the docker build.

git clone --depth 1 --branch master https://opendev.org/starlingx/config.git
git clone --depth 1 --branch master https://opendev.org/starlingx/distcloud-client.git
patch -p1 < ../../cgtsclient-insecure.patch
source ./admin_openrc.sh
export | grep OS_AUTH_URL
export | grep OS_USERNAME
export | grep OS_PASSWORD
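# Sketch: if no admin_openrc.sh is at hand, the three variables checked above can
# be exported directly (the URL and credentials below are placeholders, not real values)
export OS_AUTH_URL=http://<oam ip>:5000/v3
export OS_USERNAME=admin
export OS_PASSWORD=<password>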
docker-compose run --rm --no-deps --entrypoint=pytest api /tests/unit /tests/integration

## Running the tests with an O-Cloud

Prerequisite: when testing against a real O-Cloud, download the openrc file from the O-Cloud dashboard, e.g.

docker-compose run --rm --no-deps --entrypoint=pytest api /tests/unit /tests/integration-ocloud

docker-compose run --rm --no-deps --entrypoint=pytest api /tests/integration-ocloud --log-level=DEBUG --log-file=/test
## Tear down containers

docker-compose down --remove-orphans
## Test with local virtualenv

python3.8 -m venv .venv && source .venv/bin/activate
pip install -r requirements.txt -c constraints.txt
pip install -r requirements-test.txt
# pip install -e o2dms -e o2common
pytest tests/integration
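# Optional sketch: run a single test directory or test case instead of the whole
# suite (standard pytest usage; the tests/unit path is assumed from the container
# test paths used above)
pytest tests/unit -v
pytest tests/integration -k <test_name_pattern>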
Test O2DMS with docker-compose
==============================
## setup account over INF and get token

NAMESPACE="kube-system"

cat <<EOF > admin-login.yaml
  namespace: kube-system
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
  apiGroup: rbac.authorization.k8s.io
- kind: ServiceAccount
  namespace: kube-system
EOF

kubectl apply -f admin-login.yaml
TOKEN_DATA=$(kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep ${USER} | awk '{print $1}') | grep "token:" | awk '{print $2}')
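# Sketch: print the token so it can be copied to the machine that will run the
# remote CLI (the same value is referred to below as <TOKEN_DATA from INF>)
echo $TOKEN_DATA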
## setup remote cli to access Kubernetes cluster over INF

sudo apt-get install -y apt-transport-https
echo "deb http://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial main" | \
sudo tee -a /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
sudo apt-get install -y kubectl

source <(kubectl completion bash) # setup autocomplete in bash into the current shell, bash-completion package should be installed first.
echo "source <(kubectl completion bash)" >> ~/.bashrc # add autocomplete permanently to your bash shell.
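# Quick sanity check that the kubectl binary is installed (client side only,
# no cluster connection yet); shown as a sketch
kubectl version --client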
wget https://get.helm.sh/helm-v3.5.3-linux-amd64.tar.gz
tar xvf helm-v3.5.3-linux-amd64.tar.gz
sudo cp linux-amd64/helm /usr/local/bin

source <(helm completion bash)
echo "source <(helm completion bash)" >> ~/.bashrc
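# Quick sanity check of the helm installation (sketch)
helm version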
TOKEN_DATA=<TOKEN_DATA from INF>

kubectl config set-cluster inf-cluster --server=https://${OAM_IP}:6443 --insecure-skip-tls-verify
kubectl config set-credentials ${USER} --token=$TOKEN_DATA
kubectl config set-context ${USER}@inf-cluster --cluster=inf-cluster --user ${USER} --namespace=${NAMESPACE}
kubectl config use-context ${USER}@inf-cluster
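# Sketch: verify that the remote CLI can reach the INF cluster with the new
# context; any read-only query works
kubectl get nodes
kubectl get pods -A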
## setup local repo: o2imsrepo

helm repo add chartmuseum https://chartmuseum.github.io/charts
helm pull chartmuseum/chartmuseum # download chartmuseum-3.4.0.tgz to local
tar zxvf chartmuseum-3.4.0.tgz

export NODE_IP=<INF OAM IP>

cat <<EOF > chartmuseum-override.yaml

helm install chartmuseumrepo chartmuseum/chartmuseum -f chartmuseum-override.yaml

helm repo add o2imsrepo http://${NODE_IP}:30330
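# Sketch: refresh the repo index and confirm chartmuseum answers on the NodePort
# (chartmuseum serves its chart index at /index.yaml)
helm repo update
curl http://${NODE_IP}:30330/index.yaml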
helm repo add bitnami https://charts.bitnami.com/bitnami
helm pull bitnami/mysql
helm push mysql-8.8.16.tgz o2imsrepo
helm install my-release o2imsrepo/mysql
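# Note: "helm push" to a chartmuseum repo is provided by the chartmuseum push
# plugin, not by helm itself; if the push command above is not recognized,
# install the plugin first (sketch)
helm plugin install https://github.com/chartmuseum/helm-push
helm repo update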
## Verify CFW over INF: Test with CNF firewall-host-netdevice

## Setup host netdevice over INF

ssh sysadmin@<inf oam IP>
sudo ip link add name veth11 type veth peer name veth12
sudo ip link add name veth21 type veth peer name veth22
sudo ip link | grep veth
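# Depending on the setup, the veth interfaces may also need to be brought up
# (assumption; skip if the CNF manages the link state itself)
sudo ip link set veth11 up && sudo ip link set veth12 up
sudo ip link set veth21 up && sudo ip link set veth22 up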
## verify CNF over INF

git clone https://github.com/biny993/firewall-host-netdevice.git

cat <<EOF > cfw-hostdev-override.yaml
  pullPolicy: IfNotPresent

# global vars for parent and subcharts.
  unprotectedNetPortVpg: veth11
  unprotectedNetPortVfw: veth12
  unprotectedNetCidr: 10.10.1.0/24
  unprotectedNetGwIp: 10.10.1.1

  protectedNetPortVfw: veth21
  protectedNetPortVsn: veth22
  protectedNetCidr: 10.10.2.0/24
  protectedNetGwIp: 10.10.2.1

  vfwPrivateIp0: 10.10.1.1
  vfwPrivateIp1: 10.10.2.1

  vpgPrivateIp0: 10.10.1.2

  vsnPrivateIp0: 10.10.2.2
EOF

helm install cfw1 firewall-host-netdevice -f cfw-hostdev-override.yaml
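# Sketch: verify the release and wait for the firewall pods to reach Running state
helm list
kubectl get pods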
## push the chart to o2imsrepo

tar -zcvf firewall-host-netdevice-1.0.0.tgz firewall-host-netdevice/
helm push firewall-host-netdevice-1.0.0.tgz o2imsrepo

helm search repo firewall

helm install cfw1 o2imsrepo/firewall-host-netdevice -f cfw-hostdev-override.yaml
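# Sketch: confirm the chart installs cleanly from o2imsrepo; note that a leftover
# cfw1 release from the previous section would have to be removed first
# ("helm uninstall cfw1") for this install to succeed
helm list
kubectl get pods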
## build docker image for o2 services
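A minimal sketch, assuming the docker-compose.yaml in this repo defines the o2 service images:

docker-compose build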
## bootstrap o2 service with docker-compose

mkdir -p temp/kubeconfig/
cp <your .kube/config> temp/kubeconfig/

source ./admin_openrc.sh
export K8S_KUBECONFIG=/etc/kubeconfig/config
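# Sketch: start the o2 services in the background before tailing their logs
docker-compose up -d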
docker logs -f o2_redis_pubsub_1
## simulate SMO to deploy CFW

curl --location --request GET 'http://localhost:5005/o2ims_infrastructureInventory/v1/deploymentManagers'

export dmsId=<DMS ID>

curl --location --request POST "http://localhost:5005/o2dms/v1/${dmsId}/O2dms_DeploymentLifecycle/NfDeploymentDescriptor" \
--header 'Content-Type: application/json' \
"description": "demo nf deployment descriptor",
"artifactRepoUrl": "http://128.224.115.15:30330",
"artifactName": "firewall-host-netdevice",
"{\n \"image\": {\n \"repository\": \"ubuntu\",\n \"tag\": 18.04,\n \"pullPolicy\": \"IfNotPresent\"\n },\n \"resources\": {\n \"cpu\": 2,\n \"memory\": \"2Gi\",\n \"hugepage\": \"256Mi\",\n \"unprotectedNetPortVpg\": \"veth11\",\n \"unprotectedNetPortVfw\": \"veth12\",\n \"unprotectedNetCidr\": \"10.10.1.0/24\",\n \"unprotectedNetGwIp\": \"10.10.1.1\",\n \"protectedNetPortVfw\": \"veth21\",\n \"protectedNetPortVsn\": \"veth22\",\n \"protectedNetCidr\": \"10.10.2.0/24\",\n \"protectedNetGwIp\": \"10.10.2.1\",\n \"vfwPrivateIp0\": \"10.10.1.1\",\n \"vfwPrivateIp1\": \"10.10.2.1\",\n \"vpgPrivateIp0\": \"10.10.1.2\",\n \"vsnPrivateIp0\": \"10.10.2.2\"\n }\n}",
"outputParams": "{\"output1\": 100}"
curl --location --request GET "http://localhost:5005/o2dms/v1/${dmsId}/O2dms_DeploymentLifecycle/NfDeploymentDescriptor"
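# Sketch: pretty-print the same response to read the descriptor id referenced
# below as <NfDeploymentDescriptorId> (assumes jq is installed)
curl -s "http://localhost:5005/o2dms/v1/${dmsId}/O2dms_DeploymentLifecycle/NfDeploymentDescriptor" | jq .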
curl --location --request POST "http://localhost:5005/o2dms/v1/${dmsId}/O2dms_DeploymentLifecycle/NfDeployment" \
--header 'Content-Type: application/json' \
"description": "demo nf deployment",
"descriptorId": "<NfDeploymentDescriptorId>",
"parentDeploymentId": ""

curl --location --request GET "http://localhost:5005/o2dms/v1/${dmsId}/O2dms_DeploymentLifecycle/NfDeployment"

export NfDeploymentId=<NfDeployment Id>
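# Sketch (assumed endpoint, mirroring the DELETE path used at the end of this
# section): poll the single deployment until its status shows the CNF is installed
curl --location --request GET "http://localhost:5005/o2dms/v1/${dmsId}/O2dms_DeploymentLifecycle/NfDeployment/${NfDeploymentId}"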
docker logs -f o2_redis_pubsub_1

kubectl logs -f cfw100-sink-host-netdevice-59bf6fbd4b-845p4

## watch traffic stats
Open a browser at http://<NODE_IP>:30667
curl --location --request DELETE "http://localhost:5005/o2dms/v1/${dmsId}/O2dms_DeploymentLifecycle/NfDeployment/${NfDeploymentId}"
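# Sketch: confirm the deployment is gone by listing NfDeployment again
curl --location --request GET "http://localhost:5005/o2dms/v1/${dmsId}/O2dms_DeploymentLifecycle/NfDeployment"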