To work around the git repo access issue, cgts-client and distcloud-client are cloned into `temp` before the docker build.
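The clone commands below assume they run inside that `temp` directory at the repo root (a minimal sketch; the exact layout is an assumption):

```bash
# assumed layout: clones live under ./temp next to the Dockerfile
mkdir -p temp
cd temp
```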
```bash
git clone --branch master https://opendev.org/starlingx/config.git
git clone --depth 1 --branch r/stx.7.0 https://opendev.org/starlingx/distcloud-client.git
git clone --depth 1 --branch master https://opendev.org/starlingx/fault.git
```

Apply the insecure patch from inside the cloned cgts-client directory (the `../../` in the path implies the command runs two levels below `temp`):

```bash
patch -p1 < ../../cgtsclient-insecure.patch
```
```bash
source ./admin_openrc.sh
export | grep OS_AUTH_URL
export | grep OS_USERNAME
export | grep OS_PASSWORD
```
```bash
docker-compose run --rm --no-deps --entrypoint=pytest api /tests/unit /tests/integration
```
## Running the tests with an O-Cloud
Prerequisite: when testing against a real O-Cloud, download the openrc file from the O-Cloud dashboard, e.g.
```bash
docker-compose run --rm --no-deps --entrypoint=pytest api /tests/unit /tests/integration-ocloud

docker-compose run --rm --no-deps --entrypoint=pytest api /tests/integration-ocloud --log-level=DEBUG --log-file=/test
```
## Tear down containers
```bash
docker-compose down --remove-orphans
```
## Test with local virtualenv
```bash
python3.8 -m venv .venv && source .venv/bin/activate
pip install -r requirements.txt -c constraints.txt
pip install -r requirements-test.txt
# pip install -e o2dms -e o2common
pytest tests/integration
```
Test O2DMS with docker-compose
==============================
## Set up account over INF and get token
```bash
USER="admin-user"        # assumed account name; referenced via ${USER} below
NAMESPACE="kube-system"

cat <<EOF > admin-login.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: ${USER}
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: ${USER}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: ${USER}
  namespace: kube-system
EOF

kubectl apply -f admin-login.yaml
TOKEN_DATA=$(kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep ${USER} | awk '{print $1}') | grep "token:" | awk '{print $2}')
```
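On Kubernetes 1.24 and later, token secrets are no longer auto-created for service accounts, so the `grep`-based extraction above can come back empty. A hedged alternative on such clusters is the `kubectl create token` subcommand:

```bash
# assumes kubectl >= 1.24 and a cluster supporting the TokenRequest API
TOKEN_DATA=$(kubectl -n ${NAMESPACE} create token ${USER})
```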
## Set up remote CLI to access the Kubernetes cluster over INF
```bash
sudo apt-get install -y apt-transport-https
echo "deb http://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial main" | \
sudo tee -a /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update    # refresh package lists after adding the repo
sudo apt-get install -y kubectl

source <(kubectl completion bash) # set up autocomplete in the current shell; the bash-completion package must be installed first
echo "source <(kubectl completion bash)" >> ~/.bashrc # add autocomplete permanently to your bash shell
```
```bash
wget https://get.helm.sh/helm-v3.5.3-linux-amd64.tar.gz    # fetch helm (wget assumed; any fetcher works)
tar xvf helm-v3.5.3-linux-amd64.tar.gz
sudo cp linux-amd64/helm /usr/local/bin

source <(helm completion bash)
echo "source <(helm completion bash)" >> ~/.bashrc
```
```bash
TOKEN_DATA=<TOKEN_DATA from INF>
USER="admin-user"        # same account created over INF above (assumed)
NAMESPACE="kube-system"
OAM_IP=<INF OAM IP>

kubectl config set-cluster inf-cluster --server=https://${OAM_IP}:6443 --insecure-skip-tls-verify
kubectl config set-credentials ${USER} --token=$TOKEN_DATA
kubectl config set-context ${USER}@inf-cluster --cluster=inf-cluster --user ${USER} --namespace=${NAMESPACE}
kubectl config use-context ${USER}@inf-cluster
```
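A quick sanity check that the new context is usable:

```bash
kubectl get nodes -o wide   # should list the INF cluster node(s)
```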
## Set up local repo: o2imsrepo
```bash
helm repo add chartmuseum https://chartmuseum.github.io/charts
helm pull chartmuseum/chartmuseum   # downloads e.g. chartmuseum-3.4.0.tgz
tar zxvf chartmuseum-3.4.0.tgz
```
```bash
export NODE_IP=<INF OAM IP>

cat <<EOF > chartmuseum-override.yaml
# assumed values: enable the ChartMuseum API and expose it on the
# NodePort (30330) that the o2imsrepo URL below points at
env:
  open:
    DISABLE_API: false
service:
  type: NodePort
  nodePort: 30330
EOF

helm install chartmuseumrepo chartmuseum/chartmuseum -f chartmuseum-override.yaml
```
```bash
helm repo add o2imsrepo http://${NODE_IP}:30330
```
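The `helm push` calls below rely on helm's ChartMuseum push plugin, which is not bundled with helm 3. If the command is missing, it can be installed as follows (newer plugin versions register the command as `helm cm-push`):

```bash
# assumes network access to github.com from this host
helm plugin install https://github.com/chartmuseum/helm-push
```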
```bash
helm repo add bitnami https://charts.bitnami.com/bitnami
helm pull bitnami/mysql
helm push mysql-8.8.16.tgz o2imsrepo
helm repo update   # refresh the local index so the pushed chart is visible

helm install my-release o2imsrepo/mysql
```
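Confirming that the chart served from o2imsrepo actually deploys is a useful check of the repo wiring:

```bash
helm list                            # my-release should show as deployed
kubectl get pods | grep my-release   # the mysql pod should reach Running
```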
## Verify CFW over INF: test with CNF firewall-host-netdevice
## Set up host netdevice over INF
```bash
ssh sysadmin@<inf oam IP>
sudo ip link add name veth11 type veth peer name veth12
sudo ip link add name veth21 type veth peer name veth22
sudo ip link | grep veth
```
## Verify CNF over INF
```bash
git clone https://github.com/biny993/firewall-host-netdevice.git
```
```bash
cat <<EOF > cfw-hostdev-override.yaml
# values below mirror the inputParams JSON used in the SMO simulation further down
image:
  repository: ubuntu
  tag: 18.04
  pullPolicy: IfNotPresent

resources:
  cpu: 2
  memory: 2Gi
  hugepage: 256Mi

#global vars for parent and subcharts.
unprotectedNetPortVpg: veth11
unprotectedNetPortVfw: veth12
unprotectedNetCidr: 10.10.1.0/24
unprotectedNetGwIp: 10.10.1.1

protectedNetPortVfw: veth21
protectedNetPortVsn: veth22
protectedNetCidr: 10.10.2.0/24
protectedNetGwIp: 10.10.2.1

vfwPrivateIp0: 10.10.1.1
vfwPrivateIp1: 10.10.2.1

vpgPrivateIp0: 10.10.1.2

vsnPrivateIp0: 10.10.2.2
EOF
```
```bash
helm install cfw1 firewall-host-netdevice -f cfw-hostdev-override.yaml
```
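Before reinstalling the same release name from o2imsrepo below, check that the pods came up and remove the locally installed release (an assumed cleanup step, since the name `cfw1` is reused):

```bash
kubectl get pods    # the cfw1 pods should reach Running
helm uninstall cfw1 # free the release name for the repo-based install
```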
## Push the chart to o2imsrepo
```bash
tar -zcvf firewall-host-netdevice-1.0.0.tgz firewall-host-netdevice/
helm push firewall-host-netdevice-1.0.0.tgz o2imsrepo
helm repo update   # refresh the local index so the pushed chart is visible

helm search repo firewall

helm install cfw1 o2imsrepo/firewall-host-netdevice -f cfw-hostdev-override.yaml
```
## Build docker images for o2 services
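The build commands are elided here; assuming the images are defined in this repo's docker-compose.yml, a typical invocation would be:

```bash
# hedged sketch: build every image declared in docker-compose.yml
docker-compose build
```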
## Bootstrap o2 services with docker-compose
```bash
mkdir -p temp/kubeconfig/
cp <your .kube/config> temp/kubeconfig/

source ./admin_openrc.sh
export K8S_KUBECONFIG=/etc/kubeconfig/config
docker-compose up -d    # start the o2 services (assumed step; the log tail below requires running containers)
docker logs -f o2_redis_pubsub_1
```
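To confirm that all services came up, a quick status check (run from the same directory as docker-compose.yml):

```bash
docker-compose ps   # every service should be in the Up state
```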
## Simulate SMO to deploy CFW
```bash
curl --location --request GET 'http://localhost:5005/o2ims_infrastructureInventory/v1/deploymentManagers'

export dmsId=<DMS ID>
```
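If `jq` is installed, the id can be captured without copy-pasting; note that the `deploymentManagerId` field name is an assumption about the response shape and should be checked against the actual payload:

```bash
# hypothetical one-liner; verify the field name against the real response first
export dmsId=$(curl -s 'http://localhost:5005/o2ims_infrastructureInventory/v1/deploymentManagers' | jq -r '.[0].deploymentManagerId')
```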
Note: the URL must be double-quoted so that `${dmsId}` expands; with single quotes curl would receive the literal string. The `name` values in these payloads are user-chosen labels.

```bash
curl --location --request POST "http://localhost:5005/o2dms/v1/${dmsId}/O2dms_DeploymentLifecycle/NfDeploymentDescriptor" \
--header 'Content-Type: application/json' \
--data-raw '{
  "name": "cfwdesc1",
  "description": "demo nf deployment descriptor",
  "artifactRepoUrl": "http://128.224.115.15:30330",
  "artifactName": "firewall-host-netdevice",
  "inputParams":
  "{\n \"image\": {\n \"repository\": \"ubuntu\",\n \"tag\": 18.04,\n \"pullPolicy\": \"IfNotPresent\"\n },\n \"resources\": {\n \"cpu\": 2,\n \"memory\": \"2Gi\",\n \"hugepage\": \"256Mi\",\n \"unprotectedNetPortVpg\": \"veth11\",\n \"unprotectedNetPortVfw\": \"veth12\",\n \"unprotectedNetCidr\": \"10.10.1.0/24\",\n \"unprotectedNetGwIp\": \"10.10.1.1\",\n \"protectedNetPortVfw\": \"veth21\",\n \"protectedNetPortVsn\": \"veth22\",\n \"protectedNetCidr\": \"10.10.2.0/24\",\n \"protectedNetGwIp\": \"10.10.2.1\",\n \"vfwPrivateIp0\": \"10.10.1.1\",\n \"vfwPrivateIp1\": \"10.10.2.1\",\n \"vpgPrivateIp0\": \"10.10.1.2\",\n \"vsnPrivateIp0\": \"10.10.2.2\"\n }\n}",
  "outputParams": "{\"output1\": 100}"
}'
```
```bash
curl --location --request GET "http://localhost:5005/o2dms/v1/${dmsId}/O2dms_DeploymentLifecycle/NfDeploymentDescriptor"
```
```bash
curl --location --request POST "http://localhost:5005/o2dms/v1/${dmsId}/O2dms_DeploymentLifecycle/NfDeployment" \
--header 'Content-Type: application/json' \
--data-raw '{
  "name": "cfw100",
  "description": "demo nf deployment",
  "descriptorId": "<NfDeploymentDescriptorId>",
  "parentDeploymentId": ""
}'
```
```bash
curl --location --request GET "http://localhost:5005/o2dms/v1/${dmsId}/O2dms_DeploymentLifecycle/NfDeployment"

export NfDeploymentId=<NfDeployment Id>
```
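The deployment can be polled by id while it converges; this per-id path is the same one the DELETE at the end of this section uses:

```bash
curl --location --request GET "http://localhost:5005/o2dms/v1/${dmsId}/O2dms_DeploymentLifecycle/NfDeployment/${NfDeploymentId}"
```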
Watch the deployment progress in the service and CNF logs:

```bash
docker logs -f o2_redis_pubsub_1
kubectl logs -f cfw100-sink-host-netdevice-59bf6fbd4b-845p4   # pod hash suffix will differ
```
## Watch traffic stats
Open a browser at http://<NODE_IP>:30667
```bash
curl --location --request DELETE "http://localhost:5005/o2dms/v1/${dmsId}/O2dms_DeploymentLifecycle/NfDeployment/${NfDeploymentId}"
```