## Building containers

To work around the git repository access issue, the cgts-client and distcloud-client repositories are cloned into a `temp` directory before the Docker images are built:

```bash
mkdir -p temp && cd temp
git clone --depth 1 --branch master https://opendev.org/starlingx/config.git
git clone --depth 1 --branch master https://opendev.org/starlingx/distcloud-client.git
cd ..
```

```bash
docker-compose build
```

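Once the build finishes, the freshly built images can be listed as a quick sanity check; a minimal sketch, assuming the compose project tags its images with the project name (o2):

```bash
# list the images produced by docker-compose build
docker images | grep -i o2
```
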
## Running the tests

```bash
source ./admin_openrc.sh
# confirm the OpenStack credentials are loaded into the environment
export | grep OS_AUTH_URL
export | grep OS_USERNAME
export | grep OS_PASSWORD
docker-compose up -d
docker-compose run --rm --no-deps --entrypoint=pytest api /tests/unit /tests/integration
```

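Pytest's standard selection flags also work through this entrypoint, which is handy when iterating on a single test; a sketch (the `-k` expression is only a placeholder):

```bash
# run only tests whose names match the given expression
docker-compose run --rm --no-deps --entrypoint=pytest api /tests/unit -k "test_name" -v
```
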
## Running the tests with an O-Cloud

Prerequisite: when testing against a real O-Cloud, download the openrc file from the O-Cloud dashboard, e.g.:

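The downloaded file typically exports the usual OpenStack credential variables; a sketch of what it may look like (all values are placeholders, not from the original document):

```bash
# admin_openrc.sh (example content; actual values come from your O-Cloud)
export OS_AUTH_URL=http://<oam ip>:5000/v3
export OS_USERNAME=admin
export OS_PASSWORD=<password>
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
```
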
```bash
docker-compose run --rm --no-deps --entrypoint=pytest api /tests/unit /tests/integration-ocloud

docker-compose run --rm --no-deps --entrypoint=pytest api /tests/integration-ocloud --log-level=DEBUG --log-file=/test
```

## Tear down containers

```bash
docker-compose down --remove-orphans
```

## Test with local virtualenv

```bash
python3.8 -m venv .venv && source .venv/bin/activate
pip install -r requirements.txt -c constraints.txt
pip install -r requirements-test.txt
pip install -e o2ims
# pip install -e o2dms -e o2common
pytest tests/integration
```

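The unit tests can be run the same way from inside the virtualenv; for example:

```bash
# run the unit suite with verbose output
pytest tests/unit -v
```
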
Test O2DMS with docker-compose
==============================

## setup account over INF and get token

```bash
USER="admin-user"       # service account name; reused in the steps below
NAMESPACE="kube-system"

cat <<EOF > admin-login.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: ${USER}
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: ${USER}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: ${USER}
  namespace: kube-system
EOF

kubectl apply -f admin-login.yaml
TOKEN_DATA=$(kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep ${USER} | awk '{print $1}') | grep "token:" | awk '{print $2}')
```

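Print the token so it can be copied to the machine where the remote CLI will run (next section):

```bash
# this value is pasted into TOKEN_DATA on the remote CLI host
echo ${TOKEN_DATA}
```
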
## setup remote cli to access kubernetes cluster over INF

```bash
sudo apt-get install -y apt-transport-https
echo "deb http://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial main" | \
  sudo tee -a /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
sudo apt-get install -y kubectl

source <(kubectl completion bash) # set up autocomplete in bash for the current shell; the bash-completion package should be installed first
echo "source <(kubectl completion bash)" >> ~/.bashrc # add autocomplete permanently to your bash shell

wget https://get.helm.sh/helm-v3.5.3-linux-amd64.tar.gz
tar xvf helm-v3.5.3-linux-amd64.tar.gz
sudo cp linux-amd64/helm /usr/local/bin

source <(helm completion bash)
echo "source <(helm completion bash)" >> ~/.bashrc
```

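A quick sanity check that both CLIs are installed and on the PATH:

```bash
kubectl version --client
helm version
```
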
```bash
OAM_IP=<INF OAM IP>
NAMESPACE=kube-system
TOKEN_DATA=<TOKEN_DATA from INF>
USER="admin-user"       # same service account created in the previous section

kubectl config set-cluster inf-cluster --server=https://${OAM_IP}:6443 --insecure-skip-tls-verify
kubectl config set-credentials ${USER} --token=$TOKEN_DATA
kubectl config set-context ${USER}@inf-cluster --cluster=inf-cluster --user ${USER} --namespace=${NAMESPACE}
kubectl config use-context ${USER}@inf-cluster

kubectl get pods -A
```

## setup local repo: o2imsrepo

```bash
helm repo add chartmuseum https://chartmuseum.github.io/charts
helm pull chartmuseum/chartmuseum # downloads chartmuseum-3.4.0.tgz to the local directory
tar zxvf chartmuseum-3.4.0.tgz

export NODE_IP=<INF OAM IP>

# expose ChartMuseum on NodePort 30330 and enable its API so charts can be pushed
cat <<EOF>chartmuseum-override.yaml
env:
  open:
    DISABLE_API: false
service:
  type: NodePort
  nodePort: 30330
EOF

helm install chartmuseumrepo chartmuseum/chartmuseum -f chartmuseum-override.yaml
kubectl get services

helm repo add o2imsrepo http://${NODE_IP}:30330
```

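Note: Helm v3.5 has no built-in push to a ChartMuseum repository, so the `helm push` calls below assume the ChartMuseum push plugin is installed (newer plugin versions rename the command to `helm cm-push`). A minimal sketch:

```bash
# install the ChartMuseum helm-push plugin
helm plugin install https://github.com/chartmuseum/helm-push
```
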
```bash
# verify the repo by pushing and installing a sample chart
helm repo add bitnami https://charts.bitnami.com/bitnami

helm pull bitnami/mysql
helm push mysql-8.8.16.tgz o2imsrepo

helm install my-release o2imsrepo/mysql
helm del my-release
```

## Verify CFW over INF: Test with cnf firewall-host-netdevice

## Setup host netdevice over INF

```bash
ssh sysadmin@<inf oam IP>
sudo ip link add name veth11 type veth peer name veth12
sudo ip link add name veth21 type veth peer name veth22
sudo ip link | grep veth
```

## verify CNF over INF

```bash
git clone https://github.com/biny993/firewall-host-netdevice.git

cat <<EOF> cfw-hostdev-override.yaml

image:
  repository: ubuntu
  tag: 18.04
  pullPolicy: IfNotPresent

resources:
  cpu: 2
  memory: 2Gi
  hugepage: 256Mi

#global vars for parent and subcharts.
unprotectedNetPortVpg: veth11
unprotectedNetPortVfw: veth12
unprotectedNetCidr: 10.10.1.0/24
unprotectedNetGwIp: 10.10.1.1

protectedNetPortVfw: veth21
protectedNetPortVsn: veth22
protectedNetCidr: 10.10.2.0/24
protectedNetGwIp: 10.10.2.1

vfwPrivateIp0: 10.10.1.1
vfwPrivateIp1: 10.10.2.1

vpgPrivateIp0: 10.10.1.2

vsnPrivateIp0: 10.10.2.2

EOF

helm install cfw1 firewall-host-netdevice -f cfw-hostdev-override.yaml
```

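Verify the pods come up, then remove the release before re-installing it from the repo in the next step (release name `cfw1` as above):

```bash
kubectl get pods
helm del cfw1
```
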
## push repo to o2imsrepo

```bash
tar -zcvf firewall-host-netdevice-1.0.0.tgz firewall-host-netdevice/
helm push firewall-host-netdevice-1.0.0.tgz o2imsrepo
helm repo update   # refresh the local index so the new chart is searchable
helm search repo firewall

helm install cfw1 o2imsrepo/firewall-host-netdevice -f cfw-hostdev-override.yaml
```

## build docker image for o2 services

```bash
docker-compose build
```

## bootstrap o2 service with docker-compose

```bash
mkdir -p temp/kubeconfig/
cp <your .kube/config> temp/kubeconfig/

source ./admin_openrc.sh
export K8S_KUBECONFIG=/etc/kubeconfig/config
docker-compose up -d
docker logs -f o2_redis_pubsub_1
```

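To confirm all services started, list the compose containers and their state:

```bash
docker-compose ps
```
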
## simulate SMO to deploy CFW

```bash
curl --location --request GET 'http://localhost:5005/o2ims_infrastructureInventory/v1/deploymentManagers'

export dmsId=<DMS ID>

curl --location --request POST "http://localhost:5005/o2dms/${dmsId}/O2dms_DeploymentLifecycle/NfDeploymentDescriptor" \
--header 'Content-Type: application/json' \
--data-raw '{
    "name": "cfwdesc1",
    "description": "demo nf deployment descriptor",
    "artifactRepoUrl": "http://128.224.115.15:30330",
    "artifactName": "firewall-host-netdevice",
    "inputParams":
    "{\n \"image\": {\n \"repository\": \"ubuntu\",\n \"tag\": 18.04,\n \"pullPolicy\": \"IfNotPresent\"\n },\n \"resources\": {\n \"cpu\": 2,\n \"memory\": \"2Gi\",\n \"hugepage\": \"256Mi\",\n \"unprotectedNetPortVpg\": \"veth11\",\n \"unprotectedNetPortVfw\": \"veth12\",\n \"unprotectedNetCidr\": \"10.10.1.0/24\",\n \"unprotectedNetGwIp\": \"10.10.1.1\",\n \"protectedNetPortVfw\": \"veth21\",\n \"protectedNetPortVsn\": \"veth22\",\n \"protectedNetCidr\": \"10.10.2.0/24\",\n \"protectedNetGwIp\": \"10.10.2.1\",\n \"vfwPrivateIp0\": \"10.10.1.1\",\n \"vfwPrivateIp1\": \"10.10.2.1\",\n \"vpgPrivateIp0\": \"10.10.1.2\",\n \"vsnPrivateIp0\": \"10.10.2.2\"\n }\n}",
    "outputParams": "{\"output1\": 100}"
}'
```

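If `jq` is available, the DMS id can be extracted directly instead of being copied by hand; a sketch, assuming the response is a list of objects carrying a `deploymentManagerId` field:

```bash
# grab the first deployment manager's id (the field name is an assumption)
export dmsId=$(curl -s 'http://localhost:5005/o2ims_infrastructureInventory/v1/deploymentManagers' | jq -r '.[0].deploymentManagerId')
echo ${dmsId}
```
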
```bash
curl --location --request GET "http://localhost:5005/o2dms/${dmsId}/O2dms_DeploymentLifecycle/NfDeploymentDescriptor"

curl --location --request POST "http://localhost:5005/o2dms/${dmsId}/O2dms_DeploymentLifecycle/NfDeployment" \
--header 'Content-Type: application/json' \
--data-raw '{
    "name": "cfw100",
    "description": "demo nf deployment",
    "descriptorId": "<NfDeploymentDescriptorId>",
    "parentDeploymentId": ""
}'

curl --location --request GET "http://localhost:5005/o2dms/${dmsId}/O2dms_DeploymentLifecycle/NfDeployment"

export NfDeploymentId=<NfDeployment Id>
```

```bash
docker logs -f o2_redis_pubsub_1
kubectl get pods   # find the actual sink pod name; the hash suffix will differ
kubectl logs -f cfw100-sink-host-netdevice-59bf6fbd4b-845p4
```

## watch traffic stats

Open a browser at http://<NODE_IP>:30667

To tear down the NfDeployment:

```bash
curl --location --request DELETE "http://localhost:5005/o2dms/${dmsId}/O2dms_DeploymentLifecycle/NfDeployment/${NfDeploymentId}"
```

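Once the deployment is gone, the descriptor can be removed the same way; a sketch, assuming the API accepts DELETE on the descriptor path shown above (`descriptorId` is a hypothetical variable holding the NfDeploymentDescriptor id):

```bash
# remove the NfDeploymentDescriptor created earlier (assumed endpoint)
curl --location --request DELETE "http://localhost:5005/o2dms/${dmsId}/O2dms_DeploymentLifecycle/NfDeploymentDescriptor/${descriptorId}"
```
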