Adaptation of test env to helm chart 08/7008/1
author    BjornMagnussonXA <bjorn.magnusson@est.tech>
    Mon, 8 Nov 2021 09:25:07 +0000 (10:25 +0100)
committer Björn Magnusson <bjorn.magnusson@est.tech>
    Mon, 8 Nov 2021 10:03:59 +0000 (10:03 +0000)
Restructuring to use http proxy for all access to docker containers

Adding tests for dmaap adapter and mediator

Issue-ID: NONRTRIC-618

Signed-off-by: BjornMagnussonXA <bjorn.magnusson@est.tech>
Change-Id: I43799771d73140c7abbc63db9f9b28bb353474a1
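
Two changes repeat across most of the modified test scripts: the kube/http proxy is now started unconditionally (previously only when RUNMODE was "KUBE"), and callback-receiver URLs are built from $CR_SERVICE_APP_PATH (or $CR_SERVICE_MR_PATH for data delivered via MR) instead of $CR_SERVICE_PATH, so that all access to the containers goes through the proxy. A minimal sketch of the recurring pattern, condensed from the diffs below (not a complete test script; the real scripts source many more helper files from test/common):

    #!/usr/bin/env bash
    # KUBEPROXY is now included for docker mode as well as kube mode
    DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM NGW KUBEPROXY"
    KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM KUBEPROXY NGW"

    . ../common/testcase_common.sh $@
    setup_testenvironment

    clean_environment

    # No longer wrapped in: if [ $RUNMODE == "KUBE" ]; then ... fi
    start_kube_proxy

    # Callback receiver is addressed via the proxied app path
    api_put_service 201 "service1" 3600 "$CR_SERVICE_APP_PATH/1"
    notificationurl=$CR_SERVICE_APP_PATH"/test"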

93 files changed:
test/auto-test/FTC1.sh
test/auto-test/FTC10.sh
test/auto-test/FTC100.sh
test/auto-test/FTC110.sh
test/auto-test/FTC1100.sh
test/auto-test/FTC150.sh
test/auto-test/FTC1800.sh
test/auto-test/FTC2001.sh
test/auto-test/FTC2002.sh
test/auto-test/FTC2003.sh [new file with mode: 0755]
test/auto-test/FTC300.sh
test/auto-test/FTC3000.sh [new file with mode: 0755]
test/auto-test/FTC310.sh
test/auto-test/FTC350.sh
test/auto-test/FTC800.sh
test/auto-test/FTC805.sh
test/auto-test/FTC810.sh
test/auto-test/FTC850.sh
test/auto-test/FTC900.sh
test/auto-test/FTC_HELM_E_RELEASE.sh [moved from test/auto-test/FTC_HELM_RECIPE_E_RELEASE.sh with 76% similarity]
test/auto-test/FTC_HELM_RECIPE_CHERRY.sh [deleted file]
test/auto-test/FTC_HELM_RECIPE_D_RELEASE.sh [deleted file]
test/auto-test/ONAP_UC.sh
test/auto-test/PM_DEMO.sh
test/auto-test/PM_EI_DEMO.sh
test/auto-test/README.md
test/auto-test/override_ftc_helm_e_release.sh [new file with mode: 0644]
test/auto-test/startMR.sh
test/auto-test/testdata/dmaap-adapter/job-template.json [new file with mode: 0644]
test/common/README.md
test/common/agent_api_functions.sh
test/common/api_curl.sh
test/common/clean_docker.sh [new file with mode: 0755]
test/common/clean_kube.sh
test/common/consul_cbs_functions.sh
test/common/control_panel_api_functions.sh
test/common/controller_api_functions.sh
test/common/cr_api_functions.sh
test/common/dmaapadp_api_functions.sh [new file with mode: 0644]
test/common/dmaapmed_api_functions.sh [new file with mode: 0644]
test/common/ecs_api_functions.sh
test/common/gateway_api_functions.sh
test/common/http_proxy_api_functions.sh
test/common/kube_proxy_api_functions.sh
test/common/mr_api_functions.sh
test/common/prodstub_api_functions.sh
test/common/pvccleaner_api_functions.sh [new file with mode: 0644]
test/common/rapp_catalogue_api_functions.sh
test/common/ricsimulator_api_functions.sh
test/common/test_env-onap-guilin.sh
test/common/test_env-onap-honolulu.sh
test/common/test_env-onap-istanbul.sh
test/common/test_env-oran-cherry.sh
test/common/test_env-oran-d-release.sh
test/common/test_env-oran-e-release.sh
test/common/testcase_common.sh
test/common/testengine_config.sh
test/cr/Dockerfile
test/cr/README.md
test/cr/app/cr.py
test/cr/basic_test.sh
test/mrstub/app/main.py
test/mrstub/basic_test.sh
test/simulator-group/consul_cbs/docker-compose.yml
test/simulator-group/dmaapadp/.gitignore [new file with mode: 0644]
test/simulator-group/dmaapadp/app.yaml [new file with mode: 0644]
test/simulator-group/dmaapadp/application.yaml [new file with mode: 0644]
test/simulator-group/dmaapadp/application_configuration.json [new file with mode: 0644]
test/simulator-group/dmaapadp/docker-compose.yml [new file with mode: 0644]
test/simulator-group/dmaapadp/svc.yaml [new file with mode: 0644]
test/simulator-group/dmaapmed/.gitignore [new file with mode: 0644]
test/simulator-group/dmaapmed/app.yaml [new file with mode: 0644]
test/simulator-group/dmaapmed/docker-compose.yml [new file with mode: 0644]
test/simulator-group/dmaapmed/svc.yaml [new file with mode: 0644]
test/simulator-group/dmaapmed/type_config.json [new file with mode: 0644]
test/simulator-group/dmaapmr/app.yaml
test/simulator-group/dmaapmr/docker-compose.yml
test/simulator-group/dmaapmr/mnt2/kafka/zk_client_jaas.conf [new file with mode: 0644]
test/simulator-group/dmaapmr/mnt2/mr/KUBE-MsgRtrApi.properties [new file with mode: 0644]
test/simulator-group/dmaapmr/mnt2/mr/MsgRtrApi.properties [new file with mode: 0644]
test/simulator-group/dmaapmr/mnt2/mr/cadi.properties [new file with mode: 0644]
test/simulator-group/dmaapmr/mnt2/mr/logback.xml [new file with mode: 0644]
test/simulator-group/dmaapmr/mnt2/zk/zk_server_jaas.conf [new file with mode: 0644]
test/simulator-group/dmaapmr/svc.yaml
test/simulator-group/kubeproxy/docker-compose.yml [new file with mode: 0644]
test/simulator-group/pvc-cleaner/pvc-cleaner.yaml
test/simulator-group/ric/app.yaml
test/simulator-group/ric/docker-compose.yml
test/simulator-group/ric/svc.yaml
test/simulator-group/sdnc/app.yaml
test/simulator-group/sdnc/app2.yaml
test/simulator-group/sdnc/docker-compose-2.yml
test/simulator-group/sdnc/svc.yaml

diff --git a/test/auto-test/FTC1.sh b/test/auto-test/FTC1.sh
index 28c84e5..5d718b0 100755 (executable)
@@ -21,7 +21,7 @@
 TC_ONELINE_DESCR="Sanity test, create service and then create,update and delete a policy using http/https and Agent REST/DMAAP with/without SDNC controller"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR DMAAPMR PA RICSIM SDNC NGW"
+DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR DMAAPMR PA RICSIM SDNC NGW KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
 KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
@@ -71,9 +71,7 @@ for __httpx in $TESTED_PROTOCOLS ; do
 
         clean_environment
 
-        if [ $RUNMODE == "KUBE" ]; then
-            start_kube_proxy
-        fi
+        start_kube_proxy
 
         if [ $__httpx == "HTTPS" ]; then
             use_agent_rest_https
@@ -87,7 +85,7 @@ for __httpx in $TESTED_PROTOCOLS ; do
 
         # Create service to be able to receive events when rics becomes available
         # Must use rest towards the agent since dmaap is not configured yet
-        api_put_service 201 "ric-registration" 0 "$CR_SERVICE_PATH/ric-registration"
+        api_put_service 201 "ric-registration" 0 "$CR_SERVICE_APP_PATH/ric-registration"
 
         if [ $__httpx == "HTTPS" ]; then
             use_cr_https
@@ -190,7 +188,7 @@ for __httpx in $TESTED_PROTOCOLS ; do
         echo "##### Service registry and supervision #####"
         echo "############################################"
 
-        api_put_service 201 "serv1" 1000 "$CR_SERVICE_PATH/1"
+        api_put_service 201 "serv1" 1000 "$CR_SERVICE_APP_PATH/1"
 
         api_get_service_ids 200 "serv1" "ric-registration"
 
@@ -211,7 +209,7 @@ for __httpx in $TESTED_PROTOCOLS ; do
         echo "############################################"
 
         if [ "$PMS_VERSION" == "V2" ]; then
-            notificationurl=$CR_SERVICE_PATH"/test"
+            notificationurl=$CR_SERVICE_APP_PATH"/test"
         else
             notificationurl=""
         fi
diff --git a/test/auto-test/FTC10.sh b/test/auto-test/FTC10.sh
index 567facc..625346b 100755 (executable)
@@ -20,7 +20,7 @@
 TC_ONELINE_DESCR="Basic use case, register service, create/update policy, delete policy, de-register service using both STD and OSC interface while mixing REST and Dmaap"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM NGW"
+DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM NGW KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
 KUBE_INCLUDED_IMAGES=" MR CR PA RICSIM CP KUBEPROXY NGW"
@@ -61,9 +61,7 @@ use_agent_rest_http
 
 clean_environment
 
-if [ $RUNMODE == "KUBE" ]; then
-    start_kube_proxy
-fi
+start_kube_proxy
 
 start_ric_simulators  ricsim_g1 3 OSC_2.1.0
 
@@ -123,14 +121,14 @@ fi
 # Create policies
 
 if [ "$PMS_VERSION" == "V2" ]; then
-    notificationurl=$CR_SERVICE_PATH"/test"
+    notificationurl=$CR_SERVICE_APP_PATH"/test"
 else
     notificationurl=""
 fi
 
 use_agent_rest_http
 
-api_put_service 201 "service1" 3600 "$CR_SERVICE_PATH/1"
+api_put_service 201 "service1" 3600 "$CR_SERVICE_APP_PATH/1"
 
 api_put_policy 201 "service1" ricsim_g1_1 1 2000 NOTRANSIENT $notificationurl testdata/OSC/pi1_template.json 1
 
@@ -175,7 +173,7 @@ fi
 #Update policies
 use_agent_rest_http
 
-api_put_service 200 "service1" 3600 "$CR_SERVICE_PATH/1"
+api_put_service 200 "service1" 3600 "$CR_SERVICE_APP_PATH/1"
 
 api_put_policy 200 "service1" ricsim_g1_1 1 2000 NOTRANSIENT $notificationurl testdata/OSC/pi1_template.json 1
 
diff --git a/test/auto-test/FTC100.sh b/test/auto-test/FTC100.sh
index ff9b901..ac6f8d5 100755 (executable)
@@ -21,7 +21,7 @@
 TC_ONELINE_DESCR="Full agent API walkthrough using agent REST/DMAAP and with/without SDNC A1 Controller"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM SDNC NGW"
+DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM SDNC NGW KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
 KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
@@ -73,9 +73,7 @@ for __httpx in $TESTED_PROTOCOLS ; do
         # Clean container and start all needed containers #
         clean_environment
 
-        if [ $RUNMODE == "KUBE" ]; then
-            start_kube_proxy
-        fi
+        start_kube_proxy
 
         if [ $__httpx == "HTTPS" ]; then
             use_cr_https
@@ -91,7 +89,7 @@ for __httpx in $TESTED_PROTOCOLS ; do
 
         # Create service to be able to receive events when rics becomes available
         # Must use rest towards the agent since dmaap is not configured yet
-        api_put_service 201 "ric-registration" 0 "$CR_SERVICE_PATH/ric-registration"
+        api_put_service 201 "ric-registration" 0 "$CR_SERVICE_APP_PATH/ric-registration"
 
 
         if [ $__httpx == "HTTPS" ]; then
@@ -196,14 +194,14 @@ for __httpx in $TESTED_PROTOCOLS ; do
 
         api_get_services 404 "service1"
 
-        api_put_service 201 "service1" 1000 "$CR_SERVICE_PATH/1"
+        api_put_service 201 "service1" 1000 "$CR_SERVICE_APP_PATH/1"
 
-        api_put_service 200 "service1" 2000 "$CR_SERVICE_PATH/1"
+        api_put_service 200 "service1" 2000 "$CR_SERVICE_APP_PATH/1"
 
 
-        api_put_service 400 "service2" -1 "$CR_SERVICE_PATH/2"
+        api_put_service 400 "service2" -1 "$CR_SERVICE_APP_PATH/2"
 
-        api_put_service 400 "service2" "wrong" "$CR_SERVICE_PATH/2"
+        api_put_service 400 "service2" "wrong" "$CR_SERVICE_APP_PATH/2"
 
         api_put_service 400 "service2" 100 "/test"
 
@@ -211,20 +209,20 @@ for __httpx in $TESTED_PROTOCOLS ; do
 
         api_put_service 201 "service2" 300 "ftp://localhost:80/test"
 
-        api_get_services 200 "service1" "service1" 2000 "$CR_SERVICE_PATH/1"
+        api_get_services 200 "service1" "service1" 2000 "$CR_SERVICE_APP_PATH/1"
 
         api_get_service_ids 200 "service1" "service2" "ric-registration"
 
 
-        api_put_service 201 "service3" 5000 "$CR_SERVICE_PATH/3"
+        api_put_service 201 "service3" 5000 "$CR_SERVICE_APP_PATH/3"
 
 
         api_get_service_ids 200 "service1" "service2" "service3" "ric-registration"
 
 
-        api_get_services 200 "service1" "service1" 2000 "$CR_SERVICE_PATH/1"
+        api_get_services 200 "service1" "service1" 2000 "$CR_SERVICE_APP_PATH/1"
 
-        api_get_services 200 NOSERVICE "service1" 2000 "$CR_SERVICE_PATH/1" "service2" 300 "ftp://localhost:80/test" "service3" 5000 "$CR_SERVICE_PATH/3"  "ric-registration" 0 "$CR_SERVICE_PATH/ric-registration"
+        api_get_services 200 NOSERVICE "service1" 2000 "$CR_SERVICE_APP_PATH/1" "service2" 300 "ftp://localhost:80/test" "service3" 5000 "$CR_SERVICE_APP_PATH/3"  "ric-registration" 0 "$CR_SERVICE_APP_PATH/ric-registration"
 
         api_get_services 200
 
@@ -253,7 +251,7 @@ for __httpx in $TESTED_PROTOCOLS ; do
         api_get_service_ids 200 "service2" "service3" "ric-registration"
 
 
-        api_put_service 201 "service1" 50 "$CR_SERVICE_PATH/1"
+        api_put_service 201 "service1" 50 "$CR_SERVICE_APP_PATH/1"
 
         api_get_service_ids 200 "service1" "service2" "service3"  "ric-registration"
 
@@ -388,10 +386,10 @@ for __httpx in $TESTED_PROTOCOLS ; do
 
 
 
-        api_put_service 201 "service10" 3600 "$CR_SERVICE_PATH/1"
+        api_put_service 201 "service10" 3600 "$CR_SERVICE_APP_PATH/1"
 
         if [ "$PMS_VERSION" == "V2" ]; then
-            notificationurl=$CR_SERVICE_PATH"/test"
+            notificationurl=$CR_SERVICE_APP_PATH"/test"
         else
             notificationurl=""
         fi
diff --git a/test/auto-test/FTC110.sh b/test/auto-test/FTC110.sh
index 3690e9c..e3b96a5 100755 (executable)
@@ -21,7 +21,7 @@
 TC_ONELINE_DESCR="Testing of service registration timeouts and keepalive"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM NGW"
+DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM NGW KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
 KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM KUBEPROXY NGW"
@@ -61,9 +61,7 @@ use_agent_rest_http
 
 clean_environment
 
-if [ $RUNMODE == "KUBE" ]; then
-    start_kube_proxy
-fi
+start_kube_proxy
 
 start_ric_simulators ricsim_g1 1  OSC_2.1.0
 start_ric_simulators ricsim_g2 1  STD_1.1.3
@@ -109,60 +107,60 @@ if [ "$PMS_VERSION" == "V2" ]; then
     sim_print ricsim_g3_1 interface
 fi
 
-api_put_service 201 "service1" 15 "$CR_SERVICE_PATH/service1"
+api_put_service 201 "service1" 15 "$CR_SERVICE_APP_PATH/service1"
 
-api_get_services 200 "service1" "service1" 15 "$CR_SERVICE_PATH/service1"
+api_get_services 200 "service1" "service1" 15 "$CR_SERVICE_APP_PATH/service1"
 
-api_put_service 201 "service2" 120 "$CR_SERVICE_PATH/service2"
+api_put_service 201 "service2" 120 "$CR_SERVICE_APP_PATH/service2"
 
-api_get_services 200 "service2" "service2" 120 "$CR_SERVICE_PATH/service2"
+api_get_services 200 "service2" "service2" 120 "$CR_SERVICE_APP_PATH/service2"
 
-api_put_service 200 "service1" 50 "$CR_SERVICE_PATH/service1"
-api_put_service 200 "service2" 180 "$CR_SERVICE_PATH/service2"
+api_put_service 200 "service1" 50 "$CR_SERVICE_APP_PATH/service1"
+api_put_service 200 "service2" 180 "$CR_SERVICE_APP_PATH/service2"
 
-api_get_services 200 "service1" "service1" 50 "$CR_SERVICE_PATH/service1"
-api_get_services 200 "service2" "service2" 180 "$CR_SERVICE_PATH/service2"
+api_get_services 200 "service1" "service1" 50 "$CR_SERVICE_APP_PATH/service1"
+api_get_services 200 "service2" "service2" 180 "$CR_SERVICE_APP_PATH/service2"
 
 api_get_service_ids 200 "service1" "service2"
 
 sleep_wait 30 "Waiting for keep alive timeout"
 
-api_get_services 200 "service1" "service1" 50 "$CR_SERVICE_PATH/service1"
-api_get_services 200 "service2" "service2" 180 "$CR_SERVICE_PATH/service2"
+api_get_services 200 "service1" "service1" 50 "$CR_SERVICE_APP_PATH/service1"
+api_get_services 200 "service2" "service2" 180 "$CR_SERVICE_APP_PATH/service2"
 
 sleep_wait 100 "Waiting for keep alive timeout"
 
 api_get_services 404 "service1"
-api_get_services 200 "service2" "service2" 180 "$CR_SERVICE_PATH/service2"
+api_get_services 200 "service2" "service2" 180 "$CR_SERVICE_APP_PATH/service2"
 
 api_delete_services 204 "service2"
 
 api_get_services 404 "service1"
 api_get_services 404 "service2"
 
-api_put_service 201 "service3" 60 "$CR_SERVICE_PATH/service3"
+api_put_service 201 "service3" 60 "$CR_SERVICE_APP_PATH/service3"
 
-api_get_services 200 "service3" "service3" 60 "$CR_SERVICE_PATH/service3"
+api_get_services 200 "service3" "service3" 60 "$CR_SERVICE_APP_PATH/service3"
 
 sleep_wait 30 "Waiting for keep alive timeout"
 
-api_put_service 200 "service3" 60 "$CR_SERVICE_PATH/service3"
+api_put_service 200 "service3" 60 "$CR_SERVICE_APP_PATH/service3"
 
 sleep_wait 100 "Waiting for keep alive timeout"
 
 api_get_services 404 "service3"
 
-api_put_service 201 "service4" 120 "$CR_SERVICE_PATH/service4"
+api_put_service 201 "service4" 120 "$CR_SERVICE_APP_PATH/service4"
 
 sleep_wait 60 "Waiting for keep alive timeout"
 
-api_get_services 200 "service4" "service4" 120 "$CR_SERVICE_PATH/service4"
+api_get_services 200 "service4" "service4" 120 "$CR_SERVICE_APP_PATH/service4"
 
 api_put_services_keepalive 200 "service4"
 
 sleep_wait 90 "Waiting for keep alive timeout"
 
-api_get_services 200 "service4" "service4" 120 "$CR_SERVICE_PATH/service4"
+api_get_services 200 "service4" "service4" 120 "$CR_SERVICE_APP_PATH/service4"
 
 api_delete_services 204 "service4"
 
@@ -185,7 +183,7 @@ api_put_services_keepalive 404 "service3"
 api_put_services_keepalive 404 "service4"
 
 # Policy delete after timeout
-api_put_service 201 "service10" 600 "$CR_SERVICE_PATH/service10"
+api_put_service 201 "service10" 600 "$CR_SERVICE_APP_PATH/service10"
 
 sim_put_policy_type 201 ricsim_g1_1 1 testdata/OSC/sim_1.json
 
@@ -211,7 +209,7 @@ else
 fi
 
 if [ "$PMS_VERSION" == "V2" ]; then
-    notificationurl=$CR_SERVICE_PATH"/test"
+    notificationurl=$CR_SERVICE_APP_PATH"/test"
 else
     notificationurl=""
 fi
@@ -265,7 +263,7 @@ if [ "$PMS_VERSION" == "V2" ]; then
     sim_equal ricsim_g3_1 num_instances 1
 fi
 
-api_put_service 200 "service10" 10 "$CR_SERVICE_PATH/service10"
+api_put_service 200 "service10" 10 "$CR_SERVICE_APP_PATH/service10"
 
 #Wait for service expiry
 api_equal json:policies 0 120
diff --git a/test/auto-test/FTC1100.sh b/test/auto-test/FTC1100.sh
index eb687e8..0e4f4a7 100755 (executable)
@@ -21,7 +21,7 @@
 TC_ONELINE_DESCR="ECS full interfaces walkthrough"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="ECS PRODSTUB CR RICSIM CP HTTPPROXY NGW"
+DOCKER_INCLUDED_IMAGES="ECS PRODSTUB CR RICSIM CP HTTPPROXY NGW KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
 KUBE_INCLUDED_IMAGES="PRODSTUB CR ECS RICSIM CP HTTPPROXY KUBEPROXY NGW"
@@ -57,9 +57,7 @@ FLAT_A1_EI="1"
 
 clean_environment
 
-if [ $RUNMODE == "KUBE" ]; then
-    start_kube_proxy
-fi
+start_kube_proxy
 
 use_ecs_rest_https
 
@@ -112,25 +110,25 @@ TARGET150="http://localhost:80/target"  # Dummy target, no target for info data
 TARGET160="http://localhost:80/target"  # Dummy target, no target for info data in this env...
 
 #Status callbacks for eijobs
-STATUS1="$CR_SERVICE_PATH/job1-status"
-STATUS2="$CR_SERVICE_PATH/job2-status"
-STATUS3="$CR_SERVICE_PATH/job3-status"
-STATUS8="$CR_SERVICE_PATH/job8-status"
-STATUS10="$CR_SERVICE_PATH/job10-status"
+STATUS1="$CR_SERVICE_APP_PATH/job1-status"
+STATUS2="$CR_SERVICE_APP_PATH/job2-status"
+STATUS3="$CR_SERVICE_APP_PATH/job3-status"
+STATUS8="$CR_SERVICE_APP_PATH/job8-status"
+STATUS10="$CR_SERVICE_APP_PATH/job10-status"
 
 #Status callbacks for infojobs
-INFOSTATUS101="$CR_SERVICE_PATH/info-job101-status"
-INFOSTATUS102="$CR_SERVICE_PATH/info-job102-status"
-INFOSTATUS103="$CR_SERVICE_PATH/info-job103-status"
-INFOSTATUS108="$CR_SERVICE_PATH/info-job108-status"
-INFOSTATUS110="$CR_SERVICE_PATH/info-job110-status"
-INFOSTATUS150="$CR_SERVICE_PATH/info-job150-status"
-INFOSTATUS160="$CR_SERVICE_PATH/info-job160-status"
+INFOSTATUS101="$CR_SERVICE_APP_PATH/info-job101-status"
+INFOSTATUS102="$CR_SERVICE_APP_PATH/info-job102-status"
+INFOSTATUS103="$CR_SERVICE_APP_PATH/info-job103-status"
+INFOSTATUS108="$CR_SERVICE_APP_PATH/info-job108-status"
+INFOSTATUS110="$CR_SERVICE_APP_PATH/info-job110-status"
+INFOSTATUS150="$CR_SERVICE_APP_PATH/info-job150-status"
+INFOSTATUS160="$CR_SERVICE_APP_PATH/info-job160-status"
 
 if [[ "$ECS_FEATURE_LEVEL" == *"TYPE-SUBSCRIPTIONS"* ]]; then
     #Type registration status callbacks
-    TYPESTATUS1="$CR_SERVICE_PATH/type-status1"
-    TYPESTATUS2="$CR_SERVICE_PATH/type-status2"
+    TYPESTATUS1="$CR_SERVICE_APP_PATH/type-status1"
+    TYPESTATUS2="$CR_SERVICE_APP_PATH/type-status2"
 
     ecs_api_idc_put_subscription 201 subscription-id-1 owner1 $TYPESTATUS1
 
diff --git a/test/auto-test/FTC150.sh b/test/auto-test/FTC150.sh
index bcd41cb..f011a21 100755 (executable)
@@ -21,7 +21,7 @@
 TC_ONELINE_DESCR="Sample tests of the SDNC A1 controller restconf API using http/https (no agent)"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="RICSIM SDNC"
+DOCKER_INCLUDED_IMAGES="RICSIM SDNC KUBEPROXY"
 #App names to include in the test when running kubernetes, space separated list
 KUBE_INCLUDED_IMAGES=" RICSIM SDNC KUBEPROXY"
 #Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
@@ -60,9 +60,7 @@ for __nb_httpx in $NB_TESTED_PROTOCOLS ; do
         # Clean container and start all needed containers #
         clean_environment
 
-        if [ $RUNMODE == "KUBE" ]; then
-            start_kube_proxy
-        fi
+        start_kube_proxy
 
         start_ric_simulators ricsim_g1 1  OSC_2.1.0
         start_ric_simulators ricsim_g2 1  STD_1.1.3
diff --git a/test/auto-test/FTC1800.sh b/test/auto-test/FTC1800.sh
index bb9ed60..6241f3c 100755 (executable)
@@ -21,7 +21,7 @@
 TC_ONELINE_DESCR="ECS Create 10000 jobs (ei and info) restart, test job persistency"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="ECS PRODSTUB CR CP NGW"
+DOCKER_INCLUDED_IMAGES="ECS PRODSTUB CR CP NGW KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
 KUBE_INCLUDED_IMAGES="ECS PRODSTUB CP CR KUBEPROXY NGW"
@@ -55,9 +55,7 @@ FLAT_A1_EI="1"
 
 clean_environment
 
-if [ $RUNMODE == "KUBE" ]; then
-    start_kube_proxy
-fi
+start_kube_proxy
 
 use_ecs_rest_http
 
@@ -90,8 +88,8 @@ fi
 
 if [[ "$ECS_FEATURE_LEVEL" == *"TYPE-SUBSCRIPTIONS"* ]]; then
     #Type registration status callbacks
-    TYPESTATUS1="$CR_SERVICE_PATH/type-status1"
-    TYPESTATUS2="$CR_SERVICE_PATH/type-status2"
+    TYPESTATUS1="$CR_SERVICE_APP_PATH/type-status1"
+    TYPESTATUS2="$CR_SERVICE_APP_PATH/type-status2"
 
     ecs_api_idc_put_subscription 201 subscription-id-1 owner1 $TYPESTATUS1
 
@@ -275,62 +273,62 @@ ecs_api_edp_get_producer_status 200 prod-d ENABLED
 for ((i=1; i<=$NUM_JOBS; i++))
 do
     if [ $(($i%5)) -eq 0 ]; then
-        ecs_api_a1_put_job 201 job$i type1 $TARGET ric1 $CR_SERVICE_PATH/job_status_ric1 testdata/ecs/job-template.json
+        ecs_api_a1_put_job 201 job$i type1 $TARGET ric1 $CR_SERVICE_APP_PATH/job_status_ric1 testdata/ecs/job-template.json
         if [  -z "$FLAT_A1_EI" ]; then
             ecs_api_a1_get_job_status 200 type1 job$i ENABLED
         else
             ecs_api_a1_get_job_status 200 job$i ENABLED 120
         fi
         if [ $use_info_jobs ]; then
-            ecs_api_idc_put_job 201 job$(($i+$NUM_JOBS)) type101 $TARGET info-owner $CR_SERVICE_PATH/job_status_info-owner testdata/ecs/job-template.json VALIDATE
+            ecs_api_idc_put_job 201 job$(($i+$NUM_JOBS)) type101 $TARGET info-owner $CR_SERVICE_APP_PATH/job_status_info-owner testdata/ecs/job-template.json VALIDATE
             ecs_api_idc_get_job_status2 200 job$(($i+$NUM_JOBS)) ENABLED 3 prod-a prod-b prod-c 120
         fi
     fi
     if [ $(($i%5)) -eq 1 ]; then
-        ecs_api_a1_put_job 201 job$i type2 $TARGET ric1 $CR_SERVICE_PATH/job_status_ric1 testdata/ecs/job-template.json
+        ecs_api_a1_put_job 201 job$i type2 $TARGET ric1 $CR_SERVICE_APP_PATH/job_status_ric1 testdata/ecs/job-template.json
         if [  -z "$FLAT_A1_EI" ]; then
             ecs_api_a1_get_job_status 200 type2 job$i ENABLED
         else
             ecs_api_a1_get_job_status 200 job$i ENABLED 120
         fi
         if [ $use_info_jobs ]; then
-            ecs_api_idc_put_job 201 job$(($i+$NUM_JOBS)) type102 $TARGET info-owner $CR_SERVICE_PATH/job_status_info-owner testdata/ecs/job-template.json VALIDATE
+            ecs_api_idc_put_job 201 job$(($i+$NUM_JOBS)) type102 $TARGET info-owner $CR_SERVICE_APP_PATH/job_status_info-owner testdata/ecs/job-template.json VALIDATE
             ecs_api_idc_get_job_status2 200 job$(($i+$NUM_JOBS)) ENABLED 2 prod-b prod-c 120
         fi
     fi
     if [ $(($i%5)) -eq 2 ]; then
-        ecs_api_a1_put_job 201 job$i type3 $TARGET ric1 $CR_SERVICE_PATH/job_status_ric1 testdata/ecs/job-template.json
+        ecs_api_a1_put_job 201 job$i type3 $TARGET ric1 $CR_SERVICE_APP_PATH/job_status_ric1 testdata/ecs/job-template.json
         if [  -z "$FLAT_A1_EI" ]; then
             ecs_api_a1_get_job_status 200 type3 job$i ENABLED
         else
             ecs_api_a1_get_job_status 200 job$i ENABLED 120
         fi
         if [ $use_info_jobs ]; then
-            ecs_api_idc_put_job 201 job$(($i+$NUM_JOBS)) type103 $TARGET info-owner $CR_SERVICE_PATH/job_status_info-owner testdata/ecs/job-template.json VALIDATE
+            ecs_api_idc_put_job 201 job$(($i+$NUM_JOBS)) type103 $TARGET info-owner $CR_SERVICE_APP_PATH/job_status_info-owner testdata/ecs/job-template.json VALIDATE
             ecs_api_idc_get_job_status2 200 job$(($i+$NUM_JOBS)) ENABLED 1 prod-c 120
         fi
     fi
     if [ $(($i%5)) -eq 3 ]; then
-        ecs_api_a1_put_job 201 job$i type4 $TARGET ric1 $CR_SERVICE_PATH/job_status_ric1 testdata/ecs/job-template.json
+        ecs_api_a1_put_job 201 job$i type4 $TARGET ric1 $CR_SERVICE_APP_PATH/job_status_ric1 testdata/ecs/job-template.json
         if [  -z "$FLAT_A1_EI" ]; then
             ecs_api_a1_get_job_status 200 type4 job$i ENABLED
         else
             ecs_api_a1_get_job_status 200 job$i ENABLED 120
         fi
         if [ $use_info_jobs ]; then
-            ecs_api_idc_put_job 201 job$(($i+$NUM_JOBS)) type104 $TARGET info-owner $CR_SERVICE_PATH/job_status_info-owner testdata/ecs/job-template.json VALIDATE
+            ecs_api_idc_put_job 201 job$(($i+$NUM_JOBS)) type104 $TARGET info-owner $CR_SERVICE_APP_PATH/job_status_info-owner testdata/ecs/job-template.json VALIDATE
             ecs_api_idc_get_job_status2 200 job$(($i+$NUM_JOBS)) ENABLED 1 prod-d 120
         fi
     fi
     if [ $(($i%5)) -eq 4 ]; then
-        ecs_api_a1_put_job 201 job$i type5 $TARGET ric1 $CR_SERVICE_PATH/job_status_ric1 testdata/ecs/job-template.json
+        ecs_api_a1_put_job 201 job$i type5 $TARGET ric1 $CR_SERVICE_APP_PATH/job_status_ric1 testdata/ecs/job-template.json
         if [  -z "$FLAT_A1_EI" ]; then
             ecs_api_a1_get_job_status 200 type5 job$i ENABLED
         else
             ecs_api_a1_get_job_status 200 job$i ENABLED 120
         fi
         if [ $use_info_jobs ]; then
-            ecs_api_idc_put_job 201 job$(($i+$NUM_JOBS)) type105 $TARGET info-owner $CR_SERVICE_PATH/job_status_info-owner testdata/ecs/job-template.json VALIDATE
+            ecs_api_idc_put_job 201 job$(($i+$NUM_JOBS)) type105 $TARGET info-owner $CR_SERVICE_APP_PATH/job_status_info-owner testdata/ecs/job-template.json VALIDATE
             ecs_api_idc_get_job_status2 200 job$(($i+$NUM_JOBS)) ENABLED 1 prod-d 120
         fi
     fi
diff --git a/test/auto-test/FTC2001.sh b/test/auto-test/FTC2001.sh
index fffef91..f194817 100755 (executable)
@@ -20,7 +20,7 @@
 TC_ONELINE_DESCR="Testing southbound proxy for PMS and ECS"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM ECS PRODSTUB HTTPPROXY NGW"
+DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM ECS PRODSTUB HTTPPROXY NGW KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
 KUBE_INCLUDED_IMAGES=" MR CR PA PRODSTUB RICSIM CP ECS HTTPPROXY KUBEPROXY NGW"
@@ -64,7 +64,7 @@ use_ecs_rest_https
 use_prod_stub_https
 
 if [ "$PMS_VERSION" == "V2" ]; then
-    notificationurl=$CR_SERVICE_PATH"/test"
+    notificationurl=$CR_SERVICE_APP_PATH"/test"
 else
    echo "PMS VERSION 2 (V2) is required"
    exit 1
@@ -72,9 +72,7 @@ fi
 
 clean_environment
 
-if [ $RUNMODE == "KUBE" ]; then
-    start_kube_proxy
-fi
+start_kube_proxy
 
 STD_NUM_RICS=2
 
@@ -146,7 +144,7 @@ done
 #Check the number of types
 api_equal json:policy-types 2 300
 
-api_put_service 201 "Emergency-response-app" 0 "$CR_SERVICE_PATH/1"
+api_put_service 201 "Emergency-response-app" 0 "$CR_SERVICE_APP_PATH/1"
 
 # Create policies in STD
 for ((i=1; i<=$STD_NUM_RICS; i++))
@@ -184,8 +182,8 @@ fi
 TARGET1="$RIC_SIM_HTTPX://$RIC_G1_1:$RIC_SIM_PORT/datadelivery"
 TARGET2="$RIC_SIM_HTTPX://$RIC_G1_1:$RIC_SIM_PORT/datadelivery"
 
-STATUS1="$CR_SERVICE_PATH/job1-status"
-STATUS2="$CR_SERVICE_PATH/job2-status"
+STATUS1="$CR_SERVICE_APP_PATH/job1-status"
+STATUS2="$CR_SERVICE_APP_PATH/job2-status"
 
 prodstub_arm_producer 200 prod-a
 prodstub_arm_type 200 prod-a type1
@@ -197,7 +195,7 @@ ecs_api_service_status 200
 
 if [[ "$ECS_FEATURE_LEVEL" == *"TYPE-SUBSCRIPTIONS"* ]]; then
     #Type registration status callbacks
-    TYPESTATUS1="$CR_SERVICE_PATH/type-status1"
+    TYPESTATUS1="$CR_SERVICE_APP_PATH/type-status1"
 
     ecs_api_idc_put_subscription 201 subscription-id-1 owner1 $TYPESTATUS1
 
@@ -282,10 +280,7 @@ else
     cr_api_check_all_ecs_events 200 job2-status DISABLED
 fi
 
-echo -e $YELLOW"Verify that ECS has send status notification to the callback recevier"$EYELLOW
-echo -e $YELLOW"and check the source of the call in the log to be from the httpproxy"$EYELLOW
-echo -e $YELLOW"Check for 'Calling host'"$EYELLOW
-echo -e $YELLOW"cmd: docker logs <callback-receiver-container-name>"$EYELLOW
+cr_contains_str remote_hosts $HTTP_PROXY_APP_NAME
 
 check_policy_agent_logs
 check_ecs_logs
diff --git a/test/auto-test/FTC2002.sh b/test/auto-test/FTC2002.sh
index 753cc79..321dd24 100755 (executable)
@@ -20,7 +20,7 @@
 TC_ONELINE_DESCR="Testing southbound proxy for SDNC - docker only"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="RICSIM SDNC HTTPPROXY"
+DOCKER_INCLUDED_IMAGES="RICSIM SDNC HTTPPROXY KUBEPROXY"
 #App names to include in the test when running kubernetes, space separated list
 KUBE_INCLUDED_IMAGES=""
 #Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
@@ -65,6 +65,8 @@ for __nb_httpx in $NB_TESTED_PROTOCOLS ; do
         # Clean container and start all needed containers #
         clean_environment
 
+        start_kube_proxy
+
         start_http_proxy
 
         start_ric_simulators ricsim_g1 1  OSC_2.1.0
diff --git a/test/auto-test/FTC2003.sh b/test/auto-test/FTC2003.sh
new file mode 100755 (executable)
index 0000000..6e22ced
--- /dev/null
@@ -0,0 +1,129 @@
+#!/usr/bin/env bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2020 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+TC_ONELINE_DESCR="Testing southbound proxy for Dmaap Adaptor"
+
+#App names to include in the test when running docker, space separated list
+DOCKER_INCLUDED_IMAGES="CR MR ECS HTTPPROXY KUBEPROXY DMAAPADP"
+
+#App names to include in the test when running kubernetes, space separated list
+KUBE_INCLUDED_IMAGES=" CR MR ECS HTTPPROXY KUBEPROXY DMAAPADP"
+#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
+KUBE_PRESTARTED_IMAGES=""
+
+#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
+#the image is not configured in the supplied env_file
+#Used for images not applicable to all supported profile
+CONDITIONALLY_IGNORED_IMAGES=""
+
+#Supported test environment profiles
+SUPPORTED_PROFILES="ORAN-E-RELEASE"
+#Supported run modes
+SUPPORTED_RUNMODES="DOCKER KUBE"
+
+. ../common/testcase_common.sh $@
+. ../common/ecs_api_functions.sh
+. ../common/cr_api_functions.sh
+. ../common/mr_api_functions.sh
+. ../common/http_proxy_api_functions.sh
+. ../common/kube_proxy_api_functions.sh
+. ../common/dmaapadp_api_functions.sh
+
+setup_testenvironment
+
+#### TEST BEGIN ####
+
+#Local vars in test script
+##########################
+
+FLAT_A1_EI="1"
+NUM_JOBS=10
+
+clean_environment
+
+use_cr_https
+use_ecs_rest_https
+use_mr_https
+use_dmaapadp_https
+
+start_kube_proxy
+
+start_http_proxy
+
+start_cr
+
+start_ecs NOPROXY $SIM_GROUP/$ECS_COMPOSE_DIR/$ECS_CONFIG_FILE
+
+set_ecs_trace
+
+start_mr
+
+start_dmaapadp PROXY $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_CONFIG_FILE $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_DATA_FILE
+
+set_dmaapadp_trace
+
+if [[ "$ECS_FEATURE_LEVEL" == *"INFO-TYPES"* ]]; then
+    ecs_equal json:data-producer/v1/info-producers 1 60
+else
+    ecs_equal json:ei-producer/v1/eiproducers 1 60
+fi
+
+ecs_api_idc_get_job_ids 200 NOTYPE NOWNER EMPTY
+ecs_api_idc_get_type_ids 200 ExampleInformationType
+
+
+ecs_api_edp_get_producer_ids_2 200 NOTYPE DmaapGenericInfoProducer
+
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    ecs_api_idc_put_job 201 joby$i ExampleInformationType $CR_SERVICE_MR_PATH/joby-data$i info-ownery$i $CR_SERVICE_MR_PATH/job_status_info-ownery$i testdata/dmaap-adapter/job-template.json
+done
+
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    ecs_api_a1_get_job_status 200 joby$i ENABLED 30
+done
+
+
+# Adapter data
+mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-1"}'
+mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-3"}'
+
+cr_equal received_callbacks $(($NUM_JOBS*2)) 60
+
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    cr_equal received_callbacks?id=joby-data$i 2
+done
+
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    cr_api_check_single_genric_json_event 200 joby-data$i '{"msg":"msg-1"}'
+    cr_api_check_single_genric_json_event 200 joby-data$i '{"msg":"msg-3"}'
+done
+
+cr_contains_str remote_hosts $HTTP_PROXY_APP_NAME
+
+#### TEST COMPLETE ####
+
+store_logs          END
+
+print_result
+
+auto_clean_environment
\ No newline at end of file
diff --git a/test/auto-test/FTC300.sh b/test/auto-test/FTC300.sh
index ac1259f..4503c88 100755 (executable)
@@ -20,7 +20,7 @@
 TC_ONELINE_DESCR="Resync 10000 policies using OSC and STD interface"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM SDNC NGW"
+DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM SDNC NGW KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
 KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
@@ -97,9 +97,7 @@ for __httpx in $TESTED_PROTOCOLS ; do
         # Clean container and start all needed containers #
         clean_environment
 
-        if [ $RUNMODE == "KUBE" ]; then
-            start_kube_proxy
-        fi
+        start_kube_proxy
 
         start_ric_simulators ricsim_g1 4 OSC_2.1.0
 
@@ -158,13 +156,13 @@ for __httpx in $TESTED_PROTOCOLS ; do
             api_equal json:policy_types 2 120  #Wait for the agent to refresh types from the simulator
         fi
 
-        api_put_service 201 "serv1" 3600 "$CR_SERVICE_PATH/1"
+        api_put_service 201 "serv1" 3600 "$CR_SERVICE_APP_PATH/1"
 
         START_ID=2000
         NUM_POLICIES=10000  # Must be at least 100
 
         if [ "$PMS_VERSION" == "V2" ]; then
-            notificationurl=$CR_SERVICE_PATH"/test"
+            notificationurl=$CR_SERVICE_APP_PATH"/test"
         else
             notificationurl=""
         fi
diff --git a/test/auto-test/FTC3000.sh b/test/auto-test/FTC3000.sh
new file mode 100755 (executable)
index 0000000..da4bf1e
--- /dev/null
@@ -0,0 +1,214 @@
+#!/usr/bin/env bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2020 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+TC_ONELINE_DESCR="App test DMAAP Meditor and DMAAP Adapter"
+
+#App names to include in the test when running docker, space separated list
+DOCKER_INCLUDED_IMAGES="ECS DMAAPMED DMAAPADP KUBEPROXY MR CR"
+
+#App names to include in the test when running kubernetes, space separated list
+KUBE_INCLUDED_IMAGES=" ECS DMAAPMED DMAAPADP KUBEPROXY MR CR"
+
+#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
+KUBE_PRESTARTED_IMAGES=""
+
+#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
+#the image is not configured in the supplied env_file
+#Used for images not applicable to all supported profile
+CONDITIONALLY_IGNORED_IMAGES=""
+
+#Supported test environment profiles
+SUPPORTED_PROFILES="ORAN-E-RELEASE"
+#Supported run modes
+SUPPORTED_RUNMODES="DOCKER KUBE"
+
+. ../common/testcase_common.sh $@
+. ../common/agent_api_functions.sh
+. ../common/ricsimulator_api_functions.sh
+. ../common/ecs_api_functions.sh
+. ../common/prodstub_api_functions.sh
+. ../common/cr_api_functions.sh
+. ../common/rapp_catalogue_api_functions.sh
+. ../common/mr_api_functions.sh
+. ../common/control_panel_api_functions.sh
+. ../common/controller_api_functions.sh
+. ../common/consul_cbs_functions.sh
+. ../common/http_proxy_api_functions.sh
+. ../common/kube_proxy_api_functions.sh
+. ../common/gateway_api_functions.sh
+. ../common/dmaapmed_api_functions.sh
+. ../common/dmaapadp_api_functions.sh
+
+setup_testenvironment
+
+#### TEST BEGIN ####
+
+#Local vars in test script
+##########################
+FLAT_A1_EI="1"
+NUM_JOBS=100  # Mediator and adapter get the same number of jobs
+
+clean_environment
+
+#use_cr_https
+use_cr_http
+use_ecs_rest_https
+use_mr_https
+use_dmaapadp_https
+use_dmaapmed_https
+
+start_kube_proxy
+
+start_cr
+
+start_ecs NOPROXY $SIM_GROUP/$ECS_COMPOSE_DIR/$ECS_CONFIG_FILE
+
+set_ecs_trace
+
+start_mr
+
+start_dmaapadp NOPROXY $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_CONFIG_FILE $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_DATA_FILE
+
+set_dmaapadp_trace
+
+start_dmaapmed NOPROXY $SIM_GROUP/$DMAAP_MED_COMPOSE_DIR/$DMAAP_MED_DATA_FILE
+
+ecs_equal json:data-producer/v1/info-producers 2 60
+
+# Check producers
+ecs_api_idc_get_job_ids 200 NOTYPE NOWNER EMPTY
+ecs_api_idc_get_type_ids 200 ExampleInformationType STD_Fault_Messages
+ecs_api_edp_get_producer_ids_2 200 NOTYPE DmaapGenericInfoProducer DMaaP_Mediator_Producer
+
+
+# Create jobs for adapter
+start_timer "Create adapter jobs: $NUM_JOBS"
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    ecs_api_idc_put_job 201 job-adp-$i ExampleInformationType $CR_SERVICE_MR_PATH/job-adp-data$i info-owner-adp-$i $CR_SERVICE_MR_PATH/job_status_info-owner-adp-$i testdata/dmaap-adapter/job-template.json
+done
+print_timer "Create adapter jobs: $NUM_JOBS"
+
+# Create jobs for mediator
+start_timer "Create mediator jobs: $NUM_JOBS"
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    ecs_api_idc_put_job 201 job-med-$i STD_Fault_Messages $CR_SERVICE_MR_PATH/job-med-data$i info-owner-med-$i $CR_SERVICE_MR_PATH/job_status_info-owner-med-$i testdata/dmaap-adapter/job-template.json
+done
+print_timer "Create mediator jobs: $NUM_JOBS"
+
+# Check job status
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    ecs_api_a1_get_job_status 200 job-med-$i ENABLED 30
+    ecs_api_a1_get_job_status 200 job-adp-$i ENABLED 30
+done
+
+EXPECTED_DATA_DELIV=0
+
+# Send data to adapter via mr
+mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-1"}'
+mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-3"}'
+
+# Wait for data reception, adapter
+EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
+start_timer "Data delivery adapter, 2 json per job"
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 100
+print_timer "Data delivery adapter, 2 json per job"
+EXPECTED_DATA_DELIV=$(cr_read received_callbacks)
+
+# Send data to mediator
+mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-0"}'
+mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-2"}'
+
+# Wait for data reception, mediator
+EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
+start_timer "Data delivery mediator, 2 json per job"
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 100
+print_timer "Data delivery mediator, 2 json per job"
+EXPECTED_DATA_DELIV=$(cr_read received_callbacks)
+
+# Check received number of messages for mediator and adapter callbacks
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    cr_equal received_callbacks?id=job-med-data$i 2
+    cr_equal received_callbacks?id=job-adp-data$i 2
+done
+
+# Check received data and order for mediator and adapter callbacks
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-0"}'
+    cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-2"}'
+    cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-1"}'
+    cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-3"}'
+done
+
+# Set delay in the callback receiver to slow down callbacks
+SEC_DELAY=5
+cr_delay_callback 200 $SEC_DELAY
+
+# Send data to adapter via mr
+mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-5"}'
+mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-7"}'
+
+# Wait for data reception, adapter
+EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
+start_timer "Data delivery adapter with $SEC_DELAY seconds delay, 2 json per job"
+cr_equal received_callbacks $EXPECTED_DATA_DELIV $(($NUM_JOBS+300))
+print_timer "Data delivery adapter with $SEC_DELAY seconds delay, 2 json per job"
+EXPECTED_DATA_DELIV=$(cr_read received_callbacks)
+
+
+# Send data to mediator
+mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-4"}'
+mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-6"}'
+
+# Wait for data reception, mediator
+EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
+start_timer "Data delivery mediator with $SEC_DELAY seconds delay, 2 json per job"
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 1000
+print_timer "Data delivery mediator with $SEC_DELAY seconds delay, 2 json per job"
+EXPECTED_DATA_DELIV=$(cr_read received_callbacks)
+
+# Check received number of messages for mediator and adapter callbacks
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    cr_equal received_callbacks?id=job-med-data$i 4
+    cr_equal received_callbacks?id=job-adp-data$i 4
+done
+
+# Check received data and order for mediator and adapter callbacks
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-4"}'
+    cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-6"}'
+    cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-5"}'
+    cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-7"}'
+done
+
+
+
+#### TEST COMPLETE ####
+
+store_logs          END
+
+print_result
+
+auto_clean_environment
\ No newline at end of file
diff --git a/test/auto-test/FTC310.sh b/test/auto-test/FTC310.sh
index 33e4966..e017643 100755 (executable)
@@ -21,7 +21,7 @@
 TC_ONELINE_DESCR="Resync of RIC via changes in the consul config or pushed config"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM NGW"
+DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM NGW KUBEPROXY"
 
 #Supported test environment profiles
 SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
@@ -54,13 +54,15 @@ for consul_conf in $TESTED_VARIANTS ; do
     # Clean container and start all needed containers #
     clean_environment
 
+    start_kube_proxy
+
     start_policy_agent NOPROXY $SIM_GROUP/$POLICY_AGENT_COMPOSE_DIR/$POLICY_AGENT_CONFIG_FILE
 
     set_agent_trace
 
     # Create service to be able to receive events when rics becomes available
     # Must use rest towards the agent since dmaap is not configured yet
-    api_put_service 201 "ric-registration" 0 "$CR_SERVICE_PATH/ric-registration"
+    api_put_service 201 "ric-registration" 0 "$CR_SERVICE_APP_PATH/ric-registration"
 
     # Start one RIC of each type
     start_ric_simulators ricsim_g1 1  OSC_2.1.0
diff --git a/test/auto-test/FTC350.sh b/test/auto-test/FTC350.sh
index 6cb6b88..25bdc4c 100755 (executable)
@@ -20,7 +20,7 @@
 TC_ONELINE_DESCR="Change supported policy types and reconfigure rics"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM SDNC NGW"
+DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM SDNC NGW KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
 KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
@@ -69,9 +69,7 @@ for interface in $TESTED_VARIANTS ; do
     # Clean container and start all needed containers #
     clean_environment
 
-    if [ $RUNMODE == "KUBE" ]; then
-        start_kube_proxy
-    fi
+    start_kube_proxy
 
     #Start simulators and prepare two configs
 
@@ -110,7 +108,7 @@ for interface in $TESTED_VARIANTS ; do
 
     # Create service to be able to receive events when rics becomes available
     # Must use rest towards the agent since dmaap is not configured yet
-    api_put_service 201 "ric-registration" 0 "$CR_SERVICE_PATH/ric-registration"
+    api_put_service 201 "ric-registration" 0 "$CR_SERVICE_APP_PATH/ric-registration"
 
     #Load first config
     if [ $RUNMODE == "KUBE" ]; then
@@ -283,10 +281,10 @@ for interface in $TESTED_VARIANTS ; do
         api_equal json:policy_types 5
     fi
 
-    api_put_service 201 "serv1" 3600 "$CR_SERVICE_PATH/serv1"
+    api_put_service 201 "serv1" 3600 "$CR_SERVICE_APP_PATH/serv1"
 
     if [ "$PMS_VERSION" == "V2" ]; then
-        notificationurl=$CR_SERVICE_PATH"/test"
+        notificationurl=$CR_SERVICE_APP_PATH"/test"
     else
         notificationurl=""
     fi
diff --git a/test/auto-test/FTC800.sh b/test/auto-test/FTC800.sh
index 19fd96f..27675be 100755 (executable)
@@ -20,7 +20,7 @@
 TC_ONELINE_DESCR="Create 10000 policies in sequence using http/https and Agent REST/DMAAP with/without SDNC controller"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM SDNC NGW"
+DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM SDNC NGW KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
 KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
@@ -91,9 +91,7 @@ for __httpx in $TESTED_PROTOCOLS ; do
 
         clean_environment
 
-        if [ $RUNMODE == "KUBE" ]; then
-            start_kube_proxy
-        fi
+        start_kube_proxy
 
         start_ric_simulators ricsim_g1 1 OSC_2.1.0
         start_ric_simulators ricsim_g2 1 STD_1.1.3
@@ -153,10 +151,10 @@ for __httpx in $TESTED_PROTOCOLS ; do
             api_equal json:policy_types 2 300  #Wait for the agent to refresh types from the simulators
         fi
 
-        api_put_service 201 "serv1" 3600 "$CR_SERVICE_PATH/1"
+        api_put_service 201 "serv1" 3600 "$CR_SERVICE_APP_PATH/1"
 
         if [ "$PMS_VERSION" == "V2" ]; then
-            notificationurl=$CR_SERVICE_PATH"/test"
+            notificationurl=$CR_SERVICE_APP_PATH"/test"
         else
             notificationurl=""
         fi
diff --git a/test/auto-test/FTC805.sh b/test/auto-test/FTC805.sh
index e90f316..02f6758 100755 (executable)
@@ -20,7 +20,7 @@
 TC_ONELINE_DESCR="PMS Create 10000 policies and restart, test polices persistency"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR PA RICSIM SDNC NGW"
+DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR PA RICSIM SDNC NGW KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
 KUBE_INCLUDED_IMAGES="CP CR PA RICSIM SDNC KUBEPROXY NGW"
@@ -67,7 +67,7 @@ NUM_POLICIES_PER_RIC=2000
 generate_policy_uuid
 
 if [ "$PMS_VERSION" == "V2" ]; then
-    notificationurl=$CR_SERVICE_PATH"/test"
+    notificationurl=$CR_SERVICE_APP_PATH"/test"
 else
     notificationurl=""
 fi
@@ -100,9 +100,7 @@ for __httpx in $TESTED_PROTOCOLS ; do
         # Clean container and start all needed containers #
         clean_environment
 
-        if [ $RUNMODE == "KUBE" ]; then
-            start_kube_proxy
-        fi
+        start_kube_proxy
 
         start_ric_simulators ricsim_g1 $NUM_RICS STD_2.0.0
 
@@ -154,7 +152,7 @@ for __httpx in $TESTED_PROTOCOLS ; do
             api_equal json:policy_types 1 300  #Wait for the agent to refresh types from the simulator
         fi
 
-        api_put_service 201 "serv1" 0 "$CR_SERVICE_PATH/1"
+        api_put_service 201 "serv1" 0 "$CR_SERVICE_APP_PATH/1"
 
         echo "Check the number of types in the agent for each ric is 1"
         for ((i=1; i<=$NUM_RICS; i++))
@@ -182,7 +180,7 @@ for __httpx in $TESTED_PROTOCOLS ; do
             sim_equal ricsim_g1_$i num_instances $NUM_POLICIES_PER_RIC
         done
 
-        api_get_services 200 "serv1" "serv1" 0 "$CR_SERVICE_PATH/1"
+        api_get_services 200 "serv1" "serv1" 0 "$CR_SERVICE_APP_PATH/1"
 
         stop_policy_agent
 
@@ -219,7 +217,7 @@ for __httpx in $TESTED_PROTOCOLS ; do
 
         print_timer "Restore $((NUM_POLICIES_PER_RIC*$NUM_RICS)) polices after restart over $interface using "$__httpx
 
-        api_get_services 200 "serv1" "serv1" 0 "$CR_SERVICE_PATH/1"
+        api_get_services 200 "serv1" "serv1" 0 "$CR_SERVICE_APP_PATH/1"
 
         start_timer "Delete $((NUM_POLICIES_PER_RIC*$NUM_RICS)) polices over $interface using "$__httpx
 
@@ -249,7 +247,7 @@ for __httpx in $TESTED_PROTOCOLS ; do
 
         sleep_wait 200
 
-        api_get_services 200 "serv1" "serv1" 0 "$CR_SERVICE_PATH/1"
+        api_get_services 200 "serv1" "serv1" 0 "$CR_SERVICE_APP_PATH/1"
 
         api_equal json:policies 0
 
diff --git a/test/auto-test/FTC810.sh b/test/auto-test/FTC810.sh
index 78c8e0d..02b8db9 100755 (executable)
@@ -20,7 +20,7 @@
 TC_ONELINE_DESCR="Repeatedly create and delete policies in each RICs for 24h (or configured number of days). Via agent REST/DMAAP/DMAAP_BATCH and SDNC using http or https"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM SDNC NGW"
+DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM SDNC NGW KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
 KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
@@ -70,9 +70,7 @@ DAYS=3
 
 clean_environment
 
-if [ $RUNMODE == "KUBE" ]; then
-   start_kube_proxy
-fi
+start_kube_proxy
 
 # use HTTP or HTTPS for all apis
 HTTPX=HTTPS
@@ -199,7 +197,7 @@ do
 done
 
 echo "Register a service"
-api_put_service 201 "serv1" 0 "$CR_SERVICE_PATH/1"
+api_put_service 201 "serv1" 0 "$CR_SERVICE_APP_PATH/1"
 
 TEST_DURATION=$((24*3600*$DAYS))
 TEST_START=$SECONDS
@@ -209,7 +207,7 @@ AGENT_INTERFACES="REST REST_PARALLEL DMAAP DMAAP-BATCH"
 MR_MESSAGES=0
 
 if [ "$PMS_VERSION" == "V2" ]; then
-      notificationurl=$CR_SERVICE_PATH"/test"
+      notificationurl=$CR_SERVICE_APP_PATH"/test"
 else
       notificationurl=""
 fi
diff --git a/test/auto-test/FTC850.sh b/test/auto-test/FTC850.sh
index e72ad07..a5f1978 100755 (executable)
@@ -20,7 +20,7 @@
 TC_ONELINE_DESCR="Create/delete policies in parallel over a number of rics using a number of child process"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM SDNC NGW"
+DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM SDNC NGW KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
 KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
@@ -67,7 +67,7 @@ NUM_POLICIES_PER_RIC=500
 generate_policy_uuid
 
 if [ "$PMS_VERSION" == "V2" ]; then
-    notificationurl=$CR_SERVICE_PATH"/test"
+    notificationurl=$CR_SERVICE_APP_PATH"/test"
 else
     notificationurl=""
 fi
@@ -102,9 +102,7 @@ for __httpx in $TESTED_PROTOCOLS ; do
         # Clean container and start all needed containers #
         clean_environment
 
-        if [ $RUNMODE == "KUBE" ]; then
-            start_kube_proxy
-        fi
+        start_kube_proxy
 
         start_ric_simulators ricsim_g1 $NUM_RICS OSC_2.1.0
 
@@ -158,7 +156,7 @@ for __httpx in $TESTED_PROTOCOLS ; do
             api_equal json:policy_types 1 300  #Wait for the agent to refresh types from the simulator
         fi
 
-        api_put_service 201 "serv1" 600 "$CR_SERVICE_PATH/1"
+        api_put_service 201 "serv1" 600 "$CR_SERVICE_APP_PATH/1"
 
         echo "Check the number of types in the agent for each ric is 1"
         for ((i=1; i<=$NUM_RICS; i++))
diff --git a/test/auto-test/FTC900.sh b/test/auto-test/FTC900.sh
index 71ea398..e698f62 100755 (executable)
@@ -20,7 +20,7 @@
 TC_ONELINE_DESCR="Preparation for test of the Control Panel and the Health Check app - populating a number of ric simulators with types and instances"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM NGW"
+DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM NGW KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
 KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM KUBEPROXY NGW"
@@ -54,9 +54,7 @@ setup_testenvironment
 
 clean_environment
 
-if [ $RUNMODE == "KUBE" ]; then
-    start_kube_proxy
-fi
+start_kube_proxy
 
 OSC_NUM_RICS=6
 STD_NUM_RICS=5
@@ -186,10 +184,10 @@ fi
 # Create policies
 use_agent_rest_http
 
-api_put_service 201 "Emergency-response-app" 0 "$CR_SERVICE_PATH/1"
+api_put_service 201 "Emergency-response-app" 0 "$CR_SERVICE_APP_PATH/1"
 
 if [ "$PMS_VERSION" == "V2" ]; then
-    notificationurl=$CR_SERVICE_PATH"/test"
+    notificationurl=$CR_SERVICE_APP_PATH"/test"
 else
     notificationurl=""
 fi
diff --git a/test/auto-test/FTC_HELM_RECIPE_E_RELEASE.sh b/test/auto-test/FTC_HELM_E_RELEASE.sh
similarity index 76%
rename from test/auto-test/FTC_HELM_RECIPE_E_RELEASE.sh
rename to test/auto-test/FTC_HELM_E_RELEASE.sh
index b609222..71a5d50 100755 (executable)
@@ -17,7 +17,9 @@
 #  ============LICENSE_END=================================================
 #
 
-TC_ONELINE_DESCR="Sanity test of Non-RT RIC Helm recepie - all components - E-RELEASE"
+TC_ONELINE_DESCR="Sanity test of Non-RT RIC Helm chats - all components - E-RELEASE"
+# This script requires that the helm charts for nonrtric, a1simulator and a1controller are installed
+# There should be 2 simulators of each version started
 
 #App names to include in the test when running docker, space separated list
 DOCKER_INCLUDED_IMAGES="" # Not used -  KUBE only test script
@@ -25,7 +27,7 @@ DOCKER_INCLUDED_IMAGES="" # Not used -  KUBE only test script
 #App names to include in the test when running kubernetes, space separated list
 KUBE_INCLUDED_IMAGES=" MR CR  PRODSTUB KUBEPROXY"
 #Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
-KUBE_PRESTARTED_IMAGES=" PA RICSIM CP ECS RC SDNC"
+KUBE_PRESTARTED_IMAGES=" PA RICSIM CP ECS RC SDNC DMAAPMED DMAAPADP"
 
 #Supported test environment profiles
 SUPPORTED_PROFILES="ORAN-E-RELEASE"
@@ -43,6 +45,8 @@ SUPPORTED_RUNMODES="KUBE"
 . ../common/control_panel_api_functions.sh
 . ../common/controller_api_functions.sh
 . ../common/kube_proxy_api_functions.sh
+. ../common/dmaapmed_api_functions.sh
+. ../common/dmaapadp_api_functions.sh
 
 setup_testenvironment
 
@@ -55,6 +59,7 @@ use_sdnc_https
 use_simulator_https
 use_ecs_rest_https
 use_prod_stub_https
+
 if [ $ECS_VERSION == "V1-1" ]; then
     use_rapp_catalogue_http # https not yet supported
 else
@@ -148,6 +153,22 @@ do
     sim_print "a1-sim-osc-"$i interface
 done
 
+# Check the number of policies in STD and STD2
+for ((i=0; i<$STD_NUM_RICS; i++))
+do
+    sim_equal "a1-sim-std-"$i num_instances 0
+    sim_equal "a1-sim-std2-"$i num_instances 0
+done
+
+# Check the number of policies in OSC
+for ((i=0; i<$STD_NUM_RICS; i++))
+do
+    sim_equal "a1-sim-osc-"$i num_instances 0
+done
+
+#Check the number of schemas
+api_equal json:policy-types 1
+
 # Load the polictypes in STD 2
 for ((i=0; i<$STD_NUM_RICS; i++))
 do
@@ -219,16 +240,16 @@ else
     api_equal json:policy_ids 0
 fi
 
-api_put_service 201 "Emergency-response-app" 0 "$CR_SERVICE_PATH/ER-app"
+api_put_service 201 "Emergency-response-app" 0 "$CR_SERVICE_APP_PATH/ER-app"
 
 # Create policies in STD
 for ((i=0; i<$STD_NUM_RICS; i++))
 do
     ricid=$((3+$i))
     generate_policy_uuid
-    api_put_policy 201 "Emergency-response-app" ric$ricid NOTYPE $((1100+$i)) NOTRANSIENT $CR_SERVICE_PATH/"std2" testdata/STD/pi1_template.json 1
+    api_put_policy 201 "Emergency-response-app" ric$ricid NOTYPE $((1100+$i)) NOTRANSIENT $CR_SERVICE_APP_PATH/"std2" testdata/STD/pi1_template.json 1
     generate_policy_uuid
-    api_put_policy 201 "Emergency-response-app" ric$ricid NOTYPE $((1200+$i)) NOTRANSIENT $CR_SERVICE_PATH/"std2" testdata/STD/pi1_template.json 1
+    api_put_policy 201 "Emergency-response-app" ric$ricid NOTYPE $((1200+$i)) NOTRANSIENT $CR_SERVICE_APP_PATH/"std2" testdata/STD/pi1_template.json 1
 done
 
 #Create policies in STD 2
@@ -236,9 +257,9 @@ for ((i=0; i<$STD_NUM_RICS; i++))
 do
    ricid=$((5+$i))
    generate_policy_uuid
-   api_put_policy 201 "Emergency-response-app" ric$ricid STD_QOS_0_2_0 $((2100+$i)) NOTRANSIENT $CR_SERVICE_PATH/"std2" testdata/STD2/pi_qos_template.json 1
+   api_put_policy 201 "Emergency-response-app" ric$ricid STD_QOS_0_2_0 $((2100+$i)) NOTRANSIENT $CR_SERVICE_APP_PATH/"std2" testdata/STD2/pi_qos_template.json 1
    generate_policy_uuid
-   api_put_policy 201 "Emergency-response-app" ric$ricid STD_QOS2_0.1.0 $((2200+$i)) NOTRANSIENT $CR_SERVICE_PATH/"std2" testdata/STD2/pi_qos2_template.json 1
+   api_put_policy 201 "Emergency-response-app" ric$ricid STD_QOS2_0.1.0 $((2200+$i)) NOTRANSIENT $CR_SERVICE_APP_PATH/"std2" testdata/STD2/pi_qos2_template.json 1
 done
 
 # Create policies in OSC
@@ -246,9 +267,9 @@ for ((i=0; i<$OSC_NUM_RICS; i++))
 do
     ricid=$((1+$i))
     generate_policy_uuid
-    api_put_policy 201 "Emergency-response-app" ric$ricid 1 $((3100+$i)) NOTRANSIENT $CR_SERVICE_PATH/"osc" testdata/OSC/pi1_template.json 1
+    api_put_policy 201 "Emergency-response-app" ric$ricid 1 $((3100+$i)) NOTRANSIENT $CR_SERVICE_APP_PATH/"osc" testdata/OSC/pi1_template.json 1
     generate_policy_uuid
-    api_put_policy 201 "Emergency-response-app" ric$ricid 2 $((3200+$i)) NOTRANSIENT $CR_SERVICE_PATH/"osc" testdata/OSC/pi2_template.json 1
+    api_put_policy 201 "Emergency-response-app" ric$ricid 2 $((3200+$i)) NOTRANSIENT $CR_SERVICE_APP_PATH/"osc" testdata/OSC/pi2_template.json 1
 done
 
 
@@ -317,8 +338,8 @@ CB_SV="$PROD_STUB_SERVICE_PATH$PROD_STUB_SUPERVISION_CALLBACK"
 TARGET1="$RIC_SIM_HTTPX://a1-sim-std2-0.a1-sim:$RIC_SIM_PORT/datadelivery"
 TARGET2="$RIC_SIM_HTTPX://a1-sim-std2-1.a1-sim:$RIC_SIM_PORT/datadelivery"
 
-STATUS1="$CR_SERVICE_PATH/job1-status"
-STATUS2="$CR_SERVICE_PATH/job2-status"
+STATUS1="$CR_SERVICE_APP_PATH/job1-status"
+STATUS2="$CR_SERVICE_APP_PATH/job2-status"
 
 prodstub_arm_producer 200 prod-a
 prodstub_arm_type 200 prod-a type1
@@ -384,6 +405,55 @@ else
     fi
 fi
 
+# Dmaap mediator and adapter
+start_dmaapadp NOPROXY $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_CONFIG_FILE $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_DATA_FILE
+
+start_dmaapmed NOPROXY $SIM_GROUP/$DMAAP_MED_COMPOSE_DIR/$DMAAP_MED_DATA_FILE
+
+ecs_equal json:ei-producer/v1/eiproducers 2 60
+
+ecs_api_idc_get_type_ids 200 ExampleInformationType STD_Fault_Messages
+
+ecs_api_edp_get_producer_ids_2 200 NOTYPE DmaapGenericInfoProducer DMaaP_Mediator_Producer
+
+NUM_JOBS=5
+
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    ecs_api_idc_put_job 201 jobx$i STD_Fault_Messages $CR_SERVICE_MR_PATH/jobx-data$i info-ownerx$i $CR_SERVICE_MR_PATH/job_status_info-ownerx$i testdata/dmaap-adapter/job-template.json
+done
+
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    ecs_api_idc_put_job 201 joby$i ExampleInformationType $CR_SERVICE_MR_PATH/joby-data$i info-ownery$i $CR_SERVICE_MR_PATH/job_status_info-ownery$i testdata/dmaap-adapter/job-template.json
+done
+
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    ecs_api_a1_get_job_status 200 jobx$i ENABLED 30
+done
+
+mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-0"}'
+mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-1"}'
+mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-2"}'
+mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-3"}'
+
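+# Each jobx job receives the 2 events published on the dmaapmed topic and each joby job the 2 events published on the dmaapadp topic - NUM_JOBS*2*2 callbacks in total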
+cr_equal received_callbacks $(($NUM_JOBS*2*2)) 60
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    cr_equal received_callbacks?id=jobx-data$i 2
+    cr_equal received_callbacks?id=joby-data$i 2
+done
+
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    cr_api_check_single_genric_json_event 200 jobx-data$i '{"msg":"msg-0"}'
+    cr_api_check_single_genric_json_event 200 jobx-data$i '{"msg":"msg-2"}'
+    cr_api_check_single_genric_json_event 200 joby-data$i '{"msg":"msg-1"}'
+    cr_api_check_single_genric_json_event 200 joby-data$i '{"msg":"msg-3"}'
+done
+
+
 stop_ecs
 
 start_stopped_ecs
diff --git a/test/auto-test/FTC_HELM_RECIPE_CHERRY.sh b/test/auto-test/FTC_HELM_RECIPE_CHERRY.sh
deleted file mode 100755 (executable)
index 7465b40..0000000
+++ /dev/null
@@ -1,319 +0,0 @@
-#!/usr/bin/env bash
-
-#  ============LICENSE_START===============================================
-#  Copyright (C) 2020 Nordix Foundation. All rights reserved.
-#  ========================================================================
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#  ============LICENSE_END=================================================
-#
-
-TC_ONELINE_DESCR="Sanity test of Non-RT RIC Helm recepie - all components - CHERRY release"
-
-#App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="" # Not used -  KUBE only test script
-
-#App names to include in the test when running kubernetes, space separated list
-KUBE_INCLUDED_IMAGES=" MR CR  PRODSTUB KUBEPROXY"
-#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
-KUBE_PRESTARTED_IMAGES=" PA RICSIM CP ECS RC SDNC"
-
-#Supported test environment profiles
-SUPPORTED_PROFILES="ORAN-CHERRY"
-#Supported run modes
-SUPPORTED_RUNMODES="KUBE"
-
-. ../common/testcase_common.sh $@
-. ../common/agent_api_functions.sh
-. ../common/ricsimulator_api_functions.sh
-. ../common/ecs_api_functions.sh
-. ../common/prodstub_api_functions.sh
-. ../common/cr_api_functions.sh
-. ../common/rapp_catalogue_api_functions.sh
-. ../common/mr_api_functions.sh
-. ../common/control_panel_api_functions.sh
-. ../common/controller_api_functions.sh
-. ../common/kube_proxy_api_functions.sh
-
-setup_testenvironment
-
-#### TEST BEGIN ####
-
-use_mr_http       #MR only supports http?
-use_cr_https
-use_agent_rest_https
-use_sdnc_https
-use_simulator_https
-use_ecs_rest_https
-use_prod_stub_https
-if [ $ECS_VERSION == "V1-1" ]; then
-    use_rapp_catalogue_http # https not yet supported
-else
-    use_rapp_catalogue_https
-fi
-
-echo -e "$RED CHECK WHY RC HTTPS DOES NOT WORK $ERED"
-
-use_control_panel_https
-
-if [ "$PMS_VERSION" == "V1" ]; then
-   echo "PMS VERSION 2 (V2) is required"
-   exit 1
-fi
-
-clean_environment
-
-if [ $RUNMODE == "KUBE" ]; then
-    start_kube_proxy
-fi
-
-STD_NUM_RICS=2
-OSC_NUM_RICS=2
-
-start_ric_simulators a1-sim-osc $STD_NUM_RICS OSC_2.1.0
-echo " RIC MAPPING a1-sim-osc-0 : ric1"
-echo " RIC MAPPING a1-sim-osc-1 : ric2"
-
-start_ric_simulators a1-sim-std $STD_NUM_RICS STD_1.1.3
-echo " RIC MAPPING a1-sim-std-0 : ric3"
-echo " RIC MAPPING a1-sim-std-1 : ric4"
-
-start_ric_simulators a1-sim-std2 $STD_NUM_RICS STD_2.0.0
-echo " RIC MAPPING a1-sim-std2-0 : ric5"
-echo " RIC MAPPING a1-sim-std2-1 : ric6"
-
-start_mr
-
-start_control_panel
-
-start_sdnc
-
-start_policy_agent
-
-start_cr
-
-start_prod_stub
-
-start_ecs NOPROXY
-
-set_ecs_trace
-
-start_rapp_catalogue
-
-set_agent_trace
-
-#### Test RAPP Catalogue ####
-
-rapp_cat_api_get_services 200 EMPTY
-
-rapp_cat_api_put_service 201 "Emergency-response-app" v1 "Emergency-response-app" "Emergency-response-app"
-
-rapp_cat_api_get_services 200 "Emergency-response-app" v1 "Emergency-response-app" "Emergency-response-app"
-
-#Check the number of services
-rc_equal json:services 1
-
-api_get_status 200
-
-#### Test Policy Management Service ####
-
-# Print the A1 version for STD 1.1.X
-for ((i=0; i<$STD_NUM_RICS; i++))
-do
-    sim_print "a1-sim-std-"$i interface
-done
-
-# Print the A1 version for STD 2.0.X
-for ((i=0; i<$STD_NUM_RICS; i++))
-do
-   sim_print "a1-sim-std2-"$i interface
-done
-
-# Print the A1 version for OSC 2.1.X
-for ((i=0; i<$OSC_NUM_RICS; i++))
-do
-    sim_print "a1-sim-osc-"$i interface
-done
-
-
-# Load the polictypes in STD 2
-for ((i=0; i<$STD_NUM_RICS; i++))
-do
-   sim_put_policy_type 201 "a1-sim-std2-"$i STD_QOS_0_2_0 testdata/STD2/sim_qos.json
-   sim_put_policy_type 201 "a1-sim-std2-"$i STD_QOS2_0.1.0 testdata/STD2/sim_qos2.json
-done
-
-# Load the polictypes in OSC
-for ((i=0; i<$OSC_NUM_RICS; i++))
-do
-    sim_put_policy_type 201 "a1-sim-osc-"$i 1 testdata/OSC/sim_1.json
-    sim_put_policy_type 201 "a1-sim-osc-"$i 2 testdata/OSC/sim_2.json
-done
-
-# Check that all rics are synced in
-api_equal json:rics 6 300
-
-#Check the number of schemas and the individual schemas
-api_equal json:policy-types 5 300
-
-for ((i=0; i<$STD_NUM_RICS; i++))
-do
-    ricid=$((3+$i))
-    api_equal json:policy-types?ric_id=ric$ricid 1 120
-done
-
-for ((i=0; i<$STD_NUM_RICS; i++))
-do
-   ricid=$((5+$i))
-   api_equal json:policy-types?ric_id=ric$ricid 2 120
-done
-
-for ((i=0; i<$OSC_NUM_RICS; i++))
-do
-    ricid=$((1+$i))
-    api_equal json:policy-types?ric_id=ric$ricid 2 120
-done
-
-#Check the schemas in STD 2
-for ((i=0; i<$OSC_NUM_RICS; i++))
-do
-   ricid=$((5+$i))
-   api_get_policy_type 200 STD_QOS_0_2_0 testdata/STD2/qos-agent-modified.json
-   api_get_policy_type 200 STD_QOS2_0.1.0 testdata/STD2/qos2-agent-modified.json
-done
-
-# Check the schemas in OSC
-for ((i=0; i<$OSC_NUM_RICS; i++))
-do
-    api_get_policy_type 200 1 testdata/OSC/1-agent-modified.json
-    api_get_policy_type 200 2 testdata/OSC/2-agent-modified.json
-done
-
-api_put_service 201 "Emergency-response-app" 0 "$CR_SERVICE_PATH/ER-app"
-
-# Create policies in STD
-for ((i=0; i<$STD_NUM_RICS; i++))
-do
-    ricid=$((3+$i))
-    generate_policy_uuid
-    api_put_policy 201 "Emergency-response-app" ric$ricid NOTYPE $((1100+$i)) NOTRANSIENT $CR_SERVICE_PATH/"std2" testdata/STD/pi1_template.json 1
-    generate_policy_uuid
-    api_put_policy 201 "Emergency-response-app" ric$ricid NOTYPE $((1200+$i)) NOTRANSIENT $CR_SERVICE_PATH/"std2" testdata/STD/pi1_template.json 1
-done
-
-#Create policies in STD 2
-for ((i=0; i<$STD_NUM_RICS; i++))
-do
-   ricid=$((5+$i))
-   generate_policy_uuid
-   api_put_policy 201 "Emergency-response-app" ric$ricid STD_QOS_0_2_0 $((2100+$i)) NOTRANSIENT $CR_SERVICE_PATH/"std2" testdata/STD2/pi_qos_template.json 1
-   generate_policy_uuid
-   api_put_policy 201 "Emergency-response-app" ric$ricid STD_QOS2_0.1.0 $((2200+$i)) NOTRANSIENT $CR_SERVICE_PATH/"std2" testdata/STD2/pi_qos2_template.json 1
-done
-
-# Create policies in OSC
-for ((i=0; i<$OSC_NUM_RICS; i++))
-do
-    ricid=$((1+$i))
-    generate_policy_uuid
-    api_put_policy 201 "Emergency-response-app" ric$ricid 1 $((3100+$i)) NOTRANSIENT $CR_SERVICE_PATH/"osc" testdata/OSC/pi1_template.json 1
-    generate_policy_uuid
-    api_put_policy 201 "Emergency-response-app" ric$ricid 2 $((3200+$i)) NOTRANSIENT $CR_SERVICE_PATH/"osc" testdata/OSC/pi2_template.json 1
-done
-
-
-# Check the number of policies in STD and STD2
-for ((i=0; i<$STD_NUM_RICS; i++))
-do
-    sim_equal "a1-sim-std-"$i num_instances 2
-    sim_equal "a1-sim-std2-"$i num_instances 2
-done
-
-# Check the number of policies in OSC
-for ((i=0; i<$STD_NUM_RICS; i++))
-do
-    sim_equal "a1-sim-osc-"$i num_instances 2
-done
-
-echo "ADD EVENT/STATUS CHECK"
-echo "ADD MR CHECK"
-
-FLAT_A1_EI="1"
-
-ecs_api_admin_reset
-
-CB_JOB="$PROD_STUB_SERVICE_PATH$PROD_STUB_JOB_CALLBACK"
-CB_SV="$PROD_STUB_SERVICE_PATH$PROD_STUB_SUPERVISION_CALLBACK"
-TARGET1="$RIC_SIM_HTTPX://a1-sim-std2-0.a1-sim:$RIC_SIM_PORT/datadelivery"
-TARGET2="$RIC_SIM_HTTPX://a1-sim-std2-1.a1-sim:$RIC_SIM_PORT/datadelivery"
-
-STATUS1="$CR_SERVICE_PATH/job1-status"
-STATUS2="$CR_SERVICE_PATH/job2-status"
-
-prodstub_arm_producer 200 prod-a
-prodstub_arm_type 200 prod-a type1
-prodstub_arm_job_create 200 prod-a job1
-prodstub_arm_job_create 200 prod-a job2
-
-
-### ecs status
-ecs_api_service_status 200
-
-## Setup prod-a
-ecs_api_edp_put_producer 201 prod-a $CB_JOB/prod-a $CB_SV/prod-a type1 testdata/ecs/ei-type-1.json
-
-ecs_api_edp_get_producer 200 prod-a $CB_JOB/prod-a $CB_SV/prod-a type1 testdata/ecs/ei-type-1.json
-
-ecs_api_edp_get_producer_status 200 prod-a ENABLED
-
-
-## Create a job for prod-a
-## job1 - prod-a
-if [  -z "$FLAT_A1_EI" ]; then
-    ecs_api_a1_put_job 201 type1 job1 $TARGET1 ricsim_g3_1 testdata/ecs/job-template.json
-else
-    ecs_api_a1_put_job 201 job1 type1 $TARGET1 ricsim_g3_1 $STATUS1 testdata/ecs/job-template.json
-fi
-
-# Check the job data in the producer
-if [ $ECS_VERSION == "V1-1" ]; then
-    prodstub_check_jobdata 200 prod-a job1 type1 $TARGET1 ricsim_g3_1 testdata/ecs/job-template.json
-else
-    prodstub_check_jobdata_2 200 prod-a job1 type1 $TARGET1 ricsim_g3_1 testdata/ecs/job-template.json
-fi
-
-## Create a second job for prod-a
-## job2 - prod-a
-if [  -z "$FLAT_A1_EI" ]; then
-    ecs_api_a1_put_job 201 type1 job2 $TARGET2 ricsim_g3_2 testdata/ecs/job-template.json
-else
-    ecs_api_a1_put_job 201 job2 type1 $TARGET2 ricsim_g3_2 $STATUS2 testdata/ecs/job-template.json
-fi
-
-# Check the job data in the producer
-if [ $ECS_VERSION == "V1-1" ]; then
-    prodstub_check_jobdata 200 prod-a job2 type1 $TARGET2 ricsim_g3_2 testdata/ecs/job-template.json
-else
-    prodstub_check_jobdata_2 200 prod-a job2 type1 $TARGET2 ricsim_g3_2 testdata/ecs/job-template.json
-fi
-
-echo "ADD EVENT/STATUS CHECK"
-
-check_policy_agent_logs
-check_ecs_logs
-check_sdnc_logs
-
-#### TEST COMPLETE ####
-
-store_logs          END
-
-print_result
diff --git a/test/auto-test/FTC_HELM_RECIPE_D_RELEASE.sh b/test/auto-test/FTC_HELM_RECIPE_D_RELEASE.sh
deleted file mode 100755 (executable)
index 391fdcb..0000000
+++ /dev/null
@@ -1,409 +0,0 @@
-#!/usr/bin/env bash
-
-#  ============LICENSE_START===============================================
-#  Copyright (C) 2020 Nordix Foundation. All rights reserved.
-#  ========================================================================
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#  ============LICENSE_END=================================================
-#
-
-TC_ONELINE_DESCR="Sanity test of Non-RT RIC Helm recepie - all components - D-RELEASE"
-
-#App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="" # Not used -  KUBE only test script
-
-#App names to include in the test when running kubernetes, space separated list
-KUBE_INCLUDED_IMAGES=" MR CR  PRODSTUB KUBEPROXY"
-#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
-KUBE_PRESTARTED_IMAGES=" PA RICSIM CP ECS RC SDNC"
-
-#Supported test environment profiles
-SUPPORTED_PROFILES="ORAN-D-RELEASE"
-#Supported run modes
-SUPPORTED_RUNMODES="KUBE"
-
-. ../common/testcase_common.sh $@
-. ../common/agent_api_functions.sh
-. ../common/ricsimulator_api_functions.sh
-. ../common/ecs_api_functions.sh
-. ../common/prodstub_api_functions.sh
-. ../common/cr_api_functions.sh
-. ../common/rapp_catalogue_api_functions.sh
-. ../common/mr_api_functions.sh
-. ../common/control_panel_api_functions.sh
-. ../common/controller_api_functions.sh
-. ../common/kube_proxy_api_functions.sh
-
-setup_testenvironment
-
-#### TEST BEGIN ####
-
-use_mr_http       #MR only supports http?
-use_cr_https
-use_agent_rest_https
-use_sdnc_https
-use_simulator_https
-use_ecs_rest_https
-use_prod_stub_https
-if [ $ECS_VERSION == "V1-1" ]; then
-    use_rapp_catalogue_http # https not yet supported
-else
-    ########################################use_rapp_catalogue_https
-    use_rapp_catalogue_http
-fi
-
-echo -e "$RED CHECK WHY RC HTTPS DOES NOT WORK $ERED"
-
-###############################use_control_panel_https
-use_control_panel_http
-
-if [ "$PMS_VERSION" == "V1" ]; then
-   echo "PMS VERSION 2 (V2) is required"
-   exit 1
-fi
-
-clean_environment
-
-pms_kube_pvc_reset
-
-ecs_kube_pvc_reset
-
-start_kube_proxy
-
-STD_NUM_RICS=2
-OSC_NUM_RICS=2
-
-start_ric_simulators a1-sim-osc $STD_NUM_RICS OSC_2.1.0
-echo " RIC MAPPING a1-sim-osc-0 : ric1"
-echo " RIC MAPPING a1-sim-osc-1 : ric2"
-
-start_ric_simulators a1-sim-std $STD_NUM_RICS STD_1.1.3
-echo " RIC MAPPING a1-sim-std-0 : ric3"
-echo " RIC MAPPING a1-sim-std-1 : ric4"
-
-start_ric_simulators a1-sim-std2 $STD_NUM_RICS STD_2.0.0
-echo " RIC MAPPING a1-sim-std2-0 : ric5"
-echo " RIC MAPPING a1-sim-std2-1 : ric6"
-
-start_mr
-
-start_control_panel
-
-start_sdnc
-
-start_policy_agent
-
-start_cr
-
-start_prod_stub
-
-start_ecs NOPROXY
-
-set_ecs_trace
-
-start_rapp_catalogue
-
-set_agent_trace
-
-#### Test RAPP Catalogue ####
-
-rapp_cat_api_get_services 200 EMPTY
-
-rapp_cat_api_put_service 201 "Emergency-response-app" v1 "Emergency-response-app" "Emergency-response-app"
-
-rapp_cat_api_get_services 200 "Emergency-response-app" v1 "Emergency-response-app" "Emergency-response-app"
-
-#Check the number of services
-rc_equal json:services 1
-
-api_get_status 200
-
-#### Test Policy Management Service ####
-
-# Print the A1 version for STD 1.1.X
-for ((i=0; i<$STD_NUM_RICS; i++))
-do
-    sim_print "a1-sim-std-"$i interface
-done
-
-# Print the A1 version for STD 2.0.X
-for ((i=0; i<$STD_NUM_RICS; i++))
-do
-   sim_print "a1-sim-std2-"$i interface
-done
-
-# Print the A1 version for OSC 2.1.X
-for ((i=0; i<$OSC_NUM_RICS; i++))
-do
-    sim_print "a1-sim-osc-"$i interface
-done
-
-# Load the polictypes in STD 2
-for ((i=0; i<$STD_NUM_RICS; i++))
-do
-   sim_put_policy_type 201 "a1-sim-std2-"$i STD_QOS_0_2_0 testdata/STD2/sim_qos.json
-   sim_put_policy_type 201 "a1-sim-std2-"$i STD_QOS2_0.1.0 testdata/STD2/sim_qos2.json
-done
-
-# Load the polictypes in OSC
-for ((i=0; i<$OSC_NUM_RICS; i++))
-do
-    sim_put_policy_type 201 "a1-sim-osc-"$i 1 testdata/OSC/sim_1.json
-    sim_put_policy_type 201 "a1-sim-osc-"$i 2 testdata/OSC/sim_2.json
-done
-
-# Check that all rics are synced in
-api_equal json:rics 6 300
-
-#Check the number of schemas and the individual schemas
-api_equal json:policy-types 5 300
-
-for ((i=0; i<$STD_NUM_RICS; i++))
-do
-    ricid=$((3+$i))
-    api_equal json:policy-types?ric_id=ric$ricid 1 120
-done
-
-for ((i=0; i<$STD_NUM_RICS; i++))
-do
-   ricid=$((5+$i))
-   api_equal json:policy-types?ric_id=ric$ricid 2 120
-done
-
-for ((i=0; i<$OSC_NUM_RICS; i++))
-do
-    ricid=$((1+$i))
-    api_equal json:policy-types?ric_id=ric$ricid 2 120
-done
-
-#Check the schemas in STD 2
-for ((i=0; i<$OSC_NUM_RICS; i++))
-do
-   ricid=$((5+$i))
-   api_get_policy_type 200 STD_QOS_0_2_0 testdata/STD2/qos-agent-modified.json
-   api_get_policy_type 200 STD_QOS2_0.1.0 testdata/STD2/qos2-agent-modified.json
-done
-
-# Check the schemas in OSC
-for ((i=0; i<$OSC_NUM_RICS; i++))
-do
-    api_get_policy_type 200 1 testdata/OSC/1-agent-modified.json
-    api_get_policy_type 200 2 testdata/OSC/2-agent-modified.json
-done
-
-if [ "$PMS_VERSION" == "V2" ]; then
-
-    api_equal json:policy-types 5 120
-
-    api_equal json:policies 0
-
-    api_equal json:policy-instances 0
-else
-
-    api_equal json:policy_schemas 5 120
-
-    api_equal json:policy_types 5
-
-    api_equal json:policies 0
-
-    api_equal json:policy_ids 0
-fi
-
-api_put_service 201 "Emergency-response-app" 0 "$CR_SERVICE_PATH/ER-app"
-
-# Create policies in STD
-for ((i=0; i<$STD_NUM_RICS; i++))
-do
-    ricid=$((3+$i))
-    generate_policy_uuid
-    api_put_policy 201 "Emergency-response-app" ric$ricid NOTYPE $((1100+$i)) NOTRANSIENT $CR_SERVICE_PATH/"std2" testdata/STD/pi1_template.json 1
-    generate_policy_uuid
-    api_put_policy 201 "Emergency-response-app" ric$ricid NOTYPE $((1200+$i)) NOTRANSIENT $CR_SERVICE_PATH/"std2" testdata/STD/pi1_template.json 1
-done
-
-#Create policies in STD 2
-for ((i=0; i<$STD_NUM_RICS; i++))
-do
-   ricid=$((5+$i))
-   generate_policy_uuid
-   api_put_policy 201 "Emergency-response-app" ric$ricid STD_QOS_0_2_0 $((2100+$i)) NOTRANSIENT $CR_SERVICE_PATH/"std2" testdata/STD2/pi_qos_template.json 1
-   generate_policy_uuid
-   api_put_policy 201 "Emergency-response-app" ric$ricid STD_QOS2_0.1.0 $((2200+$i)) NOTRANSIENT $CR_SERVICE_PATH/"std2" testdata/STD2/pi_qos2_template.json 1
-done
-
-# Create policies in OSC
-for ((i=0; i<$OSC_NUM_RICS; i++))
-do
-    ricid=$((1+$i))
-    generate_policy_uuid
-    api_put_policy 201 "Emergency-response-app" ric$ricid 1 $((3100+$i)) NOTRANSIENT $CR_SERVICE_PATH/"osc" testdata/OSC/pi1_template.json 1
-    generate_policy_uuid
-    api_put_policy 201 "Emergency-response-app" ric$ricid 2 $((3200+$i)) NOTRANSIENT $CR_SERVICE_PATH/"osc" testdata/OSC/pi2_template.json 1
-done
-
-
-# Check the number of policies in STD and STD2
-for ((i=0; i<$STD_NUM_RICS; i++))
-do
-    sim_equal "a1-sim-std-"$i num_instances 2
-    sim_equal "a1-sim-std2-"$i num_instances 2
-done
-
-# Check the number of policies in OSC
-for ((i=0; i<$STD_NUM_RICS; i++))
-do
-    sim_equal "a1-sim-osc-"$i num_instances 2
-done
-
-stop_policy_agent
-
-start_stopped_policy_agent
-
-# Check PMS state after restart
-
-sleep_wait 200
-
-if [ "$PMS_VERSION" == "V2" ]; then
-
-    api_equal json:policy-types 5 120
-
-    api_equal json:policies 12
-
-    api_equal json:policy-instances 12
-else
-
-    api_equal json:policy_schemas 5 120
-
-    api_equal json:policy_types 5
-
-    api_equal json:policies 12
-
-    api_equal json:policy_ids 12
-fi
-
-# Check the number of policies in STD and STD2
-for ((i=0; i<$STD_NUM_RICS; i++))
-do
-    sim_equal "a1-sim-std-"$i num_instances 2
-    sim_equal "a1-sim-std2-"$i num_instances 2
-done
-
-# Check the number of policies in OSC
-for ((i=0; i<$STD_NUM_RICS; i++))
-do
-    sim_equal "a1-sim-osc-"$i num_instances 2
-done
-
-
-echo "ADD EVENT/STATUS CHECK"
-echo "ADD MR CHECK"
-
-FLAT_A1_EI="1"
-
-ecs_api_admin_reset
-
-CB_JOB="$PROD_STUB_SERVICE_PATH$PROD_STUB_JOB_CALLBACK"
-CB_SV="$PROD_STUB_SERVICE_PATH$PROD_STUB_SUPERVISION_CALLBACK"
-TARGET1="$RIC_SIM_HTTPX://a1-sim-std2-0.a1-sim:$RIC_SIM_PORT/datadelivery"
-TARGET2="$RIC_SIM_HTTPX://a1-sim-std2-1.a1-sim:$RIC_SIM_PORT/datadelivery"
-
-STATUS1="$CR_SERVICE_PATH/job1-status"
-STATUS2="$CR_SERVICE_PATH/job2-status"
-
-prodstub_arm_producer 200 prod-a
-prodstub_arm_type 200 prod-a type1
-prodstub_arm_job_create 200 prod-a job1
-prodstub_arm_job_create 200 prod-a job2
-
-
-### ecs status
-ecs_api_service_status 200
-
-## Setup prod-a
-if [ $ECS_VERSION == "V1-1" ]; then
-    ecs_api_edp_put_producer 201 prod-a $CB_JOB/prod-a $CB_SV/prod-a type1 testdata/ecs/ei-type-1.json
-
-    ecs_api_edp_get_producer 200 prod-a $CB_JOB/prod-a $CB_SV/prod-a type1 testdata/ecs/ei-type-1.json
-else
-    ecs_api_edp_put_type_2 201 type1 testdata/ecs/ei-type-1.json
-    ecs_api_edp_get_type_2 200 type1
-    ecs_api_edp_get_type_ids 200 type1
-
-    ecs_api_edp_put_producer_2 201 prod-a $CB_JOB/prod-a $CB_SV/prod-a type1
-    ecs_api_edp_put_producer_2 200 prod-a $CB_JOB/prod-a $CB_SV/prod-a type1
-fi
-
-ecs_api_edp_get_producer_status 200 prod-a ENABLED
-
-
-## Create a job for prod-a
-## job1 - prod-a
-if [  -z "$FLAT_A1_EI" ]; then
-    ecs_api_a1_put_job 201 type1 job1 $TARGET1 ricsim_g3_1 testdata/ecs/job-template.json
-else
-    ecs_api_a1_put_job 201 job1 type1 $TARGET1 ricsim_g3_1 $STATUS1 testdata/ecs/job-template.json
-fi
-
-# Check the job data in the producer
-if [ $ECS_VERSION == "V1-1" ]; then
-    prodstub_check_jobdata 200 prod-a job1 type1 $TARGET1 ricsim_g3_1 testdata/ecs/job-template.json
-else
-    if [[ "$ECS_FEATURE_LEVEL" == *"INFO-TYPES"* ]]; then
-        prodstub_check_jobdata_3 200 prod-a job1 type1 $TARGET1 ricsim_g3_1 testdata/ecs/job-template.json
-    else
-        prodstub_check_jobdata_2 200 prod-a job1 type1 $TARGET1 ricsim_g3_1 testdata/ecs/job-template.json
-    fi
-fi
-
-## Create a second job for prod-a
-## job2 - prod-a
-if [  -z "$FLAT_A1_EI" ]; then
-    ecs_api_a1_put_job 201 type1 job2 $TARGET2 ricsim_g3_2 testdata/ecs/job-template.json
-else
-    ecs_api_a1_put_job 201 job2 type1 $TARGET2 ricsim_g3_2 $STATUS2 testdata/ecs/job-template.json
-fi
-
-# Check the job data in the producer
-if [ $ECS_VERSION == "V1-1" ]; then
-    prodstub_check_jobdata 200 prod-a job2 type1 $TARGET2 ricsim_g3_2 testdata/ecs/job-template.json
-else
-    if [[ "$ECS_FEATURE_LEVEL" == *"INFO-TYPES"* ]]; then
-        prodstub_check_jobdata_3 200 prod-a job2 type1 $TARGET2 ricsim_g3_2 testdata/ecs/job-template.json
-    else
-        prodstub_check_jobdata_2 200 prod-a job2 type1 $TARGET2 ricsim_g3_2 testdata/ecs/job-template.json
-    fi
-fi
-
-stop_ecs
-
-start_stopped_ecs
-
-# Check ECS status after restart
-
-if [  -z "$FLAT_A1_EI" ]; then
-    ecs_api_a1_get_job_status 200 type1 job1 DISABLED
-    ecs_api_a1_get_job_status 200 type1 job2 DISABLED
-else
-    ecs_api_a1_get_job_status 200 job1 DISABLED
-    ecs_api_a1_get_job_status 200 job2 DISABLED
-fi
-
-check_policy_agent_logs
-check_ecs_logs
-check_sdnc_logs
-
-#### TEST COMPLETE ####
-
-store_logs          END
-
-print_result
index 1edefbd..15b5c5b 100755 (executable)
@@ -20,7 +20,7 @@
 
 TC_ONELINE_DESCR="ONAP Use case REQ-626"
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR DMAAPMR PA RICSIM SDNC NGW"
+DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR DMAAPMR PA RICSIM SDNC NGW KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
 KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
@@ -62,7 +62,7 @@ use_simulator_https
 use_mr_https
 
 if [ "$PMS_VERSION" == "V2" ]; then
-    notificationurl=$CR_SERVICE_PATH"/test"
+    notificationurl=$CR_SERVICE_APP_PATH"/test"
 else
     echo "Version V2 of PMS is needed, exiting..."
     exit 1
@@ -82,9 +82,7 @@ for interface in $TESTED_VARIANTS ; do
 
     clean_environment
 
-    if [ $RUNMODE == "KUBE" ]; then
-        start_kube_proxy
-    fi
+    start_kube_proxy
 
     if [[ $interface = *"DMAAP"* ]]; then
         use_agent_dmaap_https
@@ -213,7 +211,7 @@ for interface in $TESTED_VARIANTS ; do
     # Create policies
     use_agent_rest_http
 
-    api_put_service 201 "Emergency-response-app" 0 "$CR_SERVICE_PATH/1"
+    api_put_service 201 "Emergency-response-app" 0 "$CR_SERVICE_APP_PATH/1"
 
     # Create policies in OSC
     for ((i=1; i<=$OSC_NUM_RICS; i++))
index 845617c..20a02cb 100755 (executable)
@@ -20,7 +20,7 @@
 TC_ONELINE_DESCR="Preparation demo setup  - populating a number of ric simulators with types and instances"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM SDNC NGW"
+DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM SDNC NGW KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
 KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
@@ -61,16 +61,14 @@ use_sdnc_https
 use_simulator_https
 
 if [ "$PMS_VERSION" == "V2" ]; then
-    notificationurl=$CR_SERVICE_PATH"/test"
+    notificationurl=$CR_SERVICE_APP_PATH"/test"
 else
     notificationurl=""
 fi
 
 clean_environment
 
-if [ $RUNMODE == "KUBE" ]; then
-    start_kube_proxy
-fi
+start_kube_proxy
 
 OSC_NUM_RICS=6
 STD_NUM_RICS=5
@@ -212,7 +210,7 @@ fi
 # Create policies
 use_agent_rest_http
 
-api_put_service 201 "Emergency-response-app" 0 "$CR_SERVICE_PATH/1"
+api_put_service 201 "Emergency-response-app" 0 "$CR_SERVICE_APP_PATH/1"
 
 # Create policies in OSC
 for ((i=1; i<=$OSC_NUM_RICS; i++))
index 15e7377..c93a6d7 100755 (executable)
@@ -20,7 +20,7 @@
 TC_ONELINE_DESCR="Preparation demo setup  - policy management and enrichment information"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM SDNC ECS PRODSTUB RC HTTPPROXY NGW"
+DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM SDNC ECS PRODSTUB RC HTTPPROXY KUBEPROXY NGW"
 
 #App names to include in the test when running kubernetes, space separated list
 KUBE_INCLUDED_IMAGES=" MR CR PA RC PRODSTUB RICSIM CP ECS SDNC HTTPPROXY KUBEPROXY NGW"
@@ -73,7 +73,7 @@ fi
 
 
 if [ "$PMS_VERSION" == "V2" ]; then
-    notificationurl=$CR_SERVICE_PATH"/test"
+    notificationurl=$CR_SERVICE_APP_PATH"/test"
 else
    echo "PMS VERSION 2 (V2) is required"
    exit 1
@@ -81,9 +81,7 @@ fi
 
 clean_environment
 
-if [ $RUNMODE == "KUBE" ]; then
-    start_kube_proxy
-fi
+start_kube_proxy
 
 STD_NUM_RICS=2
 
@@ -170,7 +168,7 @@ done
 #Check the number of types
 api_equal json:policy-types 2 300
 
-api_put_service 201 "Emergency-response-app" 0 "$CR_SERVICE_PATH/1"
+api_put_service 201 "Emergency-response-app" 0 "$CR_SERVICE_APP_PATH/1"
 
 # Create policies in STD
 for ((i=1; i<=$STD_NUM_RICS; i++))
@@ -207,8 +205,8 @@ fi
 TARGET1="$RIC_SIM_HTTPX://$RIC_G1_1:$RIC_SIM_PORT/datadelivery"
 TARGET2="$RIC_SIM_HTTPX://$RIC_G1_1:$RIC_SIM_PORT/datadelivery"
 
-STATUS1="$CR_SERVICE_PATH/callbacks/job1-status"
-STATUS2="$CR_SERVICE_PATH/callbacks/job2-status"
+STATUS1="$CR_SERVICE_APP_PATH/callbacks/job1-status"
+STATUS2="$CR_SERVICE_APP_PATH/callbacks/job2-status"
 
 prodstub_arm_producer 200 prod-a
 prodstub_arm_type 200 prod-a type1
index c97f2ab..1f8ef5d 100644 (file)
@@ -42,12 +42,15 @@ ORAN CHERRY
 
 ORAN D-RELEASE
 =========
->```./PM_EI_DEMO.sh remote-remove  docker   --env-file ../common/test_env-oran-d-release.sh  --use-release-image SDNC```
+>```./PM_EI_DEMO.sh remote-remove  docker  release  --env-file ../common/test_env-oran-d-release.sh  --use-release-image SDNC```
 
->```./PM_EI_DEMO.sh remote-remove  kube   --env-file ../common/test_env-oran-d-release.sh  --use-release-image SDNC```
+>```./PM_EI_DEMO.sh remote-remove  kube  release  --env-file ../common/test_env-oran-d-release.sh  --use-release-image SDNC```
 
-Note that D-Release has not updated the SDNC so cherry release is used<br>
-Note: When D-Release is released, add the 'release' arg to run released images.
+ORAN E-RELEASE
+=========
+>```./PM_EI_DEMO.sh remote-remove  docker  --env-file ../common/test_env-oran-e-release.sh```
+
+>```./PM_EI_DEMO.sh remote-remove  kube  --env-file ../common/test_env-oran-e-release.sh```
 
 ONAP GUILIN
 ===========
@@ -65,9 +68,9 @@ ONAP HONOLULU
 
 ONAP ISTANBUL
 =============
->```./PM_EI_DEMO.sh remote-remove  docker   --env-file ../common/test_env-onap-istanbul.sh```
+>```./PM_EI_DEMO.sh remote-remove  docker  release  --env-file ../common/test_env-onap-istanbul.sh```
 
->```./PM_EI_DEMO.sh remote-remove  kube   --env-file ../common/test_env-onap-istanbul.sh```
+>```./PM_EI_DEMO.sh remote-remove  kube  release  --env-file ../common/test_env-onap-istanbul.sh```
 
 Note: When istanbul is released, add the 'release' arg to run released images.
 
@@ -86,6 +89,14 @@ The numbering in each series corresponds to the following groupings
 
 900-999 - Misc test
 
+11XX - ECS API Tests
+
+18XX - ECS Stability and capacity test
+
+2000 - Southbound http proxy tests
+
+30XX - rApp tests
+
 Suites
 
 To get an overview of the available test scripts, use the following command to print the test script description:
diff --git a/test/auto-test/override_ftc_helm_e_release.sh b/test/auto-test/override_ftc_helm_e_release.sh
new file mode 100644 (file)
index 0000000..c23a310
--- /dev/null
@@ -0,0 +1,22 @@
+#!/bin/bash
+################################################################################
+#   Copyright (c) 2021 Nordix Foundation.                                      #
+#                                                                              #
+#   Licensed under the Apache License, Version 2.0 (the "License");            #
+#   you may not use this file except in compliance with the License.           #
+#   You may obtain a copy of the License at                                    #
+#                                                                              #
+#       http://www.apache.org/licenses/LICENSE-2.0                             #
+#                                                                              #
+#   Unless required by applicable law or agreed to in writing, software        #
+#   distributed under the License is distributed on an "AS IS" BASIS,          #
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+#   See the License for the specific language governing permissions and        #
+#   limitations under the License.                                             #
+################################################################################
+
+# Override file example
+
+KUBE_A1SIM_NAMESPACE="a1-sim"
+
+RIC_SIM_PREFIX="a1-sim"
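+
+# The file is passed to a test script via the --override flag, for example (illustrative invocation):
+#   ./FTC_HELM_E_RELEASE.sh remote-remove kube --env-file ../common/test_env-oran-e-release.sh --override override_ftc_helm_e_release.sh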
index ec1e062..47b4514 100755 (executable)
 TC_ONELINE_DESCR="Starts DMAAP MR"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="MR DMAAPMR"
+DOCKER_INCLUDED_IMAGES="MR DMAAPMR KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
-KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
+KUBE_INCLUDED_IMAGES="MR DMAAPMR KUBEPROXY"
 #Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
 KUBE_PRESTARTED_IMAGES=""
 
 #Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
 #the image is not configured in the supplied env_file
 #Used for images not applicable to all supported profile
-CONDITIONALLY_IGNORED_IMAGES="NGW"
+CONDITIONALLY_IGNORED_IMAGES=""
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU  ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ORAN-D-RELEASE ORAN-E-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
@@ -55,10 +55,13 @@ setup_testenvironment
 #### TEST BEGIN ####
 
 clean_environment
+start_kube_proxy
 start_mr
-docker kill mr-stub
+if [ $RUNMODE == "KUBE" ]; then
+    :
+else
+    docker kill $MR_STUB_APP_NAME
+fi
 
 
-print_result
-
 
diff --git a/test/auto-test/testdata/dmaap-adapter/job-template.json b/test/auto-test/testdata/dmaap-adapter/job-template.json
new file mode 100644 (file)
index 0000000..9e26dfe
--- /dev/null
@@ -0,0 +1 @@
+{}
\ No newline at end of file
index 6c277bd..18b9656 100644 (file)
@@ -151,6 +151,8 @@ The script can be started with these arguments
 | `-repo-policy` |  Policy controlling which images to re-tag and push to image repo in param --image-repo. Can be set to 'local' (push on locally built images) or 'remote' (push locally built images and images from nexus repo). Default is 'local' |
 | `--cluster-timeout` |  Optional timeout for cluster where it takes time to obtain external ip/host-name. Timeout in seconds |
 | `--print-stats` |  Prints the number of tests, failed tests, failed configuration and deviations after each individual test or config |
+| `--override <file>` |  Override settings from the file supplied by --env-file (see the example below) |
+| `--pre-clean` |  Clean kube resources when running docker and vice versa |
 | `help` | Print this info along with the test script description and the list of app short names supported |
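+
+Example invocation using the `--override` and `--pre-clean` flags (illustrative; adjust the test script, env file and override file to the target setup):
+
+>```./PM_EI_DEMO.sh remote-remove  kube  --env-file ../common/test_env-oran-e-release.sh  --pre-clean  --override <override-file>```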
 
 ## Function: setup_testenvironment ##
index 767abb9..a1fd657 100644 (file)
@@ -64,7 +64,7 @@ __PA_kube_scale_zero() {
 # Scale kubernetes resources to zero and wait until this has been accomplished, if relevant. If not relevant to scale, then do no action.
 # This function is called for prestarted apps not managed by the test script.
 __PA_kube_scale_zero_and_wait() {
-       __kube_scale_and_wait_all_resources $KUBE_NONRTRIC_NAMESPACE app nonrtric-policymanagementservice
+       __kube_scale_and_wait_all_resources $KUBE_NONRTRIC_NAMESPACE app "$KUBE_NONRTRIC_NAMESPACE"-policymanagementservice
 }
 
 # Delete all kube resouces for the app
@@ -77,65 +77,45 @@ __PA_kube_delete_all() {
 # This function is called for apps managed by the test script.
 # args: <log-dir> <file-prexix>
 __PA_store_docker_logs() {
-       docker logs $POLICY_AGENT_APP_NAME > $1$2_policy-agent.log 2>&1
+       if [ $RUNMODE == "KUBE" ]; then
+               kubectl  logs -l "autotest=PA" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_policy-agent.log 2>&1
+       else
+               docker logs $POLICY_AGENT_APP_NAME > $1$2_policy-agent.log 2>&1
+       fi
 }
 
-#######################################################
-
-## Access to Policy agent
-# Host name may be changed if app started by kube
-# Direct access from script
-PA_HTTPX="http"
-PA_HOST_NAME=$LOCALHOST_NAME
-PA_PATH=$PA_HTTPX"://"$PA_HOST_NAME":"$POLICY_AGENT_EXTERNAL_PORT
-
-# PA_ADAPTER used for switch between REST and DMAAP
-PA_ADAPTER_TYPE="REST"
-PA_ADAPTER=$PA_PATH
-
-# Make curl retries towards the agent for http response codes set in this env var, space separated list of codes
-AGENT_RETRY_CODES=""
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__PA_initial_setup() {
+       use_agent_rest_http
+}
 
-#Save first worker node the pod is started on
-__PA_WORKER_NODE=""
+#######################################################
 
 ###########################
 ### Policy Agents functions
 ###########################
 
-# All calls to the agent will be directed to the agent REST interface from now on
+# Set http as the protocol to use for all communication to the Policy Agent
 # args: -
 # (Function for test scripts)
 use_agent_rest_http() {
-       echo -e $BOLD"Agent protocol setting"$EBOLD
-       echo -e " Using $BOLD http $EBOLD and $BOLD REST $EBOLD towards the agent"
-       PA_HTTPX="http"
-       PA_PATH=$PA_HTTPX"://"$PA_HOST_NAME":"$POLICY_AGENT_EXTERNAL_PORT
-
-       PA_ADAPTER_TYPE="REST"
-       PA_ADAPTER=$PA_PATH
-       echo ""
+       __agent_set_protocoll "http" $POLICY_AGENT_INTERNAL_PORT $POLICY_AGENT_EXTERNAL_PORT
 }
 
-# All calls to the agent will be directed to the agent REST interface from now on
+# Set https as the protocol to use for all communication to the Policy Agent
 # args: -
 # (Function for test scripts)
 use_agent_rest_https() {
-       echo -e $BOLD"Agent protocol setting"$EBOLD
-       echo -e " Using $BOLD https $EBOLD and $BOLD REST $EBOLD towards the agent"
-       PA_HTTPX="https"
-       PA_PATH=$PA_HTTPX"://"$PA_HOST_NAME":"$POLICY_AGENT_EXTERNAL_SECURE_PORT
-
-       PA_ADAPTER_TYPE="REST"
-       PA_ADAPTER=$PA_PATH
-       echo ""
+       __agent_set_protocoll "https" $POLICY_AGENT_INTERNAL_SECURE_PORT $POLICY_AGENT_EXTERNAL_SECURE_PORT
 }
 
 # All calls to the agent will be directed to the agent dmaap interface over http from now on
 # args: -
 # (Function for test scripts)
 use_agent_dmaap_http() {
-       echo -e $BOLD"Agent dmaap protocol setting"$EBOLD
+       echo -e $BOLD"$POLICY_AGENT_DISPLAY_NAME dmaap protocol setting"$EBOLD
        echo -e " Using $BOLD http $EBOLD and $BOLD DMAAP $EBOLD towards the agent"
        PA_ADAPTER_TYPE="MR-HTTP"
        echo ""
@@ -145,13 +125,88 @@ use_agent_dmaap_http() {
 # args: -
 # (Function for test scripts)
 use_agent_dmaap_https() {
-       echo -e $BOLD"Agent dmaap protocol setting"$EBOLD
+       echo -e $BOLD"$POLICY_AGENT_DISPLAY_NAME dmaap protocol setting"$EBOLD
        echo -e " Using $BOLD https $EBOLD and $BOLD DMAAP $EBOLD towards the agent"
        echo -e $YELLOW" Setting http instead of https - MR only uses http"$EYELLOW
        PA_ADAPTER_TYPE="MR-HTTPS"
        echo ""
 }
 
+# Setup paths to svc/container for internal and external access
+# args: <protocol> <internal-port> <external-port>
+__agent_set_protocoll() {
+       echo -e $BOLD"$POLICY_AGENT_DISPLAY_NAME protocol setting"$EBOLD
+	echo -e " Using $BOLD $1 $EBOLD towards $POLICY_AGENT_DISPLAY_NAME"
+
+	## Access to Policy Agent
+
+       PA_SERVICE_PATH=$1"://"$POLICY_AGENT_APP_NAME":"$2  # docker access, container->container and script->container via proxy
+       if [ $RUNMODE == "KUBE" ]; then
+               PA_SERVICE_PATH=$1"://"$POLICY_AGENT_APP_NAME.$KUBE_NONRTRIC_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
+       fi
+
+       # PA_ADAPTER used for switching between REST and DMAAP (only REST supported currently)
+       PA_ADAPTER_TYPE="REST"
+       PA_ADAPTER=$PA_SERVICE_PATH
+
+       echo ""
+}
+
+# Make curl retries towards the agent for http response codes set in this env var, space separated list of codes
+AGENT_RETRY_CODES=""
+
+#Save first worker node the pod is started on
+__PA_WORKER_NODE=""
+
+# Export env vars for config files, docker compose and kube resources
+# args: PROXY|NOPROXY
+__export_agent_vars() {
+
+               export POLICY_AGENT_APP_NAME
+               export POLICY_AGENT_APP_NAME_ALIAS
+               export POLICY_AGENT_DISPLAY_NAME
+
+               export KUBE_NONRTRIC_NAMESPACE
+               export POLICY_AGENT_IMAGE
+               export POLICY_AGENT_INTERNAL_PORT
+               export POLICY_AGENT_INTERNAL_SECURE_PORT
+               export POLICY_AGENT_EXTERNAL_PORT
+               export POLICY_AGENT_EXTERNAL_SECURE_PORT
+               export POLICY_AGENT_CONFIG_MOUNT_PATH
+               export POLICY_AGENT_DATA_MOUNT_PATH
+               export POLICY_AGENT_CONFIG_CONFIGMAP_NAME=$POLICY_AGENT_APP_NAME"-config"
+               export POLICY_AGENT_DATA_CONFIGMAP_NAME=$POLICY_AGENT_APP_NAME"-data"
+               export POLICY_AGENT_PKG_NAME
+               export CONSUL_HOST
+               export CONSUL_INTERNAL_PORT
+               export CONFIG_BINDING_SERVICE
+               export POLICY_AGENT_CONFIG_KEY
+               export DOCKER_SIM_NWNAME
+               export POLICY_AGENT_HOST_MNT_DIR
+               export POLICY_AGENT_CONFIG_FILE
+
+               export POLICY_AGENT_DATA_PV_NAME=$POLICY_AGENT_APP_NAME"-pv"
+               export POLICY_AGENT_DATA_PVC_NAME=$POLICY_AGENT_APP_NAME"-pvc"
+               ##Create a unique path for the pv each time to prevent a previous volume to be reused
+               export POLICY_AGENT_PV_PATH="padata-"$(date +%s)
+               export POLICY_AGENT_CONTAINER_MNT_DIR
+
+               if [ $1 == "PROXY" ]; then
+                       export AGENT_HTTP_PROXY_CONFIG_PORT=$HTTP_PROXY_CONFIG_PORT  #Set if proxy is started
+                       export AGENT_HTTP_PROXY_CONFIG_HOST_NAME=$HTTP_PROXY_CONFIG_HOST_NAME #Set if proxy is started
+                       if [ $AGENT_HTTP_PROXY_CONFIG_PORT -eq 0 ] || [ -z "$AGENT_HTTP_PROXY_CONFIG_HOST_NAME" ]; then
+                               echo -e $YELLOW" Warning: HTTP PROXY will not be configured, proxy app not started"$EYELLOW
+                       else
+                               echo " Configured with http proxy"
+                       fi
+               else
+                       export AGENT_HTTP_PROXY_CONFIG_PORT=0
+                       export AGENT_HTTP_PROXY_CONFIG_HOST_NAME=""
+                       echo " Configured without http proxy"
+               fi
+}
+
+
 # Start the policy agent
 # args: (docker) PROXY|NOPROXY <config-file>
 # args: (kube) PROXY|NOPROXY <config-file> [ <data-file>]
@@ -194,42 +249,7 @@ start_policy_agent() {
                        #Check if nonrtric namespace exists, if not create it
                        __kube_create_namespace $KUBE_NONRTRIC_NAMESPACE
 
-                       #Export all vars needed for service and deployment
-                       export POLICY_AGENT_APP_NAME
-                       export KUBE_NONRTRIC_NAMESPACE
-                       export POLICY_AGENT_IMAGE
-                       export POLICY_AGENT_INTERNAL_PORT
-                       export POLICY_AGENT_INTERNAL_SECURE_PORT
-                       export POLICY_AGENT_EXTERNAL_PORT
-                       export POLICY_AGENT_EXTERNAL_SECURE_PORT
-                       export POLICY_AGENT_CONFIG_MOUNT_PATH
-                       export POLICY_AGENT_DATA_MOUNT_PATH
-                       export POLICY_AGENT_CONFIG_CONFIGMAP_NAME=$POLICY_AGENT_APP_NAME"-config"
-                       export POLICY_AGENT_DATA_CONFIGMAP_NAME=$POLICY_AGENT_APP_NAME"-data"
-                       export POLICY_AGENT_PKG_NAME
-
-                       export POLICY_AGENT_DATA_PV_NAME=$POLICY_AGENT_APP_NAME"-pv"
-                       export POLICY_AGENT_DATA_PVC_NAME=$POLICY_AGENT_APP_NAME"-pvc"
-                       ##Create a unique path for the pv each time to prevent a previous volume to be reused
-                       export POLICY_AGENT_PV_PATH="padata-"$(date +%s)
-                       export POLICY_AGENT_CONTAINER_MNT_DIR
-
-                       if [ $1 == "PROXY" ]; then
-                               AGENT_HTTP_PROXY_CONFIG_PORT=$HTTP_PROXY_CONFIG_PORT  #Set if proxy is started
-                               AGENT_HTTP_PROXY_CONFIG_HOST_NAME=$HTTP_PROXY_CONFIG_HOST_NAME #Set if proxy is started
-                               if [ $AGENT_HTTP_PROXY_CONFIG_PORT -eq 0 ] || [ -z "$AGENT_HTTP_PROXY_CONFIG_HOST_NAME" ]; then
-                                       echo -e $YELLOW" Warning: HTTP PROXY will not be configured, proxy app not started"$EYELLOW
-                               else
-                                       echo " Configured with http proxy"
-                               fi
-                       else
-                               AGENT_HTTP_PROXY_CONFIG_PORT=0
-                               AGENT_HTTP_PROXY_CONFIG_HOST_NAME=""
-                               echo " Configured without http proxy"
-                       fi
-                       export AGENT_HTTP_PROXY_CONFIG_PORT
-                       export AGENT_HTTP_PROXY_CONFIG_HOST_NAME
-
+                       __export_agent_vars $1
 
                        # Create config map for config
                        configfile=$PWD/tmp/$POLICY_AGENT_CONFIG_FILE
@@ -280,23 +300,8 @@ start_policy_agent() {
                        echo -e $YELLOW" Persistency may not work for app $POLICY_AGENT_APP_NAME in multi-worker node config when running it as a prestarted app"$EYELLOW
                fi
 
-               echo " Retrieving host and ports for service..."
-               PA_HOST_NAME=$(__kube_get_service_host $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE)
-               POLICY_AGENT_EXTERNAL_PORT=$(__kube_get_service_port $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE "http")
-               POLICY_AGENT_EXTERNAL_SECURE_PORT=$(__kube_get_service_port $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE "https")
-
-               echo " Host IP, http port, https port: $PA_HOST_NAME $POLICY_AGENT_EXTERNAL_PORT $POLICY_AGENT_EXTERNAL_SECURE_PORT"
-
-               if [ $PA_HTTPX == "http" ]; then
-                       PA_PATH=$PA_HTTPX"://"$PA_HOST_NAME":"$POLICY_AGENT_EXTERNAL_PORT
-               else
-                       PA_PATH=$PA_HTTPX"://"$PA_HOST_NAME":"$POLICY_AGENT_EXTERNAL_SECURE_PORT
-               fi
-               __check_service_start $POLICY_AGENT_APP_NAME $PA_PATH$POLICY_AGENT_ALIVE_URL
+               __check_service_start $POLICY_AGENT_APP_NAME $PA_SERVICE_PATH$POLICY_AGENT_ALIVE_URL
 
-               if [ $PA_ADAPTER_TYPE == "REST" ]; then
-                       PA_ADAPTER=$PA_PATH
-               fi
        else
                __check_included_image 'PA'
                if [ $? -eq 1 ]; then
@@ -324,40 +329,7 @@ start_policy_agent() {
                fi
                cd $curdir
 
-               #Export all vars needed for docker-compose
-               export POLICY_AGENT_APP_NAME
-               export POLICY_AGENT_APP_NAME_ALIAS
-               export POLICY_AGENT_INTERNAL_PORT
-               export POLICY_AGENT_EXTERNAL_PORT
-               export POLICY_AGENT_INTERNAL_SECURE_PORT
-               export POLICY_AGENT_EXTERNAL_SECURE_PORT
-               export CONSUL_HOST
-               export CONSUL_INTERNAL_PORT
-               export CONFIG_BINDING_SERVICE
-               export POLICY_AGENT_CONFIG_KEY
-               export DOCKER_SIM_NWNAME
-               export POLICY_AGENT_HOST_MNT_DIR
-               export POLICY_AGENT_CONFIG_MOUNT_PATH
-               export POLICY_AGENT_CONFIG_FILE
-               export POLICY_AGENT_PKG_NAME
-               export POLICY_AGENT_DISPLAY_NAME
-               export POLICY_AGENT_CONTAINER_MNT_DIR
-
-               if [ $1 == "PROXY" ]; then
-                       AGENT_HTTP_PROXY_CONFIG_PORT=$HTTP_PROXY_CONFIG_PORT  #Set if proxy is started
-                       AGENT_HTTP_PROXY_CONFIG_HOST_NAME=$HTTP_PROXY_CONFIG_HOST_NAME #Set if proxy is started
-                       if [ $AGENT_HTTP_PROXY_CONFIG_PORT -eq 0 ] || [ -z "$AGENT_HTTP_PROXY_CONFIG_HOST_NAME" ]; then
-                               echo -e $YELLOW" Warning: HTTP PROXY will not be configured, proxy app not started"$EYELLOW
-                       else
-                               echo " Configured with http proxy"
-                       fi
-               else
-                       AGENT_HTTP_PROXY_CONFIG_PORT=0
-                       AGENT_HTTP_PROXY_CONFIG_HOST_NAME=""
-                       echo " Configured without http proxy"
-               fi
-               export AGENT_HTTP_PROXY_CONFIG_PORT
-               export AGENT_HTTP_PROXY_CONFIG_HOST_NAME
+               __export_agent_vars $1
 
                dest_file=$SIM_GROUP/$POLICY_AGENT_COMPOSE_DIR/$POLICY_AGENT_HOST_MNT_DIR/application.yaml
 
@@ -365,7 +337,7 @@ start_policy_agent() {
 
                __start_container $POLICY_AGENT_COMPOSE_DIR "" NODOCKERARGS 1 $POLICY_AGENT_APP_NAME
 
-               __check_service_start $POLICY_AGENT_APP_NAME $PA_PATH$POLICY_AGENT_ALIVE_URL
+               __check_service_start $POLICY_AGENT_APP_NAME $PA_SERVICE_PATH$POLICY_AGENT_ALIVE_URL
        fi
        echo ""
        return 0
@@ -422,7 +394,7 @@ start_stopped_policy_agent() {
                        echo -e $YELLOW" Persistency may not work for app $POLICY_AGENT_APP_NAME in multi-worker node config when running it as a prestarted app"$EYELLOW
                        res_type=$(__kube_get_resource_type $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE)
                        __kube_scale $res_type $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
-                       __check_service_start $POLICY_AGENT_APP_NAME $PA_PATH$POLICY_AGENT_ALIVE_URL
+                       __check_service_start $POLICY_AGENT_APP_NAME $PA_SERVICE_PATH$POLICY_AGENT_ALIVE_URL
                        return 0
                fi
 
@@ -450,7 +422,7 @@ start_stopped_policy_agent() {
                        return 1
                fi
        fi
-       __check_service_start $POLICY_AGENT_APP_NAME $PA_PATH$POLICY_AGENT_ALIVE_URL
+       __check_service_start $POLICY_AGENT_APP_NAME $PA_SERVICE_PATH$POLICY_AGENT_ALIVE_URL
        if [ $? -ne 0 ]; then
                return 1
        fi
@@ -476,7 +448,7 @@ agent_load_config() {
 # (Function for test scripts)
 set_agent_debug() {
        echo -e $BOLD"Setting agent debug logging"$EBOLD
-       curlString="$PA_PATH$POLICY_AGENT_ACTUATOR -X POST  -H Content-Type:application/json -d {\"configuredLevel\":\"debug\"}"
+       curlString="$PA_SERVICE_PATH$POLICY_AGENT_ACTUATOR -X POST  -H Content-Type:application/json -d {\"configuredLevel\":\"debug\"}"
        result=$(__do_curl "$curlString")
        if [ $? -ne 0 ]; then
                __print_err "could not set debug mode" $@
@@ -492,7 +464,7 @@ set_agent_debug() {
 # (Function for test scripts)
 set_agent_trace() {
        echo -e $BOLD"Setting agent trace logging"$EBOLD
-       curlString="$PA_PATH$POLICY_AGENT_ACTUATOR -X POST  -H Content-Type:application/json -d {\"configuredLevel\":\"trace\"}"
+       curlString="$PA_SERVICE_PATH$POLICY_AGENT_ACTUATOR -X POST  -H Content-Type:application/json -d {\"configuredLevel\":\"trace\"}"
        result=$(__do_curl "$curlString")
        if [ $? -ne 0 ]; then
                __print_err "could not set trace mode" $@
@@ -533,9 +505,9 @@ api_equal() {
        if [ $# -eq 2 ] || [ $# -eq 3 ]; then
                if [[ $1 == "json:"* ]]; then
                        if [ "$PMS_VERSION" == "V2" ]; then
-                               __var_test "Policy Agent" $PA_PATH$PMS_API_PREFIX"/v2/" $1 "=" $2 $3
+                               __var_test "Policy Agent" $PA_SERVICE_PATH$PMS_API_PREFIX"/v2/" $1 "=" $2 $3
                        else
-                               __var_test "Policy Agent" $PA_PATH"/" $1 "=" $2 $3
+                               __var_test "Policy Agent" $PA_SERVICE_PATH"/" $1 "=" $2 $3
                        fi
                        return 0
                fi
@@ -1071,10 +1043,8 @@ api_put_policy_parallel() {
        urlbase=${PA_ADAPTER}${query}
 
        httpproxy="NOPROXY"
-       if [ $RUNMODE == "KUBE" ]; then
-               if [ ! -z "$KUBE_PROXY_PATH" ]; then
-                       httpproxy=$KUBE_PROXY_PATH
-               fi
+       if [ ! -z "$KUBE_PROXY_PATH" ]; then
+               httpproxy=$KUBE_PROXY_PATH
        fi
 
        for ((i=1; i<=$pids; i++))
@@ -1266,10 +1236,8 @@ api_delete_policy_parallel() {
        urlbase=${PA_ADAPTER}${query}
 
        httpproxy="NOPROXY"
-       if [ $RUNMODE == "KUBE" ]; then
-               if [ ! -z "$KUBE_PROXY_PATH" ]; then
-                       httpproxy=$KUBE_PROXY_PATH
-               fi
+       if [ ! -z "$KUBE_PROXY_PATH" ]; then
+               httpproxy=$KUBE_PROXY_PATH
        fi
 
        for ((i=1; i<=$pids; i++))
@@ -2208,12 +2176,12 @@ api_get_configuration() {
 pms_kube_pvc_reset() {
        __log_test_start $@
 
-       pvc_name=$(kubectl get pvc -n nonrtric  --no-headers -o custom-columns=":metadata.name" | grep policy)
+       pvc_name=$(kubectl get pvc -n $KUBE_NONRTRIC_NAMESPACE  --no-headers -o custom-columns=":metadata.name" | grep policy)
        if [ -z "$pvc_name" ]; then
                pvc_name=policymanagementservice-vardata-pvc
        fi
        echo " Trying to reset pvc: "$pvc_name
-       __kube_clean_pvc $POLICY_AGENT_APP_NAME nonrtric $pvc_name /var/policy-management-service/database
+       __kube_clean_pvc $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE $pvc_name $POLICY_AGENT_CONTAINER_MNT_DIR
 
        __log_test_pass
        return 0
index 615ccab..17f80a5 100644 (file)
@@ -29,15 +29,14 @@ __do_curl_to_api() {
        TIMESTAMP=$(date "+%Y-%m-%d %H:%M:%S")
     echo " (${BASH_LINENO[0]}) - ${TIMESTAMP}: ${FUNCNAME[0]}" $@ >> $HTTPLOG
        proxyflag=""
-       if [ $RUNMODE == "KUBE" ]; then
-               if [ ! -z "$KUBE_PROXY_PATH" ]; then
-                       if [ $KUBE_PROXY_HTTPX == "http" ]; then
-                               proxyflag=" --proxy $KUBE_PROXY_PATH"
-                       else
-                               proxyflag=" --proxy-insecure --proxy $KUBE_PROXY_PATH"
-                       fi
+       if [ ! -z "$KUBE_PROXY_PATH" ]; then
+               if [ $KUBE_PROXY_HTTPX == "http" ]; then
+                       proxyflag=" --proxy $KUBE_PROXY_PATH"
+               else
+                       proxyflag=" --proxy-insecure --proxy $KUBE_PROXY_PATH"
                fi
        fi
+
        paramError=0
        input_url=$3
     if [ $# -gt 0 ]; then
@@ -64,6 +63,18 @@ __do_curl_to_api() {
                        __ADAPTER=$NGW_ADAPTER
                        __ADAPTER_TYPE=$NGW_ADAPTER_TYPE
             __RETRY_CODES=""
+        elif [ $1 == "DMAAPADP" ]; then
+                       __ADAPTER=$DMAAP_ADP_ADAPTER
+                       __ADAPTER_TYPE=$DMAAP_ADP_ADAPTER_TYPE
+            __RETRY_CODES=""
+        elif [ $1 == "DMAAPMED" ]; then
+                       __ADAPTER=$DMAAP_MED_ADAPTER
+                       __ADAPTER_TYPE=$DMAAP_MED_ADAPTER_TYPE
+            __RETRY_CODES=""
+        elif [ $1 == "MRSTUB" ]; then
+                       __ADAPTER=$MR_STUB_ADAPTER
+                       __ADAPTER_TYPE=$MR_STUB_ADAPTER_TYPE
+            __RETRY_CODES=""
         else
             paramError=1
         fi
@@ -104,8 +115,9 @@ __do_curl_to_api() {
                elif [ $2 == "POST" ] || [ $2 == "POST_BATCH" ]; then
                        oper="POST"
                        accept=" -H accept:*/*"
-                       if [ $# -ne 3 ]; then
-                               paramError=1
+                       if [ $# -eq 4 ]; then
+                               file=" --data-binary @$4"
+                               accept=" -H accept:application/json"
                        fi
                elif [ $2 == "DELETE" ] || [ $2 == "DELETE_BATCH" ]; then
                        oper="DELETE"
diff --git a/test/common/clean_docker.sh b/test/common/clean_docker.sh
new file mode 100755 (executable)
index 0000000..70e42da
--- /dev/null
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2021 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+
+# Script to clean all docker containers having the label 'nrttest_app', i.e. started by autotest

+
+echo "Will stop and remove all docker containers with label 'nrttest_app'"
+echo " Stopping containers..."
+docker stop $(docker ps -qa  --filter "label=nrttest_app") 2> /dev/null
+echo " Removing stopped containers..."
+docker rm $(docker ps -qa  --filter "label=nrttest_app") 2> /dev/null
+
+echo "Done"
\ No newline at end of file
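The script relies entirely on the 'nrttest_app' label to select containers, so a dry run is just a filtered listing; for example (standard docker CLI, not part of the patch):

    docker ps -a --filter "label=nrttest_app" --format "table {{.Names}}\t{{.Status}}"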
index 8e2f676..405c194 100755 (executable)
@@ -28,64 +28,164 @@ YELLOW="\033[33m\033[1m"
 EYELLOW="\033[0m"
 SAMELINE="\033[0K\r"
 
-__kube_delete_all_resources() {
-       echo "Deleting all from namespace: "$1
+__kube_scale_all_resources() {
+
+       echo " Scaling down in namespace $1 ..."
        namespace=$1
-       resources="deployments replicaset statefulset services pods configmaps pvc pv"
-       deleted_resourcetypes=""
+       resources="deployment replicaset statefulset"
        for restype in $resources; do
                result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
                if [ $? -eq 0 ] && [ ! -z "$result" ]; then
-                       deleted_resourcetypes=$deleted_resourcetypes" "$restype
                        for resid in $result; do
-                               if [ $restype == "replicaset" ] || [ $restype == "statefulset" ]; then
-                                       kubectl scale  $restype $resid -n $namespace --replicas=0 1> /dev/null 2> /dev/null
-                                       T_START=$SECONDS
-                                       count=1
-                                       while [ $count -ne 0 ]; do
-                                               count=$(kubectl get $restype $resid  -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null)
-                                               echo -ne "  Scaling $restype $resid from namespace $namespace with label autotest to 0,count=$count....$(($SECONDS-$T_START)) seconds"$SAMELINE
-                                               if [ $? -eq 0 ] && [ ! -z "$count" ]; then
-                                                       sleep 0.5
+                               count=$(kubectl get $restype $resid  -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null)
+                               if [ $? -eq 0 ] && [ ! -z "$count" ]; then
+                                       if [ $count -ne 0 ]; then
+                                               echo "  Scaling $restype $resid in namespace $namespace with label autotest to 0, current count=$count."
+                                               kubectl scale  $restype $resid -n $namespace --replicas=0 1> /dev/null 2> /dev/null
+                                       fi
+                               fi
+                       done
+               fi
+       done
+}
+
+__kube_wait_for_zero_count() {
+       echo " Wait for scaling to zero in namespace $1 ..."
+       namespace=$1
+       resources="deployment replicaset statefulset"
+       for restype in $resources; do
+               result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+               if [ $? -eq 0 ] && [ ! -z "$result" ]; then
+                       for resid in $result; do
+                               T_START=$SECONDS
+                               count=1
+                               scaled=0
+                               while [ $count -gt 0 ]; do
+                                       count=$(kubectl get $restype $resid  -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null)
+                                       if [ $? -eq 0 ] && [ ! -z "$count" ]; then
+                                               if [ $count -ne 0 ]; then
+                                                       echo -ne "  Scaling $restype $resid in namespace $namespace with label autotest to 0, current count=$count....$(($SECONDS-$T_START)) seconds"$SAMELINE
+                                                       scaled=1
                                                else
-                                                       count=0
+                                                       sleep 0.5
                                                fi
-                                       done
-                                       echo -e "  Scaled $restype $resid from namespace $namespace with label $labelname=$labelid to 0,count=$count....$(($SECONDS-$T_START)) seconds$GREEN OK $EGREEN"
+                                       else
+                                               count=0
+                                       fi
+                               done
+                               if [ $scaled -eq 1 ]; then
+                                       echo -e "  Scaling $restype $resid in namespace $namespace with label autotest to 0, current count=$count....$(($SECONDS-$T_START)) seconds $GREEN OK $EGREEN"
                                fi
-                               echo -ne "  Deleting $restype $resid from namespace $namespace with label autotest "$SAMELINE
+                       done
+               fi
+       done
+}
+
+__kube_delete_all_resources() {
+       echo " Delete all in namespace $1 ..."
+       namespace=$1
+       resources="deployments replicaset statefulset services pods configmaps pvc "
+       for restype in $resources; do
+               result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+               if [ $? -eq 0 ] && [ ! -z "$result" ]; then
+                       for resid in $result; do
+                               echo  "  Deleting $restype $resid in namespace $namespace with label autotest "
                                kubectl delete $restype $resid -n $namespace 1> /dev/null 2> /dev/null
-                               if [ $? -eq 0 ]; then
-                                       echo -e "  Deleted $restype $resid from namespace $namespace with label autotest $GREEN OK $EGREEN"
-                               else
-                                       echo -e "  Deleted $restype $resid from namespace $namespace with label autotest $GREEN Does not exist - OK $EGREEN"
-                               fi
-                               #fi
                        done
                fi
        done
-       if [ ! -z "$deleted_resourcetypes" ]; then
-               for restype in $deleted_resources; do
-                       echo -ne "  Waiting for $restype in namespace $namespace with label autotest to be deleted..."$SAMELINE
-                       T_START=$SECONDS
-                       result="dummy"
-                       while [ ! -z "$result" ]; do
-                               sleep 0.5
-                               result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
-                               echo -ne "  Waiting for $restype in namespace $namespace with label autotest to be deleted...$(($SECONDS-$T_START)) seconds "$SAMELINE
-                               if [ -z "$result" ]; then
-                                       echo -e " Waiting for $restype in namespace $namespace with label autotest to be deleted...$(($SECONDS-$T_START)) seconds $GREEN OK $EGREEN"
-                               elif [ $(($SECONDS-$T_START)) -gt 300 ]; then
-                                       echo -e " Waiting for $restype in namespace $namespace with label autotest to be deleted...$(($SECONDS-$T_START)) seconds $RED Failed $ERED"
-                                       result=""
-                               fi
+}
+
+__kube_delete_all_pv() {
+       echo " Delete pv ..."
+       resources="pv"
+       for restype in $resources; do
+               result=$(kubectl get $restype -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+               if [ $? -eq 0 ] && [ ! -z "$result" ]; then
+                       for resid in $result; do
+                               echo  "  Deleting $restype $resid with label autotest "
+                               kubectl delete $restype $resid 1> /dev/null 2> /dev/null
                        done
-               done
-       fi
+               fi
+       done
+}
+
+__kube_wait_for_delete() {
+       echo " Wait for delete in namespace $1 ..."
+       namespace=$1
+       resources="deployments replicaset statefulset services pods configmaps pvc "
+       for restype in $resources; do
+               result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+               if [ $? -eq 0 ] && [ ! -z "$result" ]; then
+                       for resid in $result; do
+                               echo  "  Deleting $restype $resid in namespace $namespace with label autotest "
+                               kubectl delete $restype $resid -n $namespace #1> /dev/null 2> /dev/null
+                               echo -ne "  Waiting for $restype $resid in namespace $namespace with label autotest to be deleted..."$SAMELINE
+                               T_START=$SECONDS
+                               result="dummy"
+                               while [ ! -z "$result" ]; do
+                                       sleep 0.5
+                                       result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+                                       echo -ne "  Waiting for $restype $resid in namespace $namespace with label autotest to be deleted...$(($SECONDS-$T_START)) seconds "$SAMELINE
+                                       if [ -z "$result" ]; then
+                                               echo -e " Waiting for $restype $resid in namespace $namespace with label autotest to be deleted...$(($SECONDS-$T_START)) seconds $GREEN OK $EGREEN"
+                                       elif [ $(($SECONDS-$T_START)) -gt 300 ]; then
+                                               echo -e " Waiting for $restype $resid in namespace $namespace with label autotest to be deleted...$(($SECONDS-$T_START)) seconds $RED Failed $ERED"
+                                               result=""
+                                       fi
+                               done
+                       done
+               fi
+       done
 }
+
+__kube_wait_for_delete_pv() {
+       echo " Wait for delete pv ..."
+       resources="pv "
+       for restype in $resources; do
+               result=$(kubectl get $restype -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+               if [ $? -eq 0 ] && [ ! -z "$result" ]; then
+                       for resid in $result; do
+                               echo  "  Deleting $restype $resid with label autotest "
+                               kubectl delete $restype $resid  #1> /dev/null 2> /dev/null
+                               echo -ne "  Waiting for $restype $resid with label autotest to be deleted..."$SAMELINE
+                               T_START=$SECONDS
+                               result="dummy"
+                               while [ ! -z "$result" ]; do
+                                       sleep 0.5
+                                       result=$(kubectl get $restype -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+                                       echo -ne "  Waiting for $restype $resid with label autotest to be deleted...$(($SECONDS-$T_START)) seconds "$SAMELINE
+                                       if [ -z "$result" ]; then
+                                               echo -e " Waiting for $restype $resid with label autotest to be deleted...$(($SECONDS-$T_START)) seconds $GREEN OK $EGREEN"
+                                       elif [ $(($SECONDS-$T_START)) -gt 300 ]; then
+                                               echo -e " Waiting for $restype $resid with label autotest to be deleted...$(($SECONDS-$T_START)) seconds $RED Failed $ERED"
+                                               result=""
+                                       fi
+                               done
+                       done
+               fi
+       done
+}
+
+
 echo "Will remove all kube resources marked with label 'autotest'"
-__kube_delete_all_resources nonrtric
-__kube_delete_all_resources nonrtric-ft
-__kube_delete_all_resources onap
 
+# List all namespaces and scale/delete per namespace
+nss=$(kubectl get ns  -o jsonpath='{.items[*].metadata.name}')
+if [ ! -z "$nss" ]; then
+       for ns in $nss; do
+               __kube_scale_all_resources $ns
+       done
+       for ns in $nss; do
+               __kube_wait_for_zero_count $ns
+       done
+       for ns in $nss; do
+               __kube_delete_all_resources $ns
+       done
+       __kube_delete_all_pv
+       for ns in $nss; do
+               __kube_wait_for_delete $ns
+       done
+       __kube_wait_for_delete_pv
+fi
 echo "Done"
\ No newline at end of file
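The cleanup now runs in distinct phases over every namespace: scale deployments/replicasets/statefulsets to zero, wait for the replica counts to reach zero, delete the labelled resources, delete labelled persistent volumes, and finally wait for the deletions to complete. Each phase uses the same jsonpath filter on the 'autotest' label, which can also be run by hand to preview what would be removed (the namespace name is just an example):

    ns=nonrtric
    kubectl get deployment,replicaset,statefulset -n $ns \
        -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}'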
index 1b5c8a8..747eaab 100644 (file)
@@ -131,21 +131,42 @@ __CBS_kube_delete_all() {
 # This function is called for apps managed by the test script.
 # args: <log-dir> <file-prexix>
 __CONSUL_store_docker_logs() {
-       docker logs $CONSUL_APP_NAME > $1/$2_consul.log 2>&1
+       if [ $RUNMODE == "KUBE" ]; then
+               :
+       else
+               docker logs $CONSUL_APP_NAME > $1/$2_consul.log 2>&1
+       fi
 }
 
 # Store docker logs
 # This function is called for apps managed by the test script.
 # args: <log-dir> <file-prexix>
 __CBS_store_docker_logs() {
-       docker logs $CBS_APP_NAME > $1$2_cbs.log 2>&1
-       body="$(__do_curl $LOCALHOST_HTTP:$CBS_EXTERNAL_PORT/service_component_all/$POLICY_AGENT_APP_NAME)"
-       echo "$body" > $1$2_consul_config.json 2>&1
+       if [ $RUNMODE == "KUBE" ]; then
+               :
+       else
+               docker logs $CBS_APP_NAME > $1$2_cbs.log 2>&1
+               body="$(__do_curl $LOCALHOST_HTTP:$CBS_EXTERNAL_PORT/service_component_all/$POLICY_AGENT_APP_NAME)"
+               echo "$body" > $1$2_consul_config.json 2>&1
+       fi
+}
+
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__CONSUL_initial_setup() {
+       CONSUL_SERVICE_PATH="http://"$CONSUL_APP_NAME":"$CONSUL_INTERNAL_PORT
+}
+
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__CBS_initial_setup() {
+       CBS_SERVICE_PATH="http://"$CBS_APP_NAME":"$CBS_INTERNAL_PORT
 }
 
 #######################################################
 
-CONSUL_PATH="http://$LOCALHOST:$CONSUL_EXTERNAL_PORT"
 
 ####################
 ### Consul functions
@@ -166,14 +187,15 @@ consul_config_app() {
 
        echo " Loading config for "$POLICY_AGENT_APP_NAME" from "$1
 
-       curlString="$LOCALHOST_HTTP:${CONSUL_EXTERNAL_PORT}/v1/kv/${POLICY_AGENT_CONFIG_KEY}?dc=dc1 -X PUT -H Accept:application/json -H Content-Type:application/json -H X-Requested-With:XMLHttpRequest --data-binary @"$1
+       curlString="$CONSUL_SERVICE_PATH/v1/kv/${POLICY_AGENT_CONFIG_KEY}?dc=dc1 -X PUT -H Accept:application/json -H Content-Type:application/json -H X-Requested-With:XMLHttpRequest --data-binary @"$1
+
        result=$(__do_curl "$curlString")
        if [ $? -ne 0 ]; then
                echo -e $RED" FAIL - json config could not be loaded to consul" $ERED
                ((RES_CONF_FAIL++))
                return 1
        fi
-       body="$(__do_curl $LOCALHOST_HTTP:$CBS_EXTERNAL_PORT/service_component_all/$POLICY_AGENT_CONFIG_KEY)"
+       body="$(__do_curl $CBS_SERVICE_PATH/service_component_all/$POLICY_AGENT_CONFIG_KEY)"
        echo $body > "./tmp/.output"$1
 
        if [ $? -ne 0 ]; then
@@ -254,14 +276,14 @@ prepare_consul_config() {
        config_json=$config_json"\n   \"ric\": ["
 
        if [ $RUNMODE == "KUBE" ]; then
-               result=$(kubectl get pods -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.items[?(@.metadata.labels.autotest=="RICSIM")].metadata.name}')
+               result=$(kubectl get pods -n $KUBE_A1SIM_NAMESPACE -o jsonpath='{.items[?(@.metadata.labels.autotest=="RICSIM")].metadata.name}')
                rics=""
                ric_cntr=0
                if [ $? -eq 0 ] && [ ! -z "$result" ]; then
                        for im in $result; do
                                if [[ $im != *"-0" ]]; then
-                                       ric_subdomain=$(kubectl get pod $im -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.spec.subdomain}')
-                                       rics=$rics" "$im"."$ric_subdomain".nonrtric"
+                                       ric_subdomain=$(kubectl get pod $im -n $KUBE_A1SIM_NAMESPACE -o jsonpath='{.spec.subdomain}')
+                                       rics=$rics" "$im"."$ric_subdomain"."$KUBE_A1SIM_NAMESPACE
                                        let ric_cntr=ric_cntr+1
                                fi
                        done
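In KUBE mode each ric address is now built from the pod name, the pod's subdomain and the A1 simulator namespace. A hedged illustration with a made-up pod name:

    im=a1-sim-osc-1                 # hypothetical pod name
    ric_subdomain=$(kubectl get pod $im -n $KUBE_A1SIM_NAMESPACE -o jsonpath='{.spec.subdomain}')
    echo "$im.$ric_subdomain.$KUBE_A1SIM_NAMESPACE"   # e.g. a1-sim-osc-1.<subdomain>.<a1sim-namespace>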
@@ -339,8 +361,8 @@ start_consul_cbs() {
 
        __start_container $CONSUL_CBS_COMPOSE_DIR "" NODOCKERARGS 2 $CONSUL_APP_NAME $CBS_APP_NAME
 
-       __check_service_start $CONSUL_APP_NAME "http://"$LOCALHOST_NAME":"$CONSUL_EXTERNAL_PORT$CONSUL_ALIVE_URL
-       __check_service_start $CBS_APP_NAME "http://"$LOCALHOST_NAME":"$CBS_EXTERNAL_PORT$CBS_ALIVE_URL
+       __check_service_start $CONSUL_APP_NAME $CONSUL_SERVICE_PATH$CONSUL_ALIVE_URL
+       __check_service_start $CBS_APP_NAME $CBS_SERVICE_PATH$CBS_ALIVE_URL
 
        echo ""
 }
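Both the Consul KV write and the CBS read-back now use the service-internal paths set up by __CONSUL_initial_setup and __CBS_initial_setup (reached via the http proxy when the script runs outside the container network). Stripped of the helper wrappers, the two calls amount to roughly the following sketch (the local file name is illustrative):

    curl -sk -X PUT -H Content-Type:application/json --data-binary @pms_config.json \
        "$CONSUL_SERVICE_PATH/v1/kv/$POLICY_AGENT_CONFIG_KEY?dc=dc1"
    curl -sk "$CBS_SERVICE_PATH/service_component_all/$POLICY_AGENT_CONFIG_KEY"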
index 4d96880..eda6fe3 100644 (file)
@@ -78,19 +78,22 @@ __CP_kube_delete_all() {
 # This function is called for apps managed by the test script.
 # args: <log-dir> <file-prexix>
 __CP_store_docker_logs() {
-       docker logs $CONTROL_PANEL_APP_NAME > $1$2_control-panel.log 2>&1
+       if [ $RUNMODE == "KUBE" ]; then
+               kubectl  logs -l "autotest=CP" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_control-panel.log 2>&1
+       else
+               docker logs $CONTROL_PANEL_APP_NAME > $1$2_control-panel.log 2>&1
+       fi
 }
 
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__CP_initial_setup() {
+       use_control_panel_http
+}
 #######################################################
 
 
-## Access to control panel
-# Host name may be changed if app started by kube
-# Direct access from script
-CP_HTTPX="http"
-CP_HOST_NAME=$LOCALHOST_NAME
-CP_PATH=$CP_HTTPX"://"$CP_HOST_NAME":"$CONTROL_PANEL_EXTERNAL_PORT
-
 ###########################
 ### Control Panel functions
 ###########################
@@ -99,24 +102,72 @@ CP_PATH=$CP_HTTPX"://"$CP_HOST_NAME":"$CONTROL_PANEL_EXTERNAL_PORT
 # args: -
 # (Function for test scripts)
 use_control_panel_http() {
-       echo -e $BOLD"Control Panel, CP, protocol setting"$EBOLD
-       echo -e " Using $BOLD http $EBOLD towards CP"
-       CP_HTTPX="http"
-       CP_PATH=$CP_HTTPX"://"$CP_HOST_NAME":"$CONTROL_PANEL_EXTERNAL_PORT
-       echo ""
+       __control_panel_set_protocoll "http" $CONTROL_PANEL_INTERNAL_PORT $CONTROL_PANEL_EXTERNAL_PORT
 }
 
 # Set https as the protocol to use for all communication to the Control Panel
 # args: -
 # (Function for test scripts)
 use_control_panel_https() {
-       echo -e $BOLD"Control Panel, CP, protocol setting"$EBOLD
-       echo -e " Using $BOLD https $EBOLD towards CP"
-       CP_HTTPX="https"
-       CP_PATH=$CP_HTTPX"://"$CP_HOST_NAME":"$CONTROL_PANEL_EXTERNAL_SECURE_PORT
+       __control_panel_set_protocoll "https" $CONTROL_PANEL_INTERNAL_SECURE_PORT $CONTROL_PANEL_EXTERNAL_SECURE_PORT
+}
+
+# Setup paths to svc/container for internal and external access
+# args: <protocol> <internal-port> <external-port>
+__control_panel_set_protocoll() {
+       echo -e $BOLD"$CONTROL_PANEL_DISPLAY_NAME protocol setting"$EBOLD
+       echo -e " Using $BOLD $1 $EBOLD towards $CONTROL_PANEL_DISPLAY_NAME"
+
+       CP_SERVICE_PATH=$1"://"$CONTROL_PANEL_APP_NAME":"$2
+       if [ $RUNMODE == "KUBE" ]; then
+               CP_SERVICE_PATH=$1"://"$CONTROL_PANEL_APP_NAME.$KUBE_NONRTRIC_NAMESPACE":"$3
+       fi
        echo ""
 }
 
+# Export env vars for config files, docker compose and kube resources
+# args: -
+__control_panel_export_vars() {
+       #Export all vars needed for service and deployment
+       export CONTROL_PANEL_APP_NAME
+       export CONTROL_PANEL_DISPLAY_NAME
+       export KUBE_NONRTRIC_NAMESPACE
+       export DOCKER_SIM_NWNAME
+
+       export CONTROL_PANEL_IMAGE
+       export CONTROL_PANEL_INTERNAL_PORT
+       export CONTROL_PANEL_INTERNAL_SECURE_PORT
+       export CONTROL_PANEL_EXTERNAL_PORT
+       export CONTROL_PANEL_EXTERNAL_SECURE_PORT
+       export CONTROL_PANEL_CONFIG_MOUNT_PATH
+       export CONTROL_PANEL_CONFIG_FILE
+       export CONTROL_PANEL_HOST_MNT_DIR
+
+       export CP_CONFIG_CONFIGMAP_NAME=$CONTROL_PANEL_APP_NAME"-config"
+       export CP_PROXY_CONFIGMAP_NAME=$CONTROL_PANEL_APP_NAME"-proxy"
+
+       export CONTROL_PANEL_PATH_POLICY_PREFIX
+       export CONTROL_PANEL_PATH_ECS_PREFIX
+       export CONTROL_PANEL_PATH_ECS_PREFIX2
+
+       export NRT_GATEWAY_APP_NAME
+       export NRT_GATEWAY_EXTERNAL_PORT
+
+       export POLICY_AGENT_EXTERNAL_SECURE_PORT
+       export ECS_EXTERNAL_SECURE_PORT
+
+       if [ $RUNMODE == "KUBE" ]; then
+               export NGW_DOMAIN_NAME=$NRT_GATEWAY_APP_NAME.$KUBE_NONRTRIC_NAMESPACE.svc.cluster.local  # suffix needed for nginx name resolution
+               export CP_NGINX_RESOLVER=$CONTROL_PANEL_NGINX_KUBE_RESOLVER
+       else
+               export POLICY_AGENT_DOMAIN_NAME=$POLICY_AGENT_APP_NAME
+               export ECS_DOMAIN_NAME=$ECS_APP_NAME
+
+               export NGW_DOMAIN_NAME=$NRT_GATEWAY_APP_NAME
+               export CP_NGINX_RESOLVER=$CONTROL_PANEL_NGINX_DOCKER_RESOLVER
+       fi
+}
+
 # Start the Control Panel container
 # args: -
 # (Function for test scripts)
@@ -157,26 +208,7 @@ start_control_panel() {
 
                        echo -e " Creating $CONTROL_PANEL_APP_NAME app and expose service"
 
-                       #Export all vars needed for service and deployment
-                       export CONTROL_PANEL_APP_NAME
-                       export KUBE_NONRTRIC_NAMESPACE
-                       export CONTROL_PANEL_IMAGE
-                       export CONTROL_PANEL_INTERNAL_PORT
-                       export CONTROL_PANEL_INTERNAL_SECURE_PORT
-                       export CONTROL_PANEL_EXTERNAL_PORT
-                       export CONTROL_PANEL_EXTERNAL_SECURE_PORT
-                       export CONTROL_PANEL_CONFIG_MOUNT_PATH
-                       export CONTROL_PANEL_CONFIG_FILE
-                       export CP_CONFIG_CONFIGMAP_NAME=$CONTROL_PANEL_APP_NAME"-config"
-                       export CP_PROXY_CONFIGMAP_NAME=$CONTROL_PANEL_APP_NAME"-proxy"
-
-                       export NGW_DOMAIN_NAME=$NRT_GATEWAY_APP_NAME.$KUBE_NONRTRIC_NAMESPACE.svc.cluster.local  # suffix needed for nginx name resolution
-                       export NRT_GATEWAY_EXTERNAL_PORT
-                       export CONTROL_PANEL_PATH_POLICY_PREFIX
-                       export CONTROL_PANEL_PATH_ECS_PREFIX
-                       export CONTROL_PANEL_PATH_ECS_PREFIX2
-
-                       export CP_NGINX_RESOLVER=$CONTROL_PANEL_NGINX_KUBE_RESOLVER
+                       __control_panel_export_vars
 
                        #Check if nonrtric namespace exists, if not create it
                        __kube_create_namespace $KUBE_NONRTRIC_NAMESPACE
@@ -206,20 +238,12 @@ start_control_panel() {
 
                fi
 
-               echo " Retrieving host and ports for service..."
-               CP_HOST_NAME=$(__kube_get_service_host $CONTROL_PANEL_APP_NAME $KUBE_NONRTRIC_NAMESPACE)
-
-               CONTROL_PANEL_EXTERNAL_PORT=$(__kube_get_service_port $CONTROL_PANEL_APP_NAME $KUBE_NONRTRIC_NAMESPACE "http")
-               CONTROL_PANEL_EXTERNAL_SECURE_PORT=$(__kube_get_service_port $CONTROL_PANEL_APP_NAME $KUBE_NONRTRIC_NAMESPACE "https")
+               __check_service_start $CONTROL_PANEL_APP_NAME $CP_SERVICE_PATH$CONTROL_PANEL_ALIVE_URL
 
-               echo " Host IP, http port, https port: $CP_HOST_NAME $CONTROL_PANEL_EXTERNAL_PORT $CONTROL_PANEL_EXTERNAL_SECURE_PORT"
-               if [ $CP_HTTPX == "http" ]; then
-                       CP_PATH=$CP_HTTPX"://"$CP_HOST_NAME":"$CONTROL_PANEL_EXTERNAL_PORT
-               else
-                       CP_PATH=$CP_HTTPX"://"$CP_HOST_NAME":"$CONTROL_PANEL_EXTERNAL_SECURE_PORT
-               fi
+               CP_PORT1=$(__kube_get_service_nodeport $CONTROL_PANEL_APP_NAME $KUBE_NONRTRIC_NAMESPACE "http")
+               CP_PORT2=$(__kube_get_service_nodeport $CONTROL_PANEL_APP_NAME $KUBE_NONRTRIC_NAMESPACE "https")
 
-               __check_service_start $CONTROL_PANEL_APP_NAME $CP_PATH$CONTROL_PANEL_ALIVE_URL
+               echo " $CONTROL_PANEL_DISPLAY_NAME node ports (http/https): $CP_PORT1 $CP_PORT2"
        else
                # Check if docker app shall be fully managed by the test script
                __check_included_image 'CP'
@@ -229,36 +253,7 @@ start_control_panel() {
                        exit
                fi
 
-               # Export needed vars for docker compose
-        export CONTROL_PANEL_APP_NAME
-        export CONTROL_PANEL_INTERNAL_PORT
-        export CONTROL_PANEL_EXTERNAL_PORT
-        export CONTROL_PANEL_INTERNAL_SECURE_PORT
-        export CONTROL_PANEL_EXTERNAL_SECURE_PORT
-        export DOCKER_SIM_NWNAME
-
-       export CONTROL_PANEL_HOST_MNT_DIR
-               export CONTROL_PANEL_CONFIG_FILE
-               export CONTROL_PANEL_CONFIG_MOUNT_PATH
-
-               export NRT_GATEWAY_APP_NAME
-               export NRT_GATEWAY_EXTERNAL_PORT
-
-               export POLICY_AGENT_EXTERNAL_SECURE_PORT
-               export ECS_EXTERNAL_SECURE_PORT
-               export POLICY_AGENT_DOMAIN_NAME=$POLICY_AGENT_APP_NAME
-               export ECS_DOMAIN_NAME=$ECS_APP_NAME
-
-               export CONTROL_PANEL_HOST_MNT_DIR
-               export CONTROL_PANEL_CONFIG_MOUNT_PATH
-               export CONTROL_PANEL_CONFIG_FILE
-               export CONTROL_PANEL_DISPLAY_NAME
-               export NGW_DOMAIN_NAME=$NRT_GATEWAY_APP_NAME
-               export CONTROL_PANEL_PATH_POLICY_PREFIX
-               export CONTROL_PANEL_PATH_ECS_PREFIX
-               export CONTROL_PANEL_PATH_ECS_PREFIX2
-
-               export CP_NGINX_RESOLVER=$CONTROL_PANEL_NGINX_DOCKER_RESOLVER
+               __control_panel_export_vars
 
                dest_file=$SIM_GROUP/$CONTROL_PANEL_COMPOSE_DIR/$CONTROL_PANEL_HOST_MNT_DIR/$CONTROL_PANEL_CONFIG_FILE
 
@@ -266,7 +261,9 @@ start_control_panel() {
 
                __start_container $CONTROL_PANEL_COMPOSE_DIR "" NODOCKERARGS 1 $CONTROL_PANEL_APP_NAME
 
-               __check_service_start $CONTROL_PANEL_APP_NAME $CP_PATH$CONTROL_PANEL_ALIVE_URL
+               __check_service_start $CONTROL_PANEL_APP_NAME $CP_SERVICE_PATH$CONTROL_PANEL_ALIVE_URL
+
+               echo " $CONTROL_PANEL_DISPLAY_NAME localhost ports (http/https): $CONTROL_PANEL_EXTERNAL_PORT $CONTROL_PANEL_EXTERNAL_SECURE_PORT"
        fi
        echo ""
 }
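All components now follow the same addressing pattern: the app name plus internal port inside the docker network, or the kubernetes service DNS name (<app>.<namespace>) plus external port when RUNMODE is KUBE, with the script reaching either address through the http proxy. Restated as a standalone sketch for the control panel (http case only):

    CP_SERVICE_PATH="http://$CONTROL_PANEL_APP_NAME:$CONTROL_PANEL_INTERNAL_PORT"
    if [ $RUNMODE == "KUBE" ]; then
        CP_SERVICE_PATH="http://$CONTROL_PANEL_APP_NAME.$KUBE_NONRTRIC_NAMESPACE:$CONTROL_PANEL_EXTERNAL_PORT"
    fi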
index d703d83..4027f30 100644 (file)
@@ -73,7 +73,7 @@ __SDNC_image_data() {
 # All resources shall be ordered to be scaled to 0, if relevant. If not relevant to scale, then do no action.
 # This function is called for apps fully managed by the test script
 __SDNC_kube_scale_zero() {
-       __kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest SDNC
+       __kube_scale_all_resources $KUBE_SNDC_NAMESPACE autotest SDNC
 }
 
 # Scale kubernetes resources to zero and wait until this has been accomplished, if relevant. If not relevant to scale, then do no action.
@@ -85,66 +85,82 @@ __SDNC_kube_scale_zero_and_wait() {
 # Delete all kube resouces for the app
 # This function is called for apps managed by the test script.
 __SDNC_kube_delete_all() {
-       __kube_delete_all_resources $KUBE_NONRTRIC_NAMESPACE autotest SDNC
+       __kube_delete_all_resources $KUBE_SNDC_NAMESPACE autotest SDNC
 }
 
 # Store docker logs
 # This function is called for apps managed by the test script.
 # args: <log-dir> <file-prexix>
 __SDNC_store_docker_logs() {
-       docker exec -t $SDNC_APP_NAME cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
+       if [ $RUNMODE == "KUBE" ]; then
+               kubectl  logs -l "autotest=SDNC" -n $KUBE_SNDC_NAMESPACE --tail=-1 > $1$2_SDNC.log 2>&1
+               podname=$(kubectl get pods -n $KUBE_SNDC_NAMESPACE -l "autotest=SDNC" -o custom-columns=":metadata.name")
+               kubectl exec -t -n $KUBE_SNDC_NAMESPACE $podname -- cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
+       else
+               docker exec -t $SDNC_APP_NAME cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
+       fi
 }
 
-#######################################################
-
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__SDNC_initial_setup() {
+       use_sdnc_http
+}
 
-SDNC_HTTPX="http"
-SDNC_HOST_NAME=$LOCALHOST_NAME
-SDNC_PATH=$SDNC_HTTPX"://"$SDNC_HOST_NAME":"$SDNC_EXTERNAL_PORT
-SDNC_API_PATH=$SDNC_HTTPX"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_HOST_NAME":"$SDNC_EXTERNAL_PORT$SDNC_API_URL
-#Docker/Kube internal path
-if [ $RUNMODE == "KUBE" ]; then
-       SDNC_SERVICE_PATH=$SDNC_HTTPX"://"$SDNC_APP_NAME":"$SDNC_EXTERNAL_PORT
-    #presume correct
-       SDNC_SERVICE_PATH=$SDNC_HTTPX"://"$SDNC_APP_NAME"."$KUBE_NONRTRIC_NAMESPACE":"$SDNC_EXTERNAL_PORT
-       #test
-       #SDNC_SERVICE_PATH=$SDNC_HTTPX"://"$SDNC_APP_NAME"."$KUBE_NONRTRIC_NAMESPACE":"$SDNC_EXTERNAL_PORT
-else
-       SDNC_SERVICE_PATH=$SDNC_HTTPX"://"$SDNC_APP_NAME":"$SDNC_INTERNAL_PORT
-fi
+#######################################################
 
+# Set http as the protocol to use for all communication to SDNC
+# args: -
+# (Function for test scripts)
 use_sdnc_http() {
-       echo -e $BOLD"SDNC NB protocol setting"$EBOLD
-       echo -e " Using $BOLD http $EBOLD towards SDNC"
-       SDNC_HTTPX="http"
-       SDNC_PATH=$SDNC_HTTPX"://"$SDNC_HOST_NAME":"$SDNC_EXTERNAL_PORT
-       SDNC_API_PATH=$SDNC_HTTPX"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_HOST_NAME":"$SDNC_EXTERNAL_PORT$SDNC_API_URL
-       if [ $RUNMODE == "KUBE" ]; then
-               #presume correct
-               SDNC_SERVICE_PATH=$SDNC_HTTPX"://"$SDNC_APP_NAME"."$KUBE_NONRTRIC_NAMESPACE":"$SDNC_EXTERNAL_PORT
-               #test
-               #SDNC_SERVICE_PATH=$SDNC_HTTPX"://"$SDNC_APP_NAME":"$SDNC_EXTERNAL_PORT
-       else
-               SDNC_SERVICE_PATH=$SDNC_HTTPX"://"$SDNC_APP_NAME":"$SDNC_INTERNAL_PORT
-       fi
-       echo ""
+       __sdnc_set_protocoll "http" $SDNC_INTERNAL_PORT $SDNC_EXTERNAL_PORT
 }
 
+# Set https as the protocol to use for all communication to SDNC
+# args: -
+# (Function for test scripts)
 use_sdnc_https() {
-       echo -e $BOLD"SDNC NB protocol setting"$EBOLD
-       echo -e " Using $BOLD https $EBOLD towards SDNC"
-       SDNC_HTTPX="https"
-       SDNC_PATH=$SDNC_HTTPX"://"$SDNC_HOST_NAME":"$SDNC_EXTERNAL_SECURE_PORT
-       SDNC_API_PATH=$SDNC_HTTPX"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_HOST_NAME":"$SDNC_EXTERNAL_SECURE_PORT$SDNC_API_URL
+       __sdnc_set_protocoll "https" $SDNC_INTERNAL_SECURE_PORT $SDNC_EXTERNAL_SECURE_PORT
+}
+
+# Setup paths to svc/container for internal and external access
+# args: <protocol> <internal-port> <external-port>
+__sdnc_set_protocoll() {
+       echo -e $BOLD"$SDNC_DISPLAY_NAME protocol setting"$EBOLD
+       echo -e " Using $BOLD $1 $EBOLD towards $SDNC_DISPLAY_NAME"
+
+       ## Access to SDNC
+
+       SDNC_SERVICE_PATH=$1"://"$SDNC_APP_NAME":"$2  # docker access, container->container and script->container via proxy
+       SDNC_SERVICE_API_PATH=$1"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_APP_NAME":"$2$SDNC_API_URL
        if [ $RUNMODE == "KUBE" ]; then
-               #presume correct
-               SDNC_SERVICE_PATH=$SDNC_HTTPX"://"$SDNC_APP_NAME"."$KUBE_NONRTRIC_NAMESPACE":"$SDNC_EXTERNAL_SECURE_PORT
-               #test
-               #SDNC_SERVICE_PATH=$SDNC_HTTPX"://"$SDNC_APP_NAME":"$SDNC_EXTERNAL_SECURE_PORT
-       else
-               SDNC_SERVICE_PATH=$SDNC_HTTPX"://"$SDNC_APP_NAME":"$SDNC_INTERNAL_SECURE_PORT
+               SDNC_SERVICE_PATH=$1"://"$SDNC_APP_NAME.$KUBE_SNDC_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
+               SDNC_SERVICE_API_PATH=$1"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_APP_NAME.$KUBE_SNDC_NAMESPACE":"$3$SDNC_API_URL
        fi
        echo ""
+
+}
+
+# Export env vars for config files, docker compose and kube resources
+# args:
+__sdnc_export_vars() {
+       export KUBE_SNDC_NAMESPACE
+       export DOCKER_SIM_NWNAME
+
+       export SDNC_APP_NAME
+       export SDNC_DISPLAY_NAME
+
+       export SDNC_A1_CONTROLLER_IMAGE
+       export SDNC_INTERNAL_PORT
+       export SDNC_EXTERNAL_PORT
+       export SDNC_INTERNAL_SECURE_PORT
+       export SDNC_EXTERNAL_SECURE_PORT
+       export SDNC_A1_TRUSTSTORE_PASSWORD
+       export SDNC_DB_APP_NAME
+       export SDNC_DB_IMAGE
+       export SDNC_USER
+       export SDNC_PWD
 }
 
 ##################
@@ -183,7 +199,7 @@ start_sdnc() {
                if [ $retcode_p -eq 0 ]; then
                        echo -e " Using existing $SDNC_APP_NAME deployment and service"
                        echo " Setting SDNC replicas=1"
-                       __kube_scale deployment $SDNC_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
+                       __kube_scale deployment $SDNC_APP_NAME $KUBE_SNDC_NAMESPACE 1
                fi
 
                                # Check if app shall be fully managed by the test script
@@ -191,21 +207,10 @@ start_sdnc() {
 
                        echo -e " Creating $SDNC_APP_NAME app and expose service"
 
-                       #Check if nonrtric namespace exists, if not create it
-                       __kube_create_namespace $KUBE_NONRTRIC_NAMESPACE
-
-                       export KUBE_NONRTRIC_NAMESPACE
-                       export SDNC_APP_NAME
-                       export SDNC_A1_CONTROLLER_IMAGE
-                       export SDNC_INTERNAL_PORT
-                       export SDNC_EXTERNAL_PORT
-                       export SDNC_INTERNAL_SECURE_PORT
-                       export SDNC_EXTERNAL_SECURE_PORT
-                       export SDNC_A1_TRUSTSTORE_PASSWORD
-                       export SDNC_DB_APP_NAME
-                       export SDNC_DB_IMAGE
-                       export SDNC_USER
-                       export SDNC_PWD
+                       #Check if namespace exists, if not create it
+                       __kube_create_namespace $KUBE_SNDC_NAMESPACE
+
+                       __sdnc_export_vars
 
                        # Create service
                        input_yaml=$SIM_GROUP"/"$SDNC_COMPOSE_DIR"/"svc.yaml
@@ -219,30 +224,7 @@ start_sdnc() {
 
                fi
 
-        echo " Retrieving host and ports for service..."
-               SDNC_HOST_NAME=$(__kube_get_service_host $SDNC_APP_NAME $KUBE_NONRTRIC_NAMESPACE)
-               SDNC_EXTERNAL_PORT=$(__kube_get_service_port $SDNC_APP_NAME $KUBE_NONRTRIC_NAMESPACE "http")
-               SDNC_EXTERNAL_SECURE_PORT=$(__kube_get_service_port $SDNC_APP_NAME $KUBE_NONRTRIC_NAMESPACE "https")
-
-               echo " Host IP, http port, https port: $SDNC_HOST_NAME $SDNC_EXTERNAL_PORT $SDNC_EXTERNAL_SECURE_PORT"
-
-        if [ $SDNC_HTTPX == "http" ]; then
-                       SDNC_PATH=$SDNC_HTTPX"://"$SDNC_HOST_NAME":"$SDNC_EXTERNAL_PORT
-                       SDNC_API_PATH=$SDNC_HTTPX"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_HOST_NAME":"$SDNC_EXTERNAL_PORT$SDNC_API_URL
-            #presume correct
-                       SDNC_SERVICE_PATH=$SDNC_HTTPX"://"$SDNC_APP_NAME"."$KUBE_NONRTRIC_NAMESPACE":"$SDNC_EXTERNAL_PORT
-                       #test
-                       #SDNC_SERVICE_PATH=$SDNC_HTTPX"://"$SDNC_APP_NAME":"$SDNC_EXTERNAL_PORT
-               else
-                       SDNC_PATH=$SDNC_HTTPX"://"$SDNC_HOST_NAME":"$SDNC_EXTERNAL_SECURE_PORT
-                       SDNC_API_PATH=$SDNC_HTTPX"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_HOST_NAME":"$SDNC_EXTERNAL_SECURE_PORT$SDNC_API_URL
-            #presume correct
-                       SDNC_SERVICE_PATH=$SDNC_HTTPX"://"$SDNC_APP_NAME"."$KUBE_NONRTRIC_NAMESPACE":"$SDNC_EXTERNAL_SECURE_PORT
-                       #test
-                       #SDNC_SERVICE_PATH=$SDNC_HTTPX"://"$SDNC_APP_NAME":"$SDNC_EXTERNAL_SECURE_PORT
-               fi
-
-               __check_service_start $SDNC_APP_NAME $SDNC_PATH$SDNC_ALIVE_URL
+               __check_service_start $SDNC_APP_NAME $SDNC_SERVICE_PATH$SDNC_ALIVE_URL
        else
 
                __check_included_image 'SDNC'
@@ -252,21 +234,11 @@ start_sdnc() {
                        exit
                fi
 
-               export SDNC_DB_APP_NAME
-        export SDNC_APP_NAME
-        export SDNC_INTERNAL_PORT
-        export SDNC_EXTERNAL_PORT
-        export SDNC_INTERNAL_SECURE_PORT
-        export SDNC_EXTERNAL_SECURE_PORT
-        export SDNC_A1_TRUSTSTORE_PASSWORD
-        export DOCKER_SIM_NWNAME
-               export SDNC_DISPLAY_NAME
-               export SDNC_USER
-               export SDNC_PWD
+               __sdnc_export_vars
 
                __start_container $SDNC_COMPOSE_DIR $SDNC_COMPOSE_FILE NODOCKERARGS 1 $SDNC_APP_NAME
 
-               __check_service_start $SDNC_APP_NAME $SDNC_PATH$SDNC_ALIVE_URL
+               __check_service_start $SDNC_APP_NAME $SDNC_SERVICE_PATH$SDNC_ALIVE_URL
        fi
     echo ""
     return 0
@@ -315,7 +287,7 @@ start_stopped_sdnc() {
                        return 1
                fi
        fi
-       __check_service_start $SDNC_APP_NAME $SDNC_PATH$SDNC_ALIVE_URL
+       __check_service_start $SDNC_APP_NAME $SDNC_SERVICE_PATH$SDNC_ALIVE_URL
        if [ $? -ne 0 ]; then
                return 1
        fi
@@ -356,16 +328,14 @@ __do_curl_to_controller() {
     echo "$json" > $payload
     echo "  FILE ($payload) : $json"  >> $HTTPLOG
        proxyflag=""
-       if [ $RUNMODE == "KUBE" ]; then
-               if [ ! -z "$KUBE_PROXY_PATH" ]; then
-                       if [ $KUBE_PROXY_HTTPX == "http" ]; then
-                               proxyflag=" --proxy $KUBE_PROXY_PATH"
-                       else
-                               proxyflag=" --proxy-insecure --proxy $KUBE_PROXY_PATH"
-                       fi
+       if [ ! -z "$KUBE_PROXY_PATH" ]; then
+               if [ $KUBE_PROXY_HTTPX == "http" ]; then
+                       proxyflag=" --proxy $KUBE_PROXY_PATH"
+               else
+                       proxyflag=" --proxy-insecure --proxy $KUBE_PROXY_PATH"
                fi
        fi
-    curlString="curl -skw %{http_code} $proxyflag -X POST $SDNC_API_PATH$1 -H accept:application/json -H Content-Type:application/json --data-binary @$payload"
+    curlString="curl -skw %{http_code} $proxyflag -X POST $SDNC_SERVICE_API_PATH$1 -H accept:application/json -H Content-Type:application/json --data-binary @$payload"
     echo "  CMD: "$curlString >> $HTTPLOG
     res=$($curlString)
     retcode=$?
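The controller calls now target SDNC_SERVICE_API_PATH (service-name based, with user:password embedded) instead of a localhost path, with the same proxy flag handling as the other wrappers. Expanded, one call amounts to something like the sketch below, where the operation suffix and payload file are placeholders and proxyflag is built as shown above:

    operation="/A1-ADAPTER-API:getA1PolicyType"      # placeholder RESTCONF operation
    curl -skw %{http_code} $proxyflag -X POST "$SDNC_SERVICE_API_PATH$operation" \
        -H accept:application/json -H Content-Type:application/json \
        --data-binary @./tmp/.sdnc_payload.json      # placeholder payload file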
index c19797a..ba46510 100644 (file)
@@ -93,74 +93,81 @@ __CR_kube_delete_all() {
 # This function is called for apps managed by the test script.
 # args: <log-dir> <file-prexix>
 __CR_store_docker_logs() {
-       docker logs $CR_APP_NAME > $1$2_cr.log 2>&1
+       if [ $RUNMODE == "KUBE" ]; then
+               kubectl  logs -l "autotest=CR" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_cr.log 2>&1
+       else
+               docker logs $CR_APP_NAME > $1$2_cr.log 2>&1
+       fi
 }
 
-#######################################################
-
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__CR_initial_setup() {
+       use_cr_http
+}
 
-## Access to Callback Receiver
-# Host name may be changed if app started by kube
-# Direct access from script
-CR_HTTPX="http"
-CR_HOST_NAME=$LOCALHOST_NAME
-CR_PATH=$CR_HTTPX"://"$CR_HOST_NAME":"$CR_EXTERNAL_PORT
-#Docker/Kube internal path
-if [ $RUNMODE == "KUBE" ]; then
-       CR_SERVICE_PATH=$CR_HTTPX"://"$CR_APP_NAME"."$KUBE_SIM_NAMESPACE":"$CR_EXTERNAL_PORT$CR_APP_CALLBACK
-else
-       CR_SERVICE_PATH=$CR_HTTPX"://"$CR_APP_NAME":"$CR_INTERNAL_PORT$CR_APP_CALLBACK
-fi
-# CR_ADAPTER used for switching between REST and DMAAP (only REST supported currently)
-CR_ADAPTER_TYPE="REST"
-CR_ADAPTER=$CR_PATH
+#######################################################
 
 ################
 ### CR functions
 ################
 
-# Set http as the protocol to use for all communication to the Callback Receiver
+# Set http as the protocol to use for all communication to the Callback Receiver
 # args: -
 # (Function for test scripts)
 use_cr_http() {
-       echo -e $BOLD"CR protocol setting"$EBOLD
-       echo -e " Using $BOLD http $EBOLD towards CR"
-
-       CR_HTTPX="http"
-       CR_PATH=$CR_HTTPX"://"$CR_HOST_NAME":"$CR_EXTERNAL_PORT
-
-       #Docker/Kube internal path
-       if [ $RUNMODE == "KUBE" ]; then
-               CR_SERVICE_PATH=$CR_HTTPX"://"$CR_APP_NAME"."$KUBE_SIM_NAMESPACE":"$CR_EXTERNAL_PORT$CR_APP_CALLBACK
-       else
-               CR_SERVICE_PATH=$CR_HTTPX"://"$CR_APP_NAME":"$CR_INTERNAL_PORT$CR_APP_CALLBACK
-       fi
-       CR_ADAPTER_TYPE="REST"
-       CR_ADAPTER=$CR_PATH
-       echo ""
+       __cr_set_protocoll "http" $CR_INTERNAL_PORT $CR_EXTERNAL_PORT
 }
 
-# Set https as the protocol to use for all communication to the Callback Receiver
+# Set https as the protocol to use for all communication to the Callback Receiver
 # args: -
 # (Function for test scripts)
 use_cr_https() {
-       echo -e $BOLD"CR protocol setting"$EBOLD
-       echo -e " Using $BOLD https $EBOLD towards CR"
+       __cr_set_protocoll "https" $CR_INTERNAL_SECURE_PORT $CR_EXTERNAL_SECURE_PORT
+}
+
+# Setup paths to svc/container for internal and external access
+# args: <protocol> <internal-port> <external-port>
+__cr_set_protocoll() {
+       echo -e $BOLD"$CR_DISPLAY_NAME protocol setting"$EBOLD
+       echo -e " Using $BOLD $1 $EBOLD towards $CR_DISPLAY_NAME"
 
-       CR_HTTPX="https"
-       CR_PATH=$CR_HTTPX"://"$CR_HOST_NAME":"$CR_EXTERNAL_SECURE_PORT
+       ## Access to Callback Receiver
 
+       # CR_SERVICE_PATH is the base path to cr
+       CR_SERVICE_PATH=$1"://"$CR_APP_NAME":"$2  # docker access, container->container and script->container via proxy
        if [ $RUNMODE == "KUBE" ]; then
-               CR_SERVICE_PATH=$CR_HTTPX"://"$CR_APP_NAME"."$KUBE_SIM_NAMESPACE":"$CR_EXTERNAL_SECURE_PORT$CR_APP_CALLBACK
-       else
-               CR_SERVICE_PATH=$CR_HTTPX"://"$CR_APP_NAME":"$CR_INTERNAL_SECURE_PORT$CR_APP_CALLBACK
+               CR_SERVICE_PATH=$1"://"$CR_APP_NAME.$KUBE_SIM_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
        fi
+       # Service paths are used in test scripts to provide callback urls to apps
+       CR_SERVICE_MR_PATH=$CR_SERVICE_PATH$CR_APP_CALLBACK_MR  #Only for messages from dmaap adapter/mediator
+       CR_SERVICE_APP_PATH=$CR_SERVICE_PATH$CR_APP_CALLBACK    #For general callbacks from apps
 
+       # CR_ADAPTER used for switching between REST and DMAAP (only REST supported currently)
        CR_ADAPTER_TYPE="REST"
-       CR_ADAPTER=$CR_PATH
+       CR_ADAPTER=$CR_SERVICE_PATH
+
        echo ""
 }
 
+# Export env vars for config files, docker compose and kube resources
+# args: -
+__cr_export_vars() {
+       export CR_APP_NAME
+       export CR_DISPLAY_NAME
+
+       export KUBE_SIM_NAMESPACE
+       export DOCKER_SIM_NWNAME
+
+       export CR_IMAGE
+
+       export CR_INTERNAL_PORT
+       export CR_INTERNAL_SECURE_PORT
+       export CR_EXTERNAL_PORT
+       export CR_EXTERNAL_SECURE_PORT
+}
+
 # Start the Callback reciver in the simulator group
 # args: -
 # (Function for test scripts)
@@ -198,13 +205,8 @@ start_cr() {
 
                if [ $retcode_i -eq 0 ]; then
                        echo -e " Creating $CR_APP_NAME deployment and service"
-                       export CR_APP_NAME
-                       export KUBE_SIM_NAMESPACE
-                       export CR_IMAGE
-                       export CR_INTERNAL_PORT
-                       export CR_INTERNAL_SECURE_PORT
-                       export CR_EXTERNAL_PORT
-                       export CR_EXTERNAL_SECURE_PORT
+
+                       __cr_export_vars
 
                        __kube_create_namespace $KUBE_SIM_NAMESPACE
 
@@ -220,28 +222,10 @@ start_cr() {
 
                fi
 
-               echo " Retrieving host and ports for service..."
-               CR_HOST_NAME=$(__kube_get_service_host $CR_APP_NAME $KUBE_SIM_NAMESPACE)
-
-               CR_EXTERNAL_PORT=$(__kube_get_service_port $CR_APP_NAME $KUBE_SIM_NAMESPACE "http")
-               CR_EXTERNAL_SECURE_PORT=$(__kube_get_service_port $CR_APP_NAME $KUBE_SIM_NAMESPACE "https")
-
-               echo " Host IP, http port, https port: $CR_HOST_NAME $CR_EXTERNAL_PORT $CR_EXTERNAL_SECURE_PORT"
-               if [ $CR_HTTPX == "http" ]; then
-                       CR_PATH=$CR_HTTPX"://"$CR_HOST_NAME":"$CR_EXTERNAL_PORT
-                       CR_SERVICE_PATH=$CR_HTTPX"://"$CR_APP_NAME"."$KUBE_SIM_NAMESPACE":"$CR_EXTERNAL_PORT$CR_APP_CALLBACK
-               else
-                       CR_PATH=$CR_HTTPX"://"$CR_HOST_NAME":"$CR_EXTERNAL_SECURE_PORT
-                       CR_SERVICE_PATH=$CR_HTTPX"://"$CR_APP_NAME"."$KUBE_SIM_NAMESPACE":"$CR_EXTERNAL_SECURE_PORT$CR_APP_CALLBACK
-               fi
-               if [ $CR_ADAPTER_TYPE == "REST" ]; then
-                       CR_ADAPTER=$CR_PATH
-               fi
-
-               __check_service_start $CR_APP_NAME $CR_PATH$CR_ALIVE_URL
+               __check_service_start $CR_APP_NAME $CR_SERVICE_PATH$CR_ALIVE_URL
 
                echo -ne " Service $CR_APP_NAME - reset  "$SAMELINE
-               result=$(__do_curl $CR_APP_NAME $CR_PATH/reset)
+               result=$(__do_curl CR $CR_SERVICE_PATH/reset)
                if [ $? -ne 0 ]; then
                        echo -e " Service $CR_APP_NAME - reset  $RED Failed $ERED - will continue"
                else
@@ -256,17 +240,11 @@ start_cr() {
                        exit
                fi
 
-               export CR_APP_NAME
-               export CR_INTERNAL_PORT
-               export CR_EXTERNAL_PORT
-               export CR_INTERNAL_SECURE_PORT
-               export CR_EXTERNAL_SECURE_PORT
-               export DOCKER_SIM_NWNAME
-               export CR_DISPLAY_NAME
+               __cr_export_vars
 
                __start_container $CR_COMPOSE_DIR "" NODOCKERARGS 1 $CR_APP_NAME
 
-        __check_service_start $CR_APP_NAME $CR_PATH$CR_ALIVE_URL
+        __check_service_start $CR_APP_NAME $CR_SERVICE_PATH$CR_ALIVE_URL
        fi
        echo ""
 }
@@ -281,12 +259,59 @@ start_cr() {
 # (Function for test scripts)
 cr_equal() {
        if [ $# -eq 2 ] || [ $# -eq 3 ]; then
-               __var_test "CR" "$CR_PATH/counter/" $1 "=" $2 $3
+               __var_test "CR" "$CR_SERVICE_PATH/counter/" $1 "=" $2 $3
        else
                __print_err "Wrong args to cr_equal, needs two or three args: <sim-param> <target-value> [ timeout ]" $@
        fi
 }
 
+# Tests if a variable value in the CR contains the target string, with an optional timeout
+# Arg: <variable-name> <target-value> - This test sets pass or fail depending on if the variable contains
+# the target or not.
+# Arg: <variable-name> <target-value> <timeout-in-sec>  - This test waits up to the timeout seconds
+# before setting pass or fail depending on if the variable value contains the target
+# value or not.
+# (Function for test scripts)
+cr_contains_str() {
+
+       if [ $# -eq 2 ] || [ $# -eq 3 ]; then
+               __var_test "CR" "$CR_SERVICE_PATH/counter/" $1 "contain_str" $2 $3
+               return 0
+       else
+               __print_err "needs two or three args: <sim-param> <target-value> [ timeout ]"
+               return 1
+       fi
+}
+
+# Read a variable value from CR sim and send to stdout. Arg: <variable-name>
+cr_read() {
+       echo "$(__do_curl $CR_SERVICE_PATH/counter/$1)"
+}
+
+# Function to configure write delay on callbacks
+# Delay given in seconds.
+# arg <response-code> <delay-in-sec>
+# (Function for test scripts)
+cr_delay_callback() {
+       __log_conf_start $@
+
+       if [ $# -ne 2 ]; then
+        __print_err "<response-code> <delay-in-sec>" $@
+        return 1
+       fi
+
+       res="$(__do_curl_to_api CR POST /forcedelay?delay=$2)"
+       status=${res:${#res}-3}
+
+       if [ $status -ne $1 ]; then
+               __log_conf_fail_status_code $1 $status
+               return 1
+       fi
+
+       __log_conf_ok
+       return 0
+}
+
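+# Typical use from a test script, assuming the CR stub honours the forcedelay query parameter as described:
+#     cr_delay_callback 200 5     # expect HTTP 200 and delay every callback response by 5 seconds
+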
 # CR API: Check the contents of all current ric sync events for one id from PMS
 # <response-code> <id> [ EMPTY | ( <ric-id> )+ ]
 # (Function for test scripts)
@@ -474,4 +499,88 @@ cr_api_reset() {
 
        __log_conf_ok
        return 0
+}
+
+
+# CR API: Check the contents of all json events for path
+# <response-code> <topic-url> (EMPTY | <json-msg>+ )
+# (Function for test scripts)
+cr_api_check_all_genric_json_events() {
+       __log_test_start $@
+
+       if [ $# -lt 3 ]; then
+               __print_err "<response-code> <topic-url> (EMPTY | <json-msg>+ )" $@
+               return 1
+       fi
+
+       query="/get-all-events/"$2
+       res="$(__do_curl_to_api CR GET $query)"
+       status=${res:${#res}-3}
+
+       if [ $status -ne $1 ]; then
+               __log_test_fail_status_code $1 $status
+               return 1
+       fi
+       body=${res:0:${#res}-3}
+       targetJson="["
+
+       if [ $3 != "EMPTY" ]; then
+               shift
+               shift
+               while [ $# -gt 0 ]; do
+                       if [ "$targetJson" != "[" ]; then
+                               targetJson=$targetJson","
+                       fi
+                       targetJson=$targetJson$1
+                       shift
+               done
+       fi
+       targetJson=$targetJson"]"
+
+       echo " TARGET JSON: $targetJson" >> $HTTPLOG
+       res=$(python3 ../common/compare_json.py "$targetJson" "$body")
+
+       if [ $res -ne 0 ]; then
+               __log_test_fail_body
+               return 1
+       fi
+
+       __log_test_pass
+       return 0
+}
+
+
+
+# CR API: Check a single (oldest) json event (or none if empty) for path
+# <response-code> <topic-url> (EMPTY | <json-msg> )
+# (Function for test scripts)
+cr_api_check_single_genric_json_event() {
+       __log_test_start $@
+
+       if [ $# -ne 3 ]; then
+               __print_err "<response-code> <topic-url> (EMPTY | <json-msg> )" $@
+               return 1
+       fi
+
+       query="/get-event/"$2
+       res="$(__do_curl_to_api CR GET $query)"
+       status=${res:${#res}-3}
+
+       if [ $status -ne $1 ]; then
+               __log_test_fail_status_code $1 $status
+               return 1
+       fi
+       body=${res:0:${#res}-3}
+       targetJson=$3
+
+       echo " TARGET JSON: $targetJson" >> $HTTPLOG
+       res=$(python3 ../common/compare_json.py "$targetJson" "$body")
+
+       if [ $res -ne 0 ]; then
+               __log_test_fail_body
+               return 1
+       fi
+
+       __log_test_pass
+       return 0
 }
\ No newline at end of file
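The two generic event checks let a test script verify what the CR has received on an arbitrary callback path, either the whole backlog or just the oldest event. A hedged usage example with a made-up path and payload:

    cr_api_check_all_genric_json_events 200 "hypothetical-job-data" EMPTY
    cr_api_check_single_genric_json_event 200 "hypothetical-job-data" '{"msg":"hello world"}'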
diff --git a/test/common/dmaapadp_api_functions.sh b/test/common/dmaapadp_api_functions.sh
new file mode 100644 (file)
index 0000000..26da2d0
--- /dev/null
@@ -0,0 +1,302 @@
+#!/bin/bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2021 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+# This is a script that contains container/service management functions and test functions for the Dmaap Adapter
+
+
+################ Test engine functions ################
+
+# Create the image var used during the test
+# arg: <image-tag-suffix> (selects staging, snapshot, release etc)
+# <image-tag-suffix> is present only for images with staging, snapshot,release tags
+__DMAAPADP_imagesetup() {
+       __check_and_create_image_var DMAAPADP "DMAAP_ADP_IMAGE" "DMAAP_ADP_IMAGE_BASE" "DMAAP_ADP_IMAGE_TAG" $1 "$DMAAP_ADP_DISPLAY_NAME"
+}
+
+# Pull image from remote repo or use locally built image
+# arg: <pull-policy-override> <pull-policy-original>
+# <pull-policy-override> Shall be used for images allowing overriding. For example, use a local image when the test is started to use released images
+# <pull-policy-original> Shall be used for images that do not allow overriding
+# Both vars may contain: 'remote', 'remote-remove' or 'local'
+__DMAAPADP_imagepull() {
+       __check_and_pull_image $1 "$DMAAP_ADP_DISPLAY_NAME" $DMAAP_ADP_APP_NAME DMAAP_ADP_IMAGE
+}
+
+# Build image (only for simulator or interfaces stubs owned by the test environment)
+# arg: <image-tag-suffix> (selects staging, snapshot, release etc)
+# <image-tag-suffix> is present only for images with staging, snapshot,release tags
+__DMAAPADP_imagebuild() {
+       echo -e $RED" Image for app DMAAPADP shall never be built"$ERED
+}
+
+# Generate a string for each included image using the app display name and a docker images format string
+# If a custom image repo is used then also the source image from the local repo is listed
+# arg: <docker-images-format-string> <file-to-append>
+__DMAAPADP_image_data() {
+       echo -e "$DMAAP_ADP_DISPLAY_NAME\t$(docker images --format $1 $DMAAP_ADP_IMAGE)" >>   $2
+       if [ ! -z "$DMAAP_ADP_IMAGE_SOURCE" ]; then
+               echo -e "-- source image --\t$(docker images --format $1 $DMAAP_ADP_IMAGE_SOURCE)" >>   $2
+       fi
+}
+
+# Scale kubernetes resources to zero
+# All resources shall be ordered to be scaled to 0, if relevant. If not relevant to scale, then do no action.
+# This function is called for apps fully managed by the test script
+__DMAAPADP_kube_scale_zero() {
+       __kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest DMAAPADP
+}
+
+# Scale kubernetes resources to zero and wait until this has been accomplished, if relevant. If not relevant to scale, then do no action.
+# This function is called for prestarted apps not managed by the test script.
+__DMAAPADP_kube_scale_zero_and_wait() {
+       __kube_scale_and_wait_all_resources $KUBE_NONRTRIC_NAMESPACE app "$KUBE_NONRTRIC_NAMESPACE"-dmaapadapterservice
+}
+
+# Delete all kube resources for the app
+# This function is called for apps managed by the test script.
+__DMAAPADP_kube_delete_all() {
+       __kube_delete_all_resources $KUBE_NONRTRIC_NAMESPACE autotest DMAAPADP
+}
+
+# Store docker logs
+# This function is called for apps managed by the test script.
+# args: <log-dir> <file-prefix>
+__DMAAPADP_store_docker_logs() {
+       if [ $RUNMODE == "KUBE" ]; then
+               kubectl  logs -l "autotest=DMAAPADP" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_dmaapadapter.log 2>&1
+       else
+               docker logs $DMAAP_ADP_APP_NAME > $1$2_dmaapadapter.log 2>&1
+       fi
+}
+
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__DMAAPADP_initial_setup() {
+       use_dmaapadp_http
+}
+
+#######################################################
+
+# Set http as the protocol to use for all communication to the Dmaap adapter
+# args: -
+# (Function for test scripts)
+use_dmaapadp_http() {
+       __dmaapadp_set_protocoll "http" $DMAAP_ADP_INTERNAL_PORT $DMAAP_ADP_EXTERNAL_PORT
+}
+
+# Set https as the protocol to use for all communication to the Dmaap adapter
+# args: -
+# (Function for test scripts)
+use_dmaapadp_https() {
+       __dmaapadp_set_protocoll "https" $DMAAP_ADP_INTERNAL_SECURE_PORT $DMAAP_ADP_EXTERNAL_SECURE_PORT
+}
+
+# Setup paths to svc/container for internal and external access
+# args: <protocol> <internal-port> <external-port>
+__dmaapadp_set_protocoll() {
+       echo -e $BOLD"$DMAAP_ADP_DISPLAY_NAME protocol setting"$EBOLD
+       echo -e " Using $BOLD $1 $EBOLD towards $DMAAP_ADP_DISPLAY_NAME"
+
+       ## Access to Dmaap adapter
+
+       DMAAP_ADP_SERVICE_PATH=$1"://"$DMAAP_ADP_APP_NAME":"$2  # docker access, container->container and script->container via proxy
+       if [ $RUNMODE == "KUBE" ]; then
+               DMAAP_ADP_SERVICE_PATH=$1"://"$DMAAP_ADP_APP_NAME.$KUBE_NONRTRIC_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
+       fi
+
+       # DMAAP_ADP_ADAPTER used for switching between REST and DMAAP (only REST supported currently)
+       DMAAP_ADP_ADAPTER_TYPE="REST"
+       DMAAP_ADP_ADAPTER=$DMAAP_ADP_SERVICE_PATH
+
+       echo ""
+}
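+# Illustrative result of the path setup above (sketch only - the app name, namespace and
+# ports are assumed example values, the real ones come from the test env files):
+#   docker: use_dmaapadp_http  => DMAAP_ADP_SERVICE_PATH=http://dmaapadapterservice:8084
+#   kube:   use_dmaapadp_https => DMAAP_ADP_SERVICE_PATH=https://dmaapadapterservice.nonrtric:9088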
+
+# Export env vars for config files, docker compose and kube resources
+# args: PROXY|NOPROXY
+__dmaapadp_export_vars() {
+
+       export DMAAP_ADP_APP_NAME
+       export DMAAP_ADP_DISPLAY_NAME
+
+       export KUBE_NONRTRIC_NAMESPACE
+       export DOCKER_SIM_NWNAME
+
+       export DMAAP_ADP_IMAGE
+
+       export DMAAP_ADP_INTERNAL_PORT
+       export DMAAP_ADP_INTERNAL_SECURE_PORT
+       export DMAAP_ADP_EXTERNAL_PORT
+       export DMAAP_ADP_EXTERNAL_SECURE_PORT
+
+       export DMAAP_ADP_CONFIG_MOUNT_PATH
+       export DMAAP_ADP_DATA_MOUNT_PATH
+       export DMAAP_ADP_HOST_MNT_DIR
+       export DMAAP_ADP_CONFIG_FILE
+       export DMAAP_ADP_DATA_FILE
+
+       export DMAAP_ADP_CONFIG_CONFIGMAP_NAME=$DMAAP_ADP_APP_NAME"-config"
+       export DMAAP_ADP_DATA_CONFIGMAP_NAME=$DMAAP_ADP_APP_NAME"-data"
+
+       export DMMAAP_ADP_PROXY_FLAG="false"
+
+       if [ $1 == "PROXY" ]; then
+               export DMAAP_ADP_HTTP_PROXY_CONFIG_PORT=$HTTP_PROXY_CONFIG_PORT  #Set if proxy is started
+               export DMAAP_ADP_HTTP_PROXY_CONFIG_HOST_NAME=$HTTP_PROXY_CONFIG_HOST_NAME #Set if proxy is started
+               if [ $DMAAP_ADP_HTTP_PROXY_CONFIG_PORT -eq 0 ] || [ -z "$DMAAP_ADP_HTTP_PROXY_CONFIG_HOST_NAME" ]; then
+                       echo -e $YELLOW" Warning: HTTP PROXY will not be configured, proxy app not started"$EYELLOW
+               else
+                       echo " Configured with http proxy"
+               fi
+               export DMMAAP_ADP_PROXY_FLAG="true"
+       else
+               export DMAAP_ADP_HTTP_PROXY_CONFIG_PORT=0
+               export DMAAP_ADP_HTTP_PROXY_CONFIG_HOST_NAME=""
+               echo " Configured without http proxy"
+       fi
+
+
+       # paths to other components
+       export ECS_SERVICE_PATH
+       export DMAAP_ADP_SERVICE_PATH
+       export MR_SERVICE_PATH
+
+}
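+# Summary of the proxy handling above:
+#   PROXY:   DMAAP_ADP_HTTP_PROXY_CONFIG_HOST_NAME/PORT are taken from the started http proxy and DMMAAP_ADP_PROXY_FLAG is set to "true"
+#   NOPROXY: the host name is left empty, the port is set to 0 and the flag stays "false"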
+
+# Start the Dmaap Adapter
+# args: PROXY|NOPROXY <config-file> [ <data-file> ]
+# (Function for test scripts)
+start_dmaapadp() {
+
+       echo -e $BOLD"Starting $DMAAP_ADP_DISPLAY_NAME"$EBOLD
+
+       if [ $RUNMODE == "KUBE" ]; then
+
+               # Check if app shall be fully managed by the test script
+               __check_included_image "DMAAPADP"
+               retcode_i=$?
+
+               # Check if app shall only be used by the test script
+               __check_prestarted_image "DMAAPADP"
+               retcode_p=$?
+
+               if [ $retcode_i -ne 0 ] && [ $retcode_p -ne 0 ]; then
+                       echo -e $RED"The $DMAAP_ADP_APP_NAME app is neither included as managed nor prestarted in this test script"$ERED
+                       echo -e $RED"The $DMAAP_ADP_APP_NAME will not be started"$ERED
+                       exit
+               fi
+               if [ $retcode_i -eq 0 ] && [ $retcode_p -eq 0 ]; then
+                       echo -e $RED"The $DMAAP_ADP_APP_NAME app is included both as managed and prestarted in this test script"$ERED
+                       echo -e $RED"The $DMAAP_ADP_APP_NAME will not be started"$ERED
+                       exit
+               fi
+
+               # Check if app shall be used - not managed - by the test script
+               if [ $retcode_p -eq 0 ]; then
+                       echo -e " Using existing $DMAAP_ADP_APP_NAME deployment and service"
+                       echo " Setting DMAAPADP replicas=1"
+                       __kube_scale statefulset $DMAAP_ADP_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
+               fi
+
+               if [ $retcode_i -eq 0 ]; then
+                       echo -e " Creating $DMAAP_ADP_APP_NAME deployment and service"
+
+                       __kube_create_namespace $KUBE_NONRTRIC_NAMESPACE
+
+                       __dmaapadp_export_vars $1
+
+                       # Create config map for config
+                       configfile=$PWD/tmp/$DMAAP_ADP_CONFIG_FILE
+                       #cp $2 $configfile
+                       envsubst < $2 > $configfile
+                       output_yaml=$PWD/tmp/dmaapadp_cfc.yaml
+                       __kube_create_configmap $DMAAP_ADP_CONFIG_CONFIGMAP_NAME $KUBE_NONRTRIC_NAMESPACE autotest DMAAPADP $configfile $output_yaml
+
+                       # Create config map for data
+                       data_json=$PWD/tmp/$DMAAP_ADP_DATA_FILE
+                       if [ $# -lt 3 ]; then
+                               #create empty dummy file
+                               echo "{}" > $data_json
+                       else
+                               cp $3 $data_json
+                       fi
+                       output_yaml=$PWD/tmp/dmaapadp_cfd.yaml
+                       __kube_create_configmap $DMAAP_ADP_DATA_CONFIGMAP_NAME $KUBE_NONRTRIC_NAMESPACE autotest DMAAPADP $data_json $output_yaml
+
+
+                       # Create service
+                       input_yaml=$SIM_GROUP"/"$DMAAP_ADP_COMPOSE_DIR"/"svc.yaml
+                       output_yaml=$PWD/tmp/dmaapadp_svc.yaml
+                       __kube_create_instance service $DMAAP_ADP_APP_NAME $input_yaml $output_yaml
+
+                       # Create app
+                       input_yaml=$SIM_GROUP"/"$DMAAP_ADP_COMPOSE_DIR"/"app.yaml
+                       output_yaml=$PWD/tmp/dmaapadp_app.yaml
+                       __kube_create_instance app $DMAAP_ADP_APP_NAME $input_yaml $output_yaml
+
+               fi
+
+               __check_service_start $DMAAP_ADP_APP_NAME $DMAAP_ADP_SERVICE_PATH$DMAAP_ADP_ALIVE_URL
+
+       else
+               # Check if docker app shall be fully managed by the test script
+               __check_included_image 'DMAAPADP'
+               if [ $? -eq 1 ]; then
+                       echo -e $RED"The $DMAAP_ADP_DISPLAY_NAME app is not included in this test script"$ERED
+                       echo -e $RED"The $DMAAP_ADP_DISPLAY_NAME will not be started"$ERED
+                       exit
+               fi
+
+               __dmaapadp_export_vars $1
+
+               dest_file=$SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_HOST_MNT_DIR/$DMAAP_ADP_CONFIG_FILE
+
+               envsubst < $2 > $dest_file
+
+               dest_file=$SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_HOST_MNT_DIR/$DMAAP_ADP_DATA_FILE
+
+               if [ $# -lt 3 ]; then
+                       #create empty dummy file
+                       echo "{}" > $dest_file
+               else
+                       envsubst < $3 > $dest_file
+               fi
+
+               __start_container $DMAAP_ADP_COMPOSE_DIR "" NODOCKERARGS 1 $DMAAP_ADP_APP_NAME
+
+               __check_service_start $DMAAP_ADP_APP_NAME $DMAAP_ADP_SERVICE_PATH$DMAAP_ADP_ALIVE_URL
+       fi
+       echo ""
+}
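+# Typical usage from a test case (sketch only - the config/data file paths are placeholders,
+# they are not defined by this function):
+#   start_dmaapadp NOPROXY $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/<application-config>.yaml
+#   start_dmaapadp PROXY   $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/<application-config>.yaml <data-file>.json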
+
+# Turn on trace level tracing
+# args: -
+# (Function for test scripts)
+set_dmaapadp_trace() {
+       echo -e $BOLD"$DMAAP_ADP_DISPLAY_NAME trace logging"$EBOLD
+       curlString="$DMAAP_ADP_SERVICE_PATH$DMAAP_ADP_ACTUATOR -X POST  -H Content-Type:application/json -d {\"configuredLevel\":\"trace\"}"
+       result=$(__do_curl "$curlString")
+       if [ $? -ne 0 ]; then
+               __print_err "could not set trace mode" $@
+               ((RES_CONF_FAIL++))
+               return 1
+       fi
+       echo ""
+       return 0
+}
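+# The curl string above corresponds to a request of this form (sketch - host, port and actuator
+# path depend on the configured env vars, and __do_curl may add further options):
+#   curl $DMAAP_ADP_SERVICE_PATH$DMAAP_ADP_ACTUATOR -X POST -H Content-Type:application/json -d '{"configuredLevel":"trace"}'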
diff --git a/test/common/dmaapmed_api_functions.sh b/test/common/dmaapmed_api_functions.sh
new file mode 100644 (file)
index 0000000..16e1ad7
--- /dev/null
@@ -0,0 +1,262 @@
+#!/bin/bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2021 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+# This is a script that contains container/service management functions and test functions for the Dmaap Mediator
+
+
+################ Test engine functions ################
+
+# Create the image var used during the test
+# arg: <image-tag-suffix> (selects staging, snapshot, release etc)
+# <image-tag-suffix> is present only for images with staging, snapshot, release tags
+__DMAAPMED_imagesetup() {
+       __check_and_create_image_var DMAAPMED "DMAAP_MED_IMAGE" "DMAAP_MED_IMAGE_BASE" "DMAAP_MED_IMAGE_TAG" $1 "$DMAAP_MED_DISPLAY_NAME"
+}
+
+# Pull image from remote repo or use locally built image
+# arg: <pull-policy-override> <pull-policy-original>
+# <pull-policy-override> Shall be used for images that allow overriding. For example, use a local image even when the test is started to use released images
+# <pull-policy-original> Shall be used for images that do not allow overriding
+# Both vars may contain: 'remote', 'remote-remove' or 'local'
+__DMAAPMED_imagepull() {
+       __check_and_pull_image $1 "$DMAAP_MED_DISPLAY_NAME" $DMAAP_MED_APP_NAME DMAAP_MED_IMAGE
+}
+
+# Build image (only for simulator or interface stubs owned by the test environment)
+# arg: <image-tag-suffix> (selects staging, snapshot, release etc)
+# <image-tag-suffix> is present only for images with staging, snapshot, release tags
+__DMAAPMED_imagebuild() {
+       echo -e $RED" Image for app DMAAPMED shall never be built"$ERED
+}
+
+# Generate a string for each included image using the app display name and a docker images format string
+# If a custom image repo is used then also the source image from the local repo is listed
+# arg: <docker-images-format-string> <file-to-append>
+__DMAAPMED_image_data() {
+       echo -e "$DMAAP_MED_DISPLAY_NAME\t$(docker images --format $1 $DMAAP_MED_IMAGE)" >>   $2
+       if [ ! -z "$DMAAP_MED_IMAGE_SOURCE" ]; then
+               echo -e "-- source image --\t$(docker images --format $1 $DMAAP_MED_IMAGE_SOURCE)" >>   $2
+       fi
+}
+
+# Scale kubernetes resources to zero
+# All resources shall be ordered to be scaled to 0, if relevant. If not relevant to scale, then do no action.
+# This function is called for apps fully managed by the test script
+__DMAAPMED_kube_scale_zero() {
+       __kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest DMAAPMED
+}
+
+# Scale kubernetes resources to zero and wait until this has been accomplished, if relevant. If not relevant to scale, then do no action.
+# This function is called for prestarted apps not managed by the test script.
+__DMAAPMED_kube_scale_zero_and_wait() {
+       __kube_scale_and_wait_all_resources $KUBE_NONRTRIC_NAMESPACE app "$KUBE_NONRTRIC_NAMESPACE"-dmaapmediatorservice
+}
+
+# Delete all kube resources for the app
+# This function is called for apps managed by the test script.
+__DMAAPMED_kube_delete_all() {
+       __kube_delete_all_resources $KUBE_NONRTRIC_NAMESPACE autotest DMAAPMED
+}
+
+# Store docker logs
+# This function is called for apps managed by the test script.
+# args: <log-dir> <file-prefix>
+__DMAAPMED_store_docker_logs() {
+       if [ $RUNMODE == "KUBE" ]; then
+               kubectl  logs -l "autotest=DMAAPMED" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_dmaapmediator.log 2>&1
+       else
+               docker logs $DMAAP_MED_APP_NAME > $1$2_dmaapmediator.log 2>&1
+       fi
+}
+
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__DMAAPMED_initial_setup() {
+       use_dmaapmed_http
+}
+
+#######################################################
+
+# Set http as the protocol to use for all communication to the Dmaap mediator
+# args: -
+# (Function for test scripts)
+use_dmaapmed_http() {
+       __dmaapmed_set_protocoll "http" $DMAAP_MED_INTERNAL_PORT $DMAAP_MED_EXTERNAL_PORT
+}
+
+# Set https as the protocol to use for all communication to the Dmaap mediator
+# args: -
+# (Function for test scripts)
+use_dmaapmed_https() {
+       __dmaapmed_set_protocoll "https" $DMAAP_MED_INTERNAL_SECURE_PORT $DMAAP_MED_EXTERNAL_SECURE_PORT
+}
+
+# Setup paths to svc/container for internal and external access
+# args: <protocol> <internal-port> <external-port>
+__dmaapmed_set_protocoll() {
+       echo -e $BOLD"$DMAAP_MED_DISPLAY_NAME protocol setting"$EBOLD
+       echo -e " Using $BOLD $1 $EBOLD towards $DMAAP_MED_DISPLAY_NAME"
+
+       ## Access to Dmaap mediator
+
+       DMAAP_MED_SERVICE_PATH=$1"://"$DMAAP_MED_APP_NAME":"$2  # docker access, container->container and script->container via proxy
+       if [ $RUNMODE == "KUBE" ]; then
+               DMAAP_MED_SERVICE_PATH=$1"://"$DMAAP_MED_APP_NAME.$KUBE_NONRTRIC_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
+       fi
+
+       # DMAAP_MED_ADAPTER used for switching between REST and DMAAP (only REST supported currently)
+       DMAAP_MED_ADAPTER_TYPE="REST"
+       DMAAP_MED_ADAPTER=$DMAAP_MED_SERVICE_PATH
+
+       echo ""
+}
+
+# Export env vars for config files, docker compose and kube resources
+# args: PROXY|NOPROXY
+__dmaapmed_export_vars() {
+
+       export DMAAP_MED_APP_NAME
+       export DMAAP_MED_DISPLAY_NAME
+
+       export KUBE_NONRTRIC_NAMESPACE
+       export DOCKER_SIM_NWNAME
+
+       export DMAAP_MED_IMAGE
+
+       export DMAAP_MED_INTERNAL_PORT
+       export DMAAP_MED_INTERNAL_SECURE_PORT
+       export DMAAP_MED_EXTERNAL_PORT
+       export DMAAP_MED_EXTERNAL_SECURE_PORT
+
+       export DMAAP_MED_DATA_MOUNT_PATH
+       export DMAAP_MED_HOST_MNT_DIR
+       export DMAAP_MED_DATA_FILE
+       export DMAAP_MED_DATA_CONFIGMAP_NAME=$DMAAP_MED_APP_NAME"-data"
+
+       if [ $1 == "PROXY" ]; then
+               export DMAAP_MED_HTTP_PROXY_CONFIG_PORT=$HTTP_PROXY_CONFIG_PORT  #Set if proxy is started
+               export DMAAP_MED_HTTP_PROXY_CONFIG_HOST_NAME=$HTTP_PROXY_CONFIG_HOST_NAME #Set if proxy is started
+               if [ $DMAAP_MED_HTTP_PROXY_CONFIG_PORT -eq 0 ] || [ -z "$DMAAP_MED_HTTP_PROXY_CONFIG_HOST_NAME" ]; then
+                       echo -e $YELLOW" Warning: HTTP PROXY will not be configured, proxy app not started"$EYELLOW
+               else
+                       echo " Configured with http proxy"
+               fi
+       else
+               export DMAAP_MED_HTTP_PROXY_CONFIG_PORT=0
+               export DMAAP_MED_HTTP_PROXY_CONFIG_HOST_NAME=""
+               echo " Configured without http proxy"
+       fi
+
+       # paths to other components
+       export ECS_SERVICE_PATH
+
+       export DMAAP_MED_CONF_SELF_HOST=$(echo $DMAAP_MED_SERVICE_PATH | cut -d: -f1-2)
+       export DMAAP_MED_CONF_SELF_PORT=$(echo $DMAAP_MED_SERVICE_PATH | cut -d: -f3)
+       export MR_SERVICE_PATH
+}
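+# Illustrative split of the mediator's own host/port above (the service path value is an assumed example):
+#   DMAAP_MED_SERVICE_PATH=http://dmaapmediatorservice:8085
+#   => DMAAP_MED_CONF_SELF_HOST=http://dmaapmediatorservice   (':' delimited fields 1-2)
+#   => DMAAP_MED_CONF_SELF_PORT=8085                          (field 3)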
+
+# Start the Dmaap mediator
+# args: PROXY|NOPROXY [ <data-file> ]
+# (Function for test scripts)
+start_dmaapmed() {
+
+       echo -e $BOLD"Starting $DMAAP_MED_DISPLAY_NAME"$EBOLD
+
+       if [ $RUNMODE == "KUBE" ]; then
+
+               # Check if app shall be fully managed by the test script
+               __check_included_image "DMAAPMED"
+               retcode_i=$?
+
+               # Check if app shall only be used by the test script
+               __check_prestarted_image "DMAAPMED"
+               retcode_p=$?
+
+               if [ $retcode_i -ne 0 ] && [ $retcode_p -ne 0 ]; then
+                       echo -e $RED"The $DMAAP_MED_APP_NAME app is neither included as managed nor prestarted in this test script"$ERED
+                       echo -e $RED"The $DMAAP_MED_APP_NAME will not be started"$ERED
+                       exit
+               fi
+               if [ $retcode_i -eq 0 ] && [ $retcode_p -eq 0 ]; then
+                       echo -e $RED"The $DMAAP_MED_APP_NAME app is included both as managed and prestarted in this test script"$ERED
+                       echo -e $RED"The $DMAAP_MED_APP_NAME will not be started"$ERED
+                       exit
+               fi
+
+               # Check if app shall be used - not managed - by the test script
+               if [ $retcode_p -eq 0 ]; then
+                       echo -e " Using existing $DMAAP_MED_APP_NAME deployment and service"
+                       echo " Setting DMAAPMED replicas=1"
+                       __kube_scale statefulset $DMAAP_MED_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
+               fi
+
+               if [ $retcode_i -eq 0 ]; then
+                       echo -e " Creating $DMAAP_MED_APP_NAME deployment and service"
+
+                       __kube_create_namespace $KUBE_NONRTRIC_NAMESPACE
+
+                       __dmaapmed_export_vars $1
+
+                       # Create config map for data
+                       data_json=$PWD/tmp/$DMAAP_MED_DATA_FILE
+                       if [ $# -lt 2 ]; then
+                               #create empty dummy file
+                               echo "{}" > $data_json
+                       else
+                               cp $2 $data_json
+                       fi
+                       output_yaml=$PWD/tmp/dmaapmed_cfd.yaml
+                       __kube_create_configmap $DMAAP_MED_DATA_CONFIGMAP_NAME $KUBE_NONRTRIC_NAMESPACE autotest DMAAPMED $data_json $output_yaml
+
+                       # Create service
+                       input_yaml=$SIM_GROUP"/"$DMAAP_MED_COMPOSE_DIR"/"svc.yaml
+                       output_yaml=$PWD/tmp/dmaapmed_svc.yaml
+                       __kube_create_instance service $DMAAP_MED_APP_NAME $input_yaml $output_yaml
+
+                       # Create app
+                       input_yaml=$SIM_GROUP"/"$DMAAP_MED_COMPOSE_DIR"/"app.yaml
+                       output_yaml=$PWD/tmp/dmaapmed_app.yaml
+                       __kube_create_instance app $DMAAP_MED_APP_NAME $input_yaml $output_yaml
+
+               fi
+
+               __check_service_start $DMAAP_MED_APP_NAME $DMAAP_MED_SERVICE_PATH$DMAAP_MED_ALIVE_URL
+
+       else
+               # Check if docker app shall be fully managed by the test script
+               __check_included_image 'DMAAPMED'
+               if [ $? -eq 1 ]; then
+                       echo -e $RED"The $DMAAP_MED_DISPLAY_NAME app is not included in this test script"$ERED
+                       echo -e $RED"The $DMAAP_MED_DISPLAY_NAME will not be started"$ERED
+                       exit
+               fi
+
+               __dmaapmed_export_vars $1
+
+               dest_file=$SIM_GROUP/$DMAAP_MED_COMPOSE_DIR/$DMAAP_MED_HOST_MNT_DIR/$DMAAP_MED_DATA_FILE
+
+               envsubst < $2 > $dest_file
+
+               __start_container $DMAAP_MED_COMPOSE_DIR "" NODOCKERARGS 1 $DMAAP_MED_APP_NAME
+
+               __check_service_start $DMAAP_MED_APP_NAME $DMAAP_MED_SERVICE_PATH$DMAAP_MED_ALIVE_URL
+       fi
+       echo ""
+}
\ No newline at end of file
index 9bde835..2b434f1 100644 (file)
@@ -64,7 +64,7 @@ __ECS_kube_scale_zero() {
 # Scale kubernetes resources to zero and wait until this has been accomplished, if relevant. If not relevant to scale, then do no action.
 # This function is called for prestarted apps not managed by the test script.
 __ECS_kube_scale_zero_and_wait() {
-       __kube_scale_and_wait_all_resources $KUBE_NONRTRIC_NAMESPACE app nonrtric-enrichmentservice
+       __kube_scale_and_wait_all_resources $KUBE_NONRTRIC_NAMESPACE app "$KUBE_NONRTRIC_NAMESPACE"-enrichmentservice
 }
 
 # Delete all kube resouces for the app
@@ -77,21 +77,22 @@ __ECS_kube_delete_all() {
 # This function is called for apps managed by the test script.
 # args: <log-dir> <file-prexix>
 __ECS_store_docker_logs() {
-       docker logs $ECS_APP_NAME > $1$2_ecs.log 2>&1
+       if [ $RUNMODE == "KUBE" ]; then
+               kubectl  logs -l "autotest=ECS" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_ecs.log 2>&1
+       else
+               docker logs $ECS_APP_NAME > $1$2_ecs.log 2>&1
+       fi
 }
-#######################################################
 
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__ECS_initial_setup() {
+       use_ecs_rest_http
+}
 
-## Access to ECS
-# Host name may be changed if app started by kube
-# Direct access
-ECS_HTTPX="http"
-ECS_HOST_NAME=$LOCALHOST_NAME
-ECS_PATH=$ECS_HTTPX"://"$ECS_HOST_NAME":"$ECS_EXTERNAL_PORT
+#######################################################
 
-# ECS_ADAPTER used for switch between REST and DMAAP (only REST supported currently)
-ECS_ADAPTER_TYPE="REST"
-ECS_ADAPTER=$ECS_PATH
 
 # Make curl retries towards ECS for http response codes set in this env var, space separated list of codes
 ECS_RETRY_CODES=""
@@ -107,28 +108,14 @@ __ECS_WORKER_NODE=""
 # args: -
 # (Function for test scripts)
 use_ecs_rest_http() {
-       echo -e $BOLD"ECS protocol setting"$EBOLD
-       echo -e " Using $BOLD http $EBOLD and $BOLD REST $EBOLD towards ECS"
-       ECS_HTTPX="http"
-       ECS_PATH=$ECS_HTTPX"://"$ECS_HOST_NAME":"$ECS_EXTERNAL_PORT
-
-       ECS_ADAPTER_TYPE="REST"
-       ECS_ADAPTER=$ECS_PATH
-       echo ""
+       __ecs_set_protocoll "http" $ECS_INTERNAL_PORT $ECS_EXTERNAL_PORT
 }
 
 # All calls to ECS will be directed to the ECS REST interface from now on
 # args: -
 # (Function for test scripts)
 use_ecs_rest_https() {
-       echo -e $BOLD"ECS protocol setting"$EBOLD
-       echo -e " Using $BOLD https $EBOLD and $BOLD REST $EBOLD towards ECS"
-       ECS_HTTPX="https"
-       ECS_PATH=$ECS_HTTPX"://"$ECS_HOST_NAME":"$ECS_EXTERNAL_SECURE_PORT
-
-       ECS_ADAPTER_TYPE="REST"
-       ECS_ADAPTER=$ECS_PATH
-       echo ""
+       __ecs_set_protocoll "https" $ECS_INTERNAL_SECURE_PORT $ECS_EXTERNAL_SECURE_PORT
 }
 
 # All calls to ECS will be directed to the ECS dmaap interface over http from now on
@@ -142,17 +129,68 @@ use_ecs_dmaap_http() {
        echo ""
 }
 
-# All calls to ECS will be directed to the ECS dmaap interface over https from now on
-# args: -
-# (Function for test scripts)
-use_ecs_dmaap_https() {
-       echo -e $BOLD"RICSIM protocol setting"$EBOLD
-       echo -e $RED" - NOT SUPPORTED - "$ERED
-       echo -e " Using $BOLD https $EBOLD and $BOLD REST $EBOLD towards ECS"
-       ECS_ADAPTER_TYPE="MR-HTTPS"
+# Setup paths to svc/container for internal and external access
+# args: <protocol> <internal-port> <external-port>
+__ecs_set_protocoll() {
+       echo -e $BOLD"$ECS_DISPLAY_NAME protocol setting"$EBOLD
+       echo -e " Using $BOLD $1 $EBOLD towards $ECS_DISPLAY_NAME"
+
+       ## Access to ECS
+
+       ECS_SERVICE_PATH=$1"://"$ECS_APP_NAME":"$2  # docker access, container->container and script->container via proxy
+       if [ $RUNMODE == "KUBE" ]; then
+               ECS_SERVICE_PATH=$1"://"$ECS_APP_NAME.$KUBE_NONRTRIC_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
+       fi
+
+       # ECS_ADAPTER used for switching between REST and DMAAP (only REST supported currently)
+       ECS_ADAPTER_TYPE="REST"
+       ECS_ADAPTER=$ECS_SERVICE_PATH
+
        echo ""
 }
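+# Illustrative result of the ECS path setup above (sketch only - name, namespace and port are assumed example values):
+#   docker: ECS_SERVICE_PATH=http://enrichmentservice:8083     kube: ECS_SERVICE_PATH=http://enrichmentservice.nonrtric:8083
+#   ECS_ADAPTER always mirrors ECS_SERVICE_PATH as long as only REST is supported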
 
+# Export env vars for config files, docker compose and kube resources
+# args: PROXY|NOPROXY
+__ecs_export_vars() {
+               export ECS_APP_NAME
+               export ECS_APP_NAME_ALIAS
+               export KUBE_NONRTRIC_NAMESPACE
+               export ECS_IMAGE
+               export ECS_INTERNAL_PORT
+               export ECS_INTERNAL_SECURE_PORT
+               export ECS_EXTERNAL_PORT
+               export ECS_EXTERNAL_SECURE_PORT
+               export ECS_CONFIG_MOUNT_PATH
+               export ECS_CONFIG_CONFIGMAP_NAME=$ECS_APP_NAME"-config"
+               export ECS_DATA_CONFIGMAP_NAME=$ECS_APP_NAME"-data"
+               export ECS_CONTAINER_MNT_DIR
+               export ECS_HOST_MNT_DIR
+               export ECS_CONFIG_FILE
+               export DOCKER_SIM_NWNAME
+               export ECS_DISPLAY_NAME
+
+
+               export ECS_DATA_PV_NAME=$ECS_APP_NAME"-pv"
+               export ECS_DATA_PVC_NAME=$ECS_APP_NAME"-pvc"
+               #Create a unique path for the pv each time to prevent a previous volume to be reused
+               export ECS_PV_PATH="ecsdata-"$(date +%s)
+
+               if [ $1 == "PROXY" ]; then
+                       export ECS_HTTP_PROXY_CONFIG_PORT=$HTTP_PROXY_CONFIG_PORT  #Set if proxy is started
+                       export ECS_HTTP_PROXY_CONFIG_HOST_NAME=$HTTP_PROXY_CONFIG_HOST_NAME #Set if proxy is started
+                       if [ $ECS_HTTP_PROXY_CONFIG_PORT -eq 0 ] || [ -z "$ECS_HTTP_PROXY_CONFIG_HOST_NAME" ]; then
+                               echo -e $YELLOW" Warning: HTTP PROXY will not be configured, proxy app not started"$EYELLOW
+                       else
+                               echo " Configured with http proxy"
+                       fi
+               else
+                       export ECS_HTTP_PROXY_CONFIG_PORT=0
+                       export ECS_HTTP_PROXY_CONFIG_HOST_NAME=""
+                       echo " Configured without http proxy"
+               fi
+}
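+# Note on ECS_PV_PATH above: the epoch timestamp makes the path unique per run, e.g. (illustrative value only)
+#   ECS_PV_PATH="ecsdata-1636364707"
+# so a newly created persistent volume never reuses data from a previous test run.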
+
+
 # Start the ECS
 # args: PROXY|NOPROXY <config-file>
 # (Function for test scripts)
@@ -196,38 +234,7 @@ start_ecs() {
                        #Check if nonrtric namespace exists, if not create it
                        __kube_create_namespace $KUBE_NONRTRIC_NAMESPACE
 
-                       export ECS_APP_NAME
-                       export KUBE_NONRTRIC_NAMESPACE
-                       export ECS_IMAGE
-                       export ECS_INTERNAL_PORT
-                       export ECS_INTERNAL_SECURE_PORT
-                       export ECS_EXTERNAL_PORT
-                       export ECS_EXTERNAL_SECURE_PORT
-                       export ECS_CONFIG_MOUNT_PATH
-                       export ECS_CONFIG_CONFIGMAP_NAME=$ECS_APP_NAME"-config"
-                       export ECS_DATA_CONFIGMAP_NAME=$ECS_APP_NAME"-data"
-                       export ECS_CONTAINER_MNT_DIR
-
-                       export ECS_DATA_PV_NAME=$ECS_APP_NAME"-pv"
-                       export ECS_DATA_PVC_NAME=$ECS_APP_NAME"-pvc"
-                       #Create a unique path for the pv each time to prevent a previous volume to be reused
-                       export ECS_PV_PATH="ecsdata-"$(date +%s)
-
-                       if [ $1 == "PROXY" ]; then
-                               ECS_HTTP_PROXY_CONFIG_PORT=$HTTP_PROXY_CONFIG_PORT  #Set if proxy is started
-                               ECS_HTTP_PROXY_CONFIG_HOST_NAME=$HTTP_PROXY_CONFIG_HOST_NAME #Set if proxy is started
-                               if [ $ECS_HTTP_PROXY_CONFIG_PORT -eq 0 ] || [ -z "$ECS_HTTP_PROXY_CONFIG_HOST_NAME" ]; then
-                                       echo -e $YELLOW" Warning: HTTP PROXY will not be configured, proxy app not started"$EYELLOW
-                               else
-                                       echo " Configured with http proxy"
-                               fi
-                       else
-                               ECS_HTTP_PROXY_CONFIG_PORT=0
-                               ECS_HTTP_PROXY_CONFIG_HOST_NAME=""
-                               echo " Configured without http proxy"
-                       fi
-                       export ECS_HTTP_PROXY_CONFIG_PORT
-                       export ECS_HTTP_PROXY_CONFIG_HOST_NAME
+                       __ecs_export_vars $1
 
                        # Create config map for config
                        datafile=$PWD/tmp/$ECS_CONFIG_FILE
@@ -269,24 +276,9 @@ start_ecs() {
                        echo -e $YELLOW" Persistency may not work for app $ECS_APP_NAME in multi-worker node config when running it as a prestarted app"$EYELLOW
                fi
 
-               echo " Retrieving host and ports for service..."
-               ECS_HOST_NAME=$(__kube_get_service_host $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE)
-               ECS_EXTERNAL_PORT=$(__kube_get_service_port $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE "http")
-               ECS_EXTERNAL_SECURE_PORT=$(__kube_get_service_port $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE "https")
-
-               echo " Host IP, http port, https port: $ECS_HOST_NAME $ECS_EXTERNAL_PORT $ECS_EXTERNAL_SECURE_PORT"
-
-               if [ $ECS_HTTPX == "http" ]; then
-                       ECS_PATH=$ECS_HTTPX"://"$ECS_HOST_NAME":"$ECS_EXTERNAL_PORT
-               else
-                       ECS_PATH=$ECS_HTTPX"://"$ECS_HOST_NAME":"$ECS_EXTERNAL_SECURE_PORT
-               fi
 
-               __check_service_start $ECS_APP_NAME $ECS_PATH$ECS_ALIVE_URL
+               __check_service_start $ECS_APP_NAME $ECS_SERVICE_PATH$ECS_ALIVE_URL
 
-               if [ $ECS_ADAPTER_TYPE == "REST" ]; then
-                       ECS_ADAPTER=$ECS_PATH
-               fi
        else
                __check_included_image 'ECS'
                if [ $? -eq 1 ]; then
@@ -312,36 +304,10 @@ start_ecs() {
                else
                        echo " No files in mounted dir or dir does not exists"
                fi
-               cd $curdir
 
-               export ECS_APP_NAME
-               export ECS_APP_NAME_ALIAS
-               export ECS_HOST_MNT_DIR
-               export ECS_CONTAINER_MNT_DIR
-               export ECS_CONFIG_MOUNT_PATH
-               export ECS_CONFIG_FILE
-               export ECS_INTERNAL_PORT
-               export ECS_EXTERNAL_PORT
-               export ECS_INTERNAL_SECURE_PORT
-               export ECS_EXTERNAL_SECURE_PORT
-               export DOCKER_SIM_NWNAME
-               export ECS_DISPLAY_NAME
+               cd $curdir
 
-               if [ $1 == "PROXY" ]; then
-                       ECS_HTTP_PROXY_CONFIG_PORT=$HTTP_PROXY_CONFIG_PORT  #Set if proxy is started
-                       ECS_HTTP_PROXY_CONFIG_HOST_NAME=$HTTP_PROXY_CONFIG_HOST_NAME #Set if proxy is started
-                       if [ $ECS_HTTP_PROXY_CONFIG_PORT -eq 0 ] || [ -z "$ECS_HTTP_PROXY_CONFIG_HOST_NAME" ]; then
-                               echo -e $YELLOW" Warning: HTTP PROXY will not be configured, proxy app not started"$EYELLOW
-                       else
-                               echo " Configured with http proxy"
-                       fi
-               else
-                       ECS_HTTP_PROXY_CONFIG_PORT=0
-                       ECS_HTTP_PROXY_CONFIG_HOST_NAME=""
-                       echo " Configured without http proxy"
-               fi
-               export ECS_HTTP_PROXY_CONFIG_PORT
-               export ECS_HTTP_PROXY_CONFIG_HOST_NAME
+               __ecs_export_vars $1
 
                dest_file=$SIM_GROUP/$ECS_COMPOSE_DIR/$ECS_HOST_MNT_DIR/$ECS_CONFIG_FILE
 
@@ -349,7 +315,7 @@ start_ecs() {
 
                __start_container $ECS_COMPOSE_DIR "" NODOCKERARGS 1 $ECS_APP_NAME
 
-               __check_service_start $ECS_APP_NAME $ECS_PATH$ECS_ALIVE_URL
+               __check_service_start $ECS_APP_NAME $ECS_SERVICE_PATH$ECS_ALIVE_URL
        fi
        echo ""
        return 0
@@ -407,7 +373,7 @@ start_stopped_ecs() {
                        echo -e $YELLOW" Persistency may not work for app $ECS_APP_NAME in multi-worker node config when running it as a prestarted app"$EYELLOW
                        res_type=$(__kube_get_resource_type $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE)
                        __kube_scale $res_type $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
-                       __check_service_start $ECS_APP_NAME $ECS_PATH$ECS_ALIVE_URL
+                       __check_service_start $ECS_APP_NAME $ECS_SERVICE_PATH$ECS_ALIVE_URL
                        return 0
                fi
 
@@ -435,7 +401,7 @@ start_stopped_ecs() {
                        return 1
                fi
        fi
-       __check_service_start $ECS_APP_NAME $ECS_PATH$ECS_ALIVE_URL
+       __check_service_start $ECS_APP_NAME $ECS_SERVICE_PATH$ECS_ALIVE_URL
        if [ $? -ne 0 ]; then
                return 1
        fi
@@ -448,7 +414,7 @@ start_stopped_ecs() {
 # (Function for test scripts)
 set_ecs_debug() {
        echo -e $BOLD"Setting ecs debug logging"$EBOLD
-       curlString="$ECS_PATH$ECS_ACTUATOR -X POST  -H Content-Type:application/json -d {\"configuredLevel\":\"debug\"}"
+       curlString="$ECS_SERVICE_PATH$ECS_ACTUATOR -X POST  -H Content-Type:application/json -d {\"configuredLevel\":\"debug\"}"
        result=$(__do_curl "$curlString")
        if [ $? -ne 0 ]; then
                __print_err "Could not set debug mode" $@
@@ -464,7 +430,7 @@ set_ecs_debug() {
 # (Function for test scripts)
 set_ecs_trace() {
        echo -e $BOLD"Setting ecs trace logging"$EBOLD
-       curlString="$ECS_PATH/actuator/loggers/org.oransc.enrichment -X POST  -H Content-Type:application/json -d {\"configuredLevel\":\"trace\"}"
+       curlString="$ECS_SERVICE_PATH/actuator/loggers/org.oransc.enrichment -X POST  -H Content-Type:application/json -d {\"configuredLevel\":\"trace\"}"
        result=$(__do_curl "$curlString")
        if [ $? -ne 0 ]; then
                __print_err "Could not set trace mode" $@
@@ -502,7 +468,7 @@ check_ecs_logs() {
 # (Function for test scripts)
 ecs_equal() {
        if [ $# -eq 2 ] || [ $# -eq 3 ]; then
-               __var_test ECS "$ECS_PATH/" $1 "=" $2 $3
+               __var_test ECS "$ECS_SERVICE_PATH/" $1 "=" $2 $3
        else
                __print_err "Wrong args to ecs_equal, needs two or three args: <sim-param> <target-value> [ timeout ]" $@
        fi
@@ -2466,13 +2432,13 @@ ecs_api_admin_reset() {
 ecs_kube_pvc_reset() {
        __log_test_start $@
 
-       pvc_name=$(kubectl get pvc -n nonrtric  --no-headers -o custom-columns=":metadata.name" | grep enrichment)
+       pvc_name=$(kubectl get pvc -n $KUBE_NONRTRIC_NAMESPACE  --no-headers -o custom-columns=":metadata.name" | grep enrichment)
        if [ -z "$pvc_name" ]; then
                pvc_name=enrichmentservice-pvc
        fi
        echo " Trying to reset pvc: "$pvc_name
 
-       __kube_clean_pvc $ECS_APP_NAME nonrtric $pvc_name /var/enrichment-coordinator-service/database
+       __kube_clean_pvc $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE $pvc_name $ECS_CONTAINER_MNT_DIR
 
        __log_test_pass
        return 0
index 59bdb67..ee617ef 100644 (file)
@@ -78,47 +78,57 @@ __NGW_kube_delete_all() {
 # This function is called for apps managed by the test script.
 # args: <log-dir> <file-prexix>
 __NGW_store_docker_logs() {
-       docker logs $NRT_GATEWAY_APP_NAME > $1$2_gateway.log 2>&1
+       if [ $RUNMODE == "KUBE" ]; then
+               kubectl  logs -l "autotest=NGW" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_gateway.log 2>&1
+       else
+               docker logs $NRT_GATEWAY_APP_NAME > $1$2_gateway.log 2>&1
+       fi
+}
+
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__NGW_initial_setup() {
+       use_gateway_http
 }
 
 #######################################################
 
-## Access to Gateway
-# Host name may be changed if app started by kube
-# Direct access from script
-NGW_HTTPX="http"
-NGW_HOST_NAME=$LOCALHOST_NAME
-NGW_PATH=$NGW_HTTPX"://"$NGW_HOST_NAME":"$NRT_GATEWAY_EXTERNAL_PORT
-# NGW_ADAPTER used for switch between REST and DMAAP (only REST supported currently)
-NGW_ADAPTER_TYPE="REST"
-NGW_ADAPTER=$NGW_PATH
-###########################
-### Gateway functions
-###########################
-
-# Set http as the protocol to use for all communication to the Gateway
+
+
+
+
+# Set http as the protocol to use for all communication to the nonrtric gateway
 # args: -
 # (Function for test scripts)
 use_gateway_http() {
-       echo -e $BOLD"Gateway, NGW, protocol setting"$EBOLD
-       echo -e " Using $BOLD http $EBOLD towards NGW"
-       NGW_HTTPX="http"
-       NGW_PATH=$NGW_HTTPX"://"$NGW_HOST_NAME":"$NRT_GATEWAY_EXTERNAL_PORT
-       NGW_ADAPTER_TYPE="REST"
-       NGW_ADAPTER=$NGW_PATH
-       echo ""
+       __gateway_set_protocoll "http" $NRT_GATEWAY_INTERNAL_PORT $NRT_GATEWAY_EXTERNAL_PORT
 }
 
-# Set https as the protocol to use for all communication to the Gateway
+# Set https as the protocol to use for all communication to the nonrtric gateway
 # args: -
 # (Function for test scripts)
 use_gateway_https() {
-       echo -e $BOLD"Gateway, NGW, protocol setting"$EBOLD
-       echo -e " Using $BOLD https $EBOLD towards NGW"
-       NGW_HTTPX="https"
-       NGW_PATH=$NGW_HTTPX"://"$NGW_HOST_NAME":"$NRT_GATEWAY_EXTERNAL_SECURE_PORT
-       NGW_ADAPTER_TYPE="REST"
-       NGW_ADAPTER=$NGW_PATH
+       __gateway_set_protocoll "https" $NRT_GATEWAY_INTERNAL_SECURE_PORT $NRT_GATEWAY_EXTERNAL_SECURE_PORT
+}
+
+# Setup paths to svc/container for internal and external access
+# args: <protocol> <internal-port> <external-port>
+__gateway_set_protocoll() {
+       echo -e $BOLD"$NRT_GATEWAY_DISPLAY_NAME protocol setting"$EBOLD
+       echo -e " Using $BOLD $1 $EBOLD towards $NRT_GATEWAY_DISPLAY_NAME"
+
+       ## Access to nonrtric gateway
+
+       NRT_GATEWAY_SERVICE_PATH=$1"://"$NRT_GATEWAY_APP_NAME":"$2  # docker access, container->container and script->container via proxy
+       if [ $RUNMODE == "KUBE" ]; then
+               NRT_GATEWAY_SERVICE_PATH=$1"://"$NRT_GATEWAY_APP_NAME.$KUBE_NONRTRIC_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
+       fi
+
+       # NRT_GATEWAY_ADAPTER used for switching between REST and DMAAP (only REST supported currently)
+       NRT_GATEWAY_ADAPTER_TYPE="REST"
+       NRT_GATEWAY_ADAPTER=$NRT_GATEWAY_SERVICE_PATH
+
        echo ""
 }
 
@@ -127,7 +137,7 @@ use_gateway_https() {
 # (Function for test scripts)
 set_gateway_debug() {
        echo -e $BOLD"Setting gateway debug logging"$EBOLD
-       curlString="$NGW_PATH$NRT_GATEWAY_ACTUATOR -X POST  -H Content-Type:application/json -d {\"configuredLevel\":\"debug\"}"
+       curlString="$NRT_GATEWAY_SERVICE_PATH$NRT_GATEWAY_ACTUATOR -X POST  -H Content-Type:application/json -d {\"configuredLevel\":\"debug\"}"
        result=$(__do_curl "$curlString")
        if [ $? -ne 0 ]; then
                __print_err "could not set debug mode" $@
@@ -143,7 +153,7 @@ set_gateway_debug() {
 # (Function for test scripts)
 set_gateway_trace() {
        echo -e $BOLD"Setting gateway trace logging"$EBOLD
-       curlString="$NGW_PATH$NRT_GATEWAY_ACTUATOR -X POST  -H Content-Type:application/json -d {\"configuredLevel\":\"trace\"}"
+       curlString="$NRT_GATEWAY_SERVICE_PATH$NRT_GATEWAY_ACTUATOR -X POST  -H Content-Type:application/json -d {\"configuredLevel\":\"trace\"}"
        result=$(__do_curl "$curlString")
        if [ $? -ne 0 ]; then
                __print_err "could not set trace mode" $@
@@ -154,6 +164,38 @@ set_gateway_trace() {
        return 0
 }
 
+# Export env vars for config files, docker compose and kube resources
+# args: -
+__gateway_export_vars() {
+
+       export NRT_GATEWAY_APP_NAME
+       export NRT_GATEWAY_DISPLAY_NAME
+
+       export KUBE_NONRTRIC_NAMESPACE
+       export DOCKER_SIM_NWNAME
+
+       export NRT_GATEWAY_IMAGE
+       export NRT_GATEWAY_INTERNAL_PORT
+       export NRT_GATEWAY_INTERNAL_SECURE_PORT
+       export NRT_GATEWAY_EXTERNAL_PORT
+       export NRT_GATEWAY_EXTERNAL_SECURE_PORT
+       export NRT_GATEWAY_CONFIG_MOUNT_PATH
+       export NRT_GATEWAY_CONFIG_FILE
+       export NGW_CONFIG_CONFIGMAP_NAME=$NRT_GATEWAY_APP_NAME"-config"
+       export NRT_GATEWAY_HOST_MNT_DIR
+       export NRT_GATEWAY_COMPOSE_DIR
+
+       if [ $RUNMODE == "KUBE" ]; then
+               export POLICY_AGENT_EXTERNAL_SECURE_PORT
+               export ECS_EXTERNAL_SECURE_PORT
+               export POLICY_AGENT_DOMAIN_NAME=$POLICY_AGENT_APP_NAME.$KUBE_NONRTRIC_NAMESPACE
+               export ECS_DOMAIN_NAME=$ECS_APP_NAME.$KUBE_NONRTRIC_NAMESPACE
+       else
+               export POLICY_AGENT_DOMAIN_NAME=$POLICY_AGENT_APP_NAME
+               export ECS_DOMAIN_NAME=$ECS_APP_NAME
+       fi
+}
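+# Illustrative effect of the domain name handling above (app names and namespace are assumed example values):
+#   KUBE mode:   POLICY_AGENT_DOMAIN_NAME=policymanagementservice.nonrtric   ECS_DOMAIN_NAME=enrichmentservice.nonrtric
+#   docker mode: POLICY_AGENT_DOMAIN_NAME=policymanagementservice            ECS_DOMAIN_NAME=enrichmentservice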
+
 # Start the Gateway container
 # args: -
 # (Function for test scripts)
@@ -194,22 +236,7 @@ start_gateway() {
 
                        echo -e " Creating $NRT_GATEWAY_APP_NAME app and expose service"
 
-                       #Export all vars needed for service and deployment
-                       export NRT_GATEWAY_APP_NAME
-                       export KUBE_NONRTRIC_NAMESPACE
-                       export NRT_GATEWAY_IMAGE
-                       export NRT_GATEWAY_INTERNAL_PORT
-                       export NRT_GATEWAY_INTERNAL_SECURE_PORT
-                       export NRT_GATEWAY_EXTERNAL_PORT
-                       export NRT_GATEWAY_EXTERNAL_SECURE_PORT
-                       export NRT_GATEWAY_CONFIG_MOUNT_PATH
-                       export NRT_GATEWAY_CONFIG_FILE
-                       export NGW_CONFIG_CONFIGMAP_NAME=$NRT_GATEWAY_APP_NAME"-config"
-
-                       export POLICY_AGENT_EXTERNAL_SECURE_PORT
-                       export ECS_EXTERNAL_SECURE_PORT
-                       export POLICY_AGENT_DOMAIN_NAME=$POLICY_AGENT_APP_NAME.$KUBE_NONRTRIC_NAMESPACE
-                       export ECS_DOMAIN_NAME=$ECS_APP_NAME.$KUBE_NONRTRIC_NAMESPACE
+                       __gateway_export_vars
 
                        #Check if nonrtric namespace exists, if not create it
                        __kube_create_namespace $KUBE_NONRTRIC_NAMESPACE
@@ -233,25 +260,8 @@ start_gateway() {
 
                fi
 
-               echo " Retrieving host and ports for service..."
-               NGW_HOST_NAME=$(__kube_get_service_host $NRT_GATEWAY_APP_NAME $KUBE_NONRTRIC_NAMESPACE)
-
-               NRT_GATEWAY_EXTERNAL_PORT=$(__kube_get_service_port $NRT_GATEWAY_APP_NAME $KUBE_NONRTRIC_NAMESPACE "http")
-               NRT_GATEWAY_EXTERNAL_SECURE_PORT=$(__kube_get_service_port $NRT_GATEWAY_APP_NAME $KUBE_NONRTRIC_NAMESPACE "https")
+               __check_service_start $NRT_GATEWAY_APP_NAME $NRT_GATEWAY_SERVICE_PATH$NRT_GATEWAY_ALIVE_URL
 
-               echo " Host IP, http port, https port: $NGW_HOST_NAME $NRT_GATEWAY_EXTERNAL_PORT $NRT_GATEWAY_EXTERNAL_SECURE_PORT"
-               if [ $NGW_HTTPX == "http" ]; then
-                       NGW_PATH=$NGW_HTTPX"://"$NGW_HOST_NAME":"$NRT_GATEWAY_EXTERNAL_PORT
-               else
-                       NGW_PATH=$NGW_HTTPX"://"$NGW_HOST_NAME":"$NRT_GATEWAY_EXTERNAL_SECURE_PORT
-               fi
-
-               __check_service_start $NRT_GATEWAY_APP_NAME $NGW_PATH$NRT_GATEWAY_ALIVE_URL
-
-               # Update the curl adapter if set to rest, no change if type dmaap
-               if [ $NGW_ADAPTER_TYPE == "REST" ]; then
-                       NGW_ADAPTER=$NGW_PATH
-               fi
        else
                # Check if docker app shall be fully managed by the test script
                __check_included_image 'NGW'
@@ -261,25 +271,7 @@ start_gateway() {
                        exit
                fi
 
-               # Export needed vars for docker compose
-        export NRT_GATEWAY_APP_NAME
-        export NRT_GATEWAY_INTERNAL_PORT
-        export NRT_GATEWAY_EXTERNAL_PORT
-        #export NRT_GATEWAY_INTERNAL_SECURE_PORT
-        #export NRT_GATEWAY_EXTERNAL_SECURE_PORT
-
-        export DOCKER_SIM_NWNAME
-               export NRT_GATEWAY_HOST_MNT_DIR
-               export NRT_GATEWAY_CONFIG_FILE
-               export NRT_GATEWAY_CONFIG_MOUNT_PATH
-               export NRT_GATEWAY_COMPOSE_DIR
-
-               export POLICY_AGENT_DOMAIN_NAME=$POLICY_AGENT_APP_NAME
-               export POLICY_AGENT_EXTERNAL_SECURE_PORT
-               export ECS_DOMAIN_NAME=$ECS_APP_NAME
-               export ECS_EXTERNAL_SECURE_PORT
-
-               export NRT_GATEWAY_DISPLAY_NAME
+               __gateway_export_vars
 
                dest_file=$SIM_GROUP/$NRT_GATEWAY_COMPOSE_DIR/$NRT_GATEWAY_HOST_MNT_DIR/$NRT_GATEWAY_CONFIG_FILE
 
@@ -287,7 +279,7 @@ start_gateway() {
 
                __start_container $NRT_GATEWAY_COMPOSE_DIR "" NODOCKERARGS 1 $NRT_GATEWAY_APP_NAME
 
-               __check_service_start $NRT_GATEWAY_APP_NAME $NGW_PATH$NRT_GATEWAY_ALIVE_URL
+               __check_service_start $NRT_GATEWAY_APP_NAME $NRT_GATEWAY_SERVICE_PATH$NRT_GATEWAY_ALIVE_URL
        fi
        echo ""
 }
index 0a7718e..56ce6d4 100644 (file)
@@ -92,7 +92,18 @@ __HTTPPROXY_kube_delete_all() {
 # This function is called for apps managed by the test script.
 # args: <log-dir> <file-prexix>
 __HTTPPROXY_store_docker_logs() {
-       docker logs $HTTP_PROXY_APP_NAME > $1$2_httpproxy.log 2>&1
+       if [ $RUNMODE == "KUBE" ]; then
+               kubectl  logs -l "autotest=HTTPPROXY" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_httpproxy.log 2>&1
+       else
+               docker logs $HTTP_PROXY_APP_NAME > $1$2_httpproxy.log 2>&1
+       fi
+}
+
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__HTTPPROXY_initial_setup() {
+       :
 }
 
 #######################################################
@@ -248,9 +259,9 @@ start_http_proxy() {
                __start_container $HTTP_PROXY_COMPOSE_DIR "" NODOCKERARGS 1 $HTTP_PROXY_APP_NAME
 
                if [ $HTTP_PROXY_HTTPX == "http" ]; then
-                       HTTP_PROXY_PATH=$HTTP_PROXY_HTTPX"://"$HTTP_PROXY_HOST_NAME":"$HTTP_PROXY_EXTERNAL_PORT
+                       HTTP_PROXY_PATH=$HTTP_PROXY_HTTPX"://"$HTTP_PROXY_HOST_NAME":"$HTTP_PROXY_WEB_INTERNAL_PORT
                else
-                       HTTP_PROXY_PATH=$HTTP_PROXY_HTTPX"://"$HTTP_PROXY_HOST_NAME":"$HTTP_PROXY_EXTERNAL_SECURE_PORT
+                       HTTP_PROXY_PATH=$HTTP_PROXY_HTTPX"://"$HTTP_PROXY_HOST_NAME":"$HTTP_PROXY_WEB_INTERNAL_SECURE_PORT
                fi
         __check_service_start $HTTP_PROXY_APP_NAME $HTTP_PROXY_PATH$HTTP_PROXY_ALIVE_URL
 
index 59b6346..dcaaf80 100644 (file)
@@ -93,7 +93,18 @@ __KUBEPROXY_kube_delete_all() {
 # This function is called for apps managed by the test script.
 # args: <log-dir> <file-prexix>
 __KUBEPROXY_store_docker_logs() {
-       docker logs $KUBE_PROXY_APP_NAME > $1$2_kubeproxy.log 2>&1
+       if [ $RUNMODE == "KUBE" ]; then
+               kubectl  logs -l "autotest=KUBEPROXY" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_kubeproxy.log 2>&1
+       else
+               docker logs $KUBE_PROXY_APP_NAME > $1$2_kubeproxy.log 2>&1
+       fi
+}
+
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__KUBEPROXY_initial_setup() {
+       use_kube_proxy_http
 }
 
 #######################################################
@@ -133,6 +144,33 @@ use_kube_proxy_https() {
 ### Kube Http Proxy functions
 #########################
 
+# Export env vars for config files, docker compose and kube resources
+# args: -
+__kube_proxy_vars() {
+
+       export KUBE_PROXY_WEB_EXTERNAL_PORT
+       export KUBE_PROXY_WEB_INTERNAL_PORT
+       export KUBE_PROXY_EXTERNAL_PORT
+       export KUBE_PROXY_INTERNAL_PORT
+
+       export KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT
+       export KUBE_PROXY_WEB_INTERNAL_SECURE_PORT
+       export KUBE_PROXY_EXTERNAL_SECURE_PORT
+       export KUBE_PROXY_INTERNAL_SECURE_PORT
+
+       export KUBE_SIM_NAMESPACE
+       export KUBE_PROXY_IMAGE
+
+       export KUBE_PROXY_APP_NAME
+       export KUBE_PROXY_DOCKER_EXTERNAL_PORT
+       export KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT
+       export KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT
+       export KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT
+       export DOCKER_SIM_NWNAME
+
+       export KUBE_PROXY_DISPLAY_NAME
+}
+
 # Start the Kube Http Proxy in the simulator group
 # args: -
 # (Function for test scripts)
@@ -170,20 +208,10 @@ start_kube_proxy() {
 
                if [ $retcode_i -eq 0 ]; then
                        echo -e " Creating $KUBE_PROXY_APP_NAME deployment and service"
-                       export KUBE_PROXY_APP_NAME
 
-                       export KUBE_PROXY_WEB_EXTERNAL_PORT
-                       export KUBE_PROXY_WEB_INTERNAL_PORT
-                       export KUBE_PROXY_EXTERNAL_PORT
-                       export KUBE_PROXY_INTERNAL_PORT
+                       __kube_proxy_vars
 
-                       export KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT
-                       export KUBE_PROXY_WEB_INTERNAL_SECURE_PORT
-                       export KUBE_PROXY_EXTERNAL_SECURE_PORT
-                       export KUBE_PROXY_INTERNAL_SECURE_PORT
-
-                       export KUBE_SIM_NAMESPACE
-                       export KUBE_PROXY_IMAGE
+                       export KUBE_PROXY_APP_NAME
 
                        __kube_create_namespace $KUBE_SIM_NAMESPACE
 
@@ -251,8 +279,43 @@ start_kube_proxy() {
                # Set proxy for all subsequent calls for all services etc
                export KUBE_PROXY_PATH=$KUBE_PROXY_HTTPX"://"$CLUSTER_KUBE_PROXY_HOST":"$CLUSTER_KUBE_PROXY_PORT
                export KUBE_PROXY_HTTPX
+
+               KP_PORT1=$(__kube_get_service_nodeport $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "http")
+               KP_PORT2=$(__kube_get_service_nodeport $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "https")
+
+               echo " $KUBE_PROXY_DISPLAY_NAME node ports (http/https): $KP_PORT1 $KP_PORT2"
+
        else
-               echo $YELLOW" Kube http proxy not needed in docker test. App not started"
+               # Check if docker app shall be fully managed by the test script
+               __check_included_image 'KUBEPROXY'
+               if [ $? -eq 1 ]; then
+                       echo -e $RED"The Kube Proxy app is not included in this test script"$ERED
+                       echo -e $RED"The Kube Proxy will not be started"$ERED
+                       exit
+               fi
+
+               __kube_proxy_vars
+
+               __start_container $KUBE_PROXY_COMPOSE_DIR "" NODOCKERARGS 1 $KUBE_PROXY_APP_NAME
+
+               if [ $KUBE_PROXY_HTTPX == "http" ]; then
+                       export KUBE_PROXY_WEB_PATH=$KUBE_PROXY_HTTPX"://"$LOCALHOST_NAME":"$KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT
+               else
+                       export KUBE_PROXY_WEB_PATH=$KUBE_PROXY_HTTPX"://"$LOCALHOST_NAME":"$KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT
+               fi
+
+               export KUBE_PROXY_PATH=  # Make sure proxy is empty when checking the proxy itself
+        __check_service_start $KUBE_PROXY_APP_NAME $KUBE_PROXY_WEB_PATH$KUBE_PROXY_ALIVE_URL
+
+               if [ $KUBE_PROXY_HTTPX == "http" ]; then
+                       export KUBE_PROXY_PATH=$KUBE_PROXY_HTTPX"://"$LOCALHOST_NAME":"$KUBE_PROXY_DOCKER_EXTERNAL_PORT
+               else
+                       export KUBE_PROXY_PATH=$KUBE_PROXY_HTTPX"://"$LOCALHOST_NAME":"$KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT
+               fi
+
+               echo " $KUBE_PROXY_DISPLAY_NAME localhost ports (http/https): $KUBE_PROXY_DOCKER_EXTERNAL_PORT $KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT"
+
+
        fi
        echo ""
 
index 3569f6c..c6a5a2c 100755 (executable)
@@ -157,168 +157,139 @@ __DMAAPMR_kube_delete_all() {
 # This function is called for apps managed by the test script.
 # args: <log-dir> <file-prexix>
 __MR_store_docker_logs() {
-       docker logs $MR_STUB_APP_NAME > $1$2_mr_stub.log 2>&1
+       if [ $RUNMODE == "KUBE" ]; then
+               kubectl  logs -l "autotest=MR" -n $KUBE_ONAP_NAMESPACE --tail=-1 > $1$2_mr_stub.log 2>&1
+       else
+               docker logs $MR_STUB_APP_NAME > $1$2_mr_stub.log 2>&1
+       fi
 }
 
 # Store docker logs
 # This function is called for apps managed by the test script.
 # args: <log-dir> <file-prexix>
 __DMAAPMR_store_docker_logs() {
-       docker logs $MR_DMAAP_APP_NAME > $1$2mr.log 2>&1
-       docker logs $MR_KAFKA_APP_NAME > $1$2_mr_kafka.log 2>&1
-       docker logs $MR_ZOOKEEPER_APP_NAME > $1$2_mr_zookeeper.log 2>&1
+       if [ $RUNMODE == "KUBE" ]; then
+               for podname in $(kubectl get pods -n $KUBE_ONAP_NAMESPACE -l "autotest=DMAAPMR" -o custom-columns=":metadata.name"); do
+                       kubectl logs -n $KUBE_ONAP_NAMESPACE $podname --tail=-1 > $1$2_$podname.log 2>&1
+               done
+       else
+               docker logs $MR_DMAAP_APP_NAME > $1$2mr.log 2>&1
+               docker logs $MR_KAFKA_APP_NAME > $1$2_mr_kafka.log 2>&1
+               docker logs $MR_ZOOKEEPER_APP_NAME > $1$2_mr_zookeeper.log 2>&1
+       fi
 }
 
-#######################################################
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__MR_initial_setup() {
+       use_mr_http
+}
 
-## Access to Message Router
-# Host name may be changed if app started by kube
-# Direct access from script
-MR_HTTPX="http"
-MR_STUB_HOST_NAME=$LOCALHOST_NAME
-MR_DMAAP_HOST_NAME=$LOCALHOST_NAME
-MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_STUB_LOCALHOST_PORT
-MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_DMAAP_LOCALHOST_PORT
-#Docker/Kube internal path
-if [ $RUNMODE == "KUBE" ]; then
-       MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXTERNAL_PORT
-       __check_included_image "DMAAPMR"
-       if [ $? -eq 0 ]; then
-               MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXTERNAL_PORT
-       fi
-       __check_prestarted_image "DMAAPMR"
-       if [ $? -eq 0 ]; then
-               MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXTERNAL_PORT
-       fi
-else
-       MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME":"$MR_INTERNAL_PORT
-       __check_included_image "DMAAPMR"
-       if [ $? -eq 0 ]; then
-               MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME":"$MR_INTERNAL_PORT
-       fi
-fi
-MR_ADAPTER_HTTP="http://"$MR_STUB_HOST_NAME":"$MR_STUB_LOCALHOST_PORT
-MR_ADAPTER_HTTPS="https://"$MR_STUB_HOST_NAME":"$MR_STUB_LOCALHOST_SECURE_PORT
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__DMAAPMR_initial_setup() {
+       :  # handled by __MR_initial_setup
+}
 
 
-#####################
-### MR stub functions
-#####################
+#######################################################
 
 use_mr_http() {
-       echo -e $BOLD"MR protocol setting"$EBOLD
-       echo -e " Using $BOLD http $EBOLD towards MR"
-       MR_HTTPX="http"
-       MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_STUB_LOCALHOST_PORT
-       MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_DMAAP_LOCALHOST_PORT
-       #Docker/Kube internal path
-       if [ $RUNMODE == "KUBE" ]; then
-               MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXTERNAL_PORT
-               __check_included_image "DMAAPMR"
-               if [ $? -eq 0 ]; then
-                       MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXTERNAL_PORT
-               fi
-               __check_prestarted_image "DMAAPMR"
-               if [ $? -eq 0 ]; then
-                       MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXTERNAL_PORT
-               fi
-       else
-               MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME":"$MR_INTERNAL_PORT
-               __check_included_image "DMAAPMR"
-               if [ $? -eq 0 ]; then
-                       MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME":"$MR_INTERNAL_PORT
-               fi
-       fi
-       echo ""
+       __mr_set_protocoll "http" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXT_SECURE_PORT
 }
 
 use_mr_https() {
-       echo -e $BOLD"MR protocol setting"$EBOLD
-       echo -e " Using $BOLD https $EBOLD towards MR"
-       MR_HTTPX="https"
-       MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_STUB_LOCALHOST_SECURE_PORT
-       MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_DMAAP_LOCALHOST_SECURE_PORT
-       #Docker/Kube internal path
+	__mr_set_protocoll "https" $MR_INTERNAL_SECURE_PORT $MR_EXTERNAL_SECURE_PORT $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXT_SECURE_PORT
+}
+
+# Setup paths to svc/container for internal and external access
+# args: <protocol> <internal-port> <external-port> <mr-stub-internal-port> <mr-stub-external-port> <mr-stub-internal-secure-port> <mr-stub-external-secure-port>
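+# Example (mirrors the call made by use_mr_http above):
+#   __mr_set_protocoll "http" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXT_SECURE_PORT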
+__mr_set_protocoll() {
+       echo -e $BOLD"$MR_STUB_DISPLAY_NAME and $MR_DMAAP_DISPLAY_NAME protocol setting"$EBOLD
+	echo -e " Using $BOLD $1 $EBOLD towards $MR_STUB_DISPLAY_NAME and $MR_DMAAP_DISPLAY_NAME"
+
+	## Access to the Message Router (mr-stub and dmaap-mr)
+
+       MR_HTTPX=$1
+
+       # Access via test script
+       MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME":"$2  # access from script via proxy, docker
+       MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME":"$2 # access from script via proxy, docker
+
+       MR_SERVICE_PATH=$MR_STUB_PATH # access container->container, docker -  access pod->svc, kube
+       __check_included_image "DMAAPMR"
+       if [ $? -eq 0 ]; then
+               MR_SERVICE_PATH=$MR_DMAAP_PATH # access container->container, docker -  access pod->svc, kube
+       fi
+
+	# For directing calls from the script to e.g. PMS via the message router
+	# These calls shall always go through the mr-stub
+       MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$4
+       MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$6
+
        if [ $RUNMODE == "KUBE" ]; then
-               MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXTERNAL_SECURE_PORT
+               MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 # access from script via proxy, kube
+               MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 # access from script via proxy, kube
+
+               MR_SERVICE_PATH=$MR_STUB_PATH
                __check_included_image "DMAAPMR"
                if [ $? -eq 0 ]; then
-                       MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXTERNAL_SECURE_PORT
+                       MR_SERVICE_PATH=$MR_DMAAP_PATH
                fi
                __check_prestarted_image "DMAAPMR"
                if [ $? -eq 0 ]; then
-                       MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXTERNAL_SECURE_PORT
-               fi
-       else
-               MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME":"$MR_INTERNAL_SECURE_PORT
-               __check_included_image "DMAAPMR"
-               if [ $? -eq 0 ]; then
-                       MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME":"$MR_INTERNAL_SECURE_PORT
+                       MR_SERVICE_PATH=$MR_DMAAP_PATH
                fi
+
+		# For directing calls from the script to e.g. PMS via the message router
+		# These calls shall always go through the mr-stub
+               MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$5
+               MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$7
        fi
-       echo ""
-}
 
-# Create a dmaap mr topic
-# args: <topic name> <topic-description>
-__create_topic() {
-       echo -ne " Creating read topic: $1"$SAMELINE
+       # For calls from script to the mr-stub
+       MR_STUB_ADAPTER=$MR_STUB_PATH
+       MR_STUB_ADAPTER_TYPE="REST"
 
-       json_topic="{\"topicName\":\"$1\",\"partitionCount\":\"2\", \"replicationCount\":\"3\", \"transactionEnabled\":\"false\",\"topicDescription\":\"$2\"}"
-       echo $json_topic > ./tmp/$1.json
-
-       curlString="$MR_DMAAP_PATH/topics/create -X POST  -H Content-Type:application/json -d@./tmp/$1.json"
-       topic_retries=5
-       while [ $topic_retries -gt 0 ]; do
-               let topic_retries=topic_retries-1
-               result=$(__do_curl "$curlString")
-               if [ $? -eq 0 ]; then
-                       topic_retries=0
-                       echo -e " Creating read topic: $1 $GREEN OK $EGREEN"
-               fi
-               if [ $? -ne 0 ]; then
-                       if [ $topic_retries -eq 0 ]; then
-                               echo -e " Creating read topic: $1 $RED Failed $ERED"
-                               ((RES_CONF_FAIL++))
-                               return 1
-                       else
-                               sleep 1
-                       fi
-               fi
-       done
-       return 0
+       echo ""
 }
 
-# Do a pipeclean of a topic - to overcome dmaap mr bug...
-# args: <topic> <post-url> <read-url>
-__dmaap_pipeclean() {
-       pipeclean_retries=50
-       echo -ne " Doing dmaap-mr pipe cleaning on topic: $1"$SAMELINE
-       while [ $pipeclean_retries -gt 0 ]; do
-               echo "{\"pipeclean-$1\":$pipeclean_retries}" > ./tmp/pipeclean.json
-               let pipeclean_retries=pipeclean_retries-1
-               curlString="$MR_DMAAP_PATH$2 -X POST  -H Content-Type:application/json -d@./tmp/pipeclean.json"
-               result=$(__do_curl "$curlString")
-               if [ $? -ne 0 ]; then
-                       sleep 1
-               else
-                       curlString="$MR_DMAAP_PATH$3"
-                       result=$(__do_curl "$curlString")
-                       if [ $? -eq 0 ]; then
-                               if [ $result != "[]" ]; then
-                                       echo -e " Doing dmaap-mr pipe cleaning on topic: $1 $GREEN OK $EGREEN"
-                                       return 0
+# Export env vars for config files, docker compose and kube resources
+# args: -
+__dmaapmr_export_vars() {
+       #Docker only
+       export DOCKER_SIM_NWNAME
+       export ONAP_ZOOKEEPER_IMAGE
+       export MR_ZOOKEEPER_APP_NAME
+       export ONAP_KAFKA_IMAGE
+       export MR_KAFKA_APP_NAME
+       export ONAP_DMAAPMR_IMAGE
+       export MR_DMAAP_APP_NAME
+       export MR_DMAAP_LOCALHOST_PORT
+       export MR_INTERNAL_PORT
+       export MR_DMAAP_LOCALHOST_SECURE_PORT
+       export MR_INTERNAL_SECURE_PORT
+       export MR_DMAAP_HOST_MNT_DIR
+}
 
-                               else
-                                       sleep 1
-                               fi
-                       fi
-               fi
-       done
-       echo -e "Doing dmaap-mr pipe cleaning on topic: $1 $RED Failed $ERED"
-       return 1
+# Export env vars for config files, docker compose and kube resources
+# args: -
+__mr_export_vars() {
+       #Docker only
+       export DOCKER_SIM_NWNAME
+       export MR_STUB_APP_NAME
+       export MRSTUB_IMAGE
+       export MR_INTERNAL_PORT
+       export MR_INTERNAL_SECURE_PORT
+       export MR_STUB_LOCALHOST_PORT
+       export MR_STUB_LOCALHOST_SECURE_PORT
+       export MR_STUB_CERT_MOUNT_DIR
+       export MR_STUB_DISPLAY_NAME
 }
 
+
 # Start the Message Router stub interface in the simulator group
 # args: -
 # (Function for test scripts)
@@ -374,7 +345,7 @@ start_mr() {
                if [ $paramerror -ne 0 ]; then
                                echo -e $RED"The Message Router apps 'MR' and/or 'DMAAPMR' are not included in this test script"$ERED
                                echo -e $RED"The Message Router will not be started"$ERED
-                               echo -e $RED"Both MR and DAAMPMR  - or - only MR - need to be included and/or prestarted"
+				echo -e $RED"Both MR and DMAAPMR - or - only MR - need to be included and/or prestarted"$ERED
                                exit
                fi
 
@@ -384,6 +355,9 @@ start_mr() {
                fi
 
                if [ $retcode_included_dmaapmr -eq 0 ]; then
+
+                       __dmaapmr_export_vars
+
                        #export MR_DMAAP_APP_NAME
                        export MR_DMAAP_KUBE_APP_NAME=message-router
                        MR_DMAAP_APP_NAME=$MR_DMAAP_KUBE_APP_NAME
@@ -395,6 +369,7 @@ start_mr() {
                        export ONAP_DMAAPMR_IMAGE
 
                        export MR_KAFKA_BWDS_NAME=akfak-bwds
+                       export MR_KAFKA_BWDS_NAME=kaka
                        export KUBE_ONAP_NAMESPACE
 
                        export MR_ZOOKEEPER_APP_NAME
@@ -406,29 +381,30 @@ start_mr() {
                        # TODO - Fix domain name substitution in the prop file
                        # Create config maps - dmaapmr app
                        configfile=$PWD/tmp/MsgRtrApi.properties
-                       cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"/"mnt/mr/KUBE-MsgRtrApi.properties $configfile
+                       cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/mr/KUBE-MsgRtrApi.properties $configfile
+
                        output_yaml=$PWD/tmp/dmaapmr_msgrtrapi_cfc.yaml
                        __kube_create_configmap dmaapmr-msgrtrapi.properties $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
 
                        configfile=$PWD/tmp/logback.xml
-                       cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"/"mnt/mr/logback.xml $configfile
+                       cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/mr/logback.xml $configfile
                        output_yaml=$PWD/tmp/dmaapmr_logback_cfc.yaml
                        __kube_create_configmap dmaapmr-logback.xml $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
 
                        configfile=$PWD/tmp/cadi.properties
-                       cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"/"mnt/mr/cadi.properties $configfile
+                       cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/mr/cadi.properties $configfile
                        output_yaml=$PWD/tmp/dmaapmr_cadi_cfc.yaml
                        __kube_create_configmap dmaapmr-cadi.properties $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
 
                        # Create config maps - kafka app
                        configfile=$PWD/tmp/zk_client_jaas.conf
-                       cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"/"mnt/kafka/zk_client_jaas.conf $configfile
+                       cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/kafka/zk_client_jaas.conf $configfile
                        output_yaml=$PWD/tmp/dmaapmr_zk_client_cfc.yaml
                        __kube_create_configmap dmaapmr-zk-client-jaas.conf $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
 
                        # Create config maps - zookeeper app
                        configfile=$PWD/tmp/zk_server_jaas.conf
-                       cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"/"mnt/zk/zk_server_jaas.conf $configfile
+                       cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/zk/zk_server_jaas.conf $configfile
                        output_yaml=$PWD/tmp/dmaapmr_zk_server_cfc.yaml
                        __kube_create_configmap dmaapmr-zk-server-jaas.conf $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
 
@@ -519,10 +495,13 @@ start_mr() {
                MR_ADAPTER_HTTP="http://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT
                MR_ADAPTER_HTTPS="https://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT
 
+               MR_STUB_ADAPTER=$MR_STUB_PATH
+               MR_STUB_ADAPTER_TYPE="REST"
+
                __check_service_start $MR_STUB_APP_NAME $MR_STUB_PATH$MR_STUB_ALIVE_URL
 
                echo -ne " Service $MR_STUB_APP_NAME - reset  "$SAMELINE
-               result=$(__do_curl $MR_STUB_APP_NAME $MR_STUB_PATH/reset)
+               result=$(__do_curl $MR_STUB_PATH/reset)
                if [ $? -ne 0 ]; then
                        echo -e " Service $MR_STUB_APP_NAME - reset  $RED Failed $ERED - will continue"
                else
@@ -558,17 +537,7 @@ start_mr() {
                        export TOPIC_WRITE="http://$MR_DMAAP_APP_NAME:$MR_INTERNAL_PORT/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=15000&limit=100"
                fi
 
-               export DOCKER_SIM_NWNAME
-               export ONAP_ZOOKEEPER_IMAGE
-               export MR_ZOOKEEPER_APP_NAME
-               export ONAP_KAFKA_IMAGE
-               export MR_KAFKA_APP_NAME
-               export ONAP_DMAAPMR_IMAGE
-               export MR_DMAAP_APP_NAME
-               export MR_DMAAP_LOCALHOST_PORT
-               export MR_INTERNAL_PORT
-               export MR_DMAAP_LOCALHOST_SECURE_PORT
-               export MR_INTERNAL_SECURE_PORT
+               __dmaapmr_export_vars
 
                if [ $retcode_dmaapmr -eq 0 ]; then
                        __start_container $MR_DMAAP_COMPOSE_DIR "" NODOCKERARGS 1 $MR_DMAAP_APP_NAME
@@ -590,15 +559,7 @@ start_mr() {
                        echo $result | indent2
                fi
 
-               export DOCKER_SIM_NWNAME
-               export MR_STUB_APP_NAME
-               export MRSTUB_IMAGE
-               export MR_INTERNAL_PORT
-               export MR_INTERNAL_SECURE_PORT
-               export MR_STUB_LOCALHOST_PORT
-               export MR_STUB_LOCALHOST_SECURE_PORT
-               export MR_STUB_CERT_MOUNT_DIR
-               export MR_STUB_DISPLAY_NAME
+               __mr_export_vars
 
                if [ $retcode_mr -eq 0 ]; then
                        __start_container $MR_STUB_COMPOSE_DIR "" NODOCKERARGS 1 $MR_STUB_APP_NAME
@@ -611,6 +572,67 @@ start_mr() {
        return 0
 }
 
+# Create a dmaap mr topic
+# args: <topic name> <topic-description>
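+# Example (topic name and description are illustrative only):
+#   __create_topic "$MR_WRITE_TOPIC" "write topic for mr-stub tests"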
+__create_topic() {
+       echo -ne " Creating read topic: $1"$SAMELINE
+
+       json_topic="{\"topicName\":\"$1\",\"partitionCount\":\"2\", \"replicationCount\":\"3\", \"transactionEnabled\":\"false\",\"topicDescription\":\"$2\"}"
+       echo $json_topic > ./tmp/$1.json
+
+       curlString="$MR_DMAAP_PATH/topics/create -X POST  -H Content-Type:application/json -d@./tmp/$1.json"
+       topic_retries=5
+       while [ $topic_retries -gt 0 ]; do
+               let topic_retries=topic_retries-1
+		result=$(__do_curl "$curlString")
+		curl_res=$?
+		if [ $curl_res -eq 0 ]; then
+			topic_retries=0
+			echo -e " Creating read topic: $1 $GREEN OK $EGREEN"
+		fi
+		if [ $curl_res -ne 0 ]; then
+                       if [ $topic_retries -eq 0 ]; then
+                               echo -e " Creating read topic: $1 $RED Failed $ERED"
+                               ((RES_CONF_FAIL++))
+                               return 1
+                       else
+                               sleep 1
+                       fi
+               fi
+       done
+       return 0
+}
+
+# Do a pipeclean of a topic - to overcome dmaap mr bug...
+# args: <topic> <post-url> <read-url>
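+# Example (paths are illustrative, modelled on the TOPIC_WRITE url exported in start_mr):
+#   __dmaap_pipeclean "$MR_WRITE_TOPIC" "/events/$MR_WRITE_TOPIC" "/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=1000&limit=100"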
+__dmaap_pipeclean() {
+       pipeclean_retries=50
+       echo -ne " Doing dmaap-mr pipe cleaning on topic: $1"$SAMELINE
+       while [ $pipeclean_retries -gt 0 ]; do
+               echo "{\"pipeclean-$1\":$pipeclean_retries}" > ./tmp/pipeclean.json
+               let pipeclean_retries=pipeclean_retries-1
+               curlString="$MR_DMAAP_PATH$2 -X POST  -H Content-Type:application/json -d@./tmp/pipeclean.json"
+               result=$(__do_curl "$curlString")
+               if [ $? -ne 0 ]; then
+                       sleep 1
+               else
+                       curlString="$MR_DMAAP_PATH$3"
+                       result=$(__do_curl "$curlString")
+                       if [ $? -eq 0 ]; then
+				if [ "$result" != "[]" ]; then
+                                       echo -e " Doing dmaap-mr pipe cleaning on topic: $1 $GREEN OK $EGREEN"
+                                       return 0
+
+                               else
+                                       sleep 1
+                               fi
+                       fi
+               fi
+       done
+       echo -e "Doing dmaap-mr pipe cleaning on topic: $1 $RED Failed $ERED"
+       return 1
+}
+
+
 ### Generic test cases for variable checking
 
 # Tests if a variable value in the MR stub is equal to a target value, with an optional timeout.
@@ -660,4 +682,28 @@ mr_print() {
                exit 1
        fi
        echo -e $BOLD"INFO(${BASH_LINENO[0]}): mrstub, $1 = $(__do_curl $MR_STUB_PATH/counter/$1)"$EBOLD
-}
\ No newline at end of file
+}
+
+# Send json to topic in mr-stub.
+# arg: <topic-url> <json-msg>
+# (Function for test scripts)
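+# Example (topic path and payload are illustrative only):
+#   mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"example"}'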
+mr_api_send_json() {
+       __log_test_start $@
+    if [ $# -ne 2 ]; then
+        __print_err "<topic-url> <json-msg>" $@
+        return 1
+    fi
+       query=$1
+       fname=$PWD/tmp/json_payload_to_mr.json
+       echo $2 > $fname
+       res="$(__do_curl_to_api MRSTUB POST $query $fname)"
+
+       status=${res:${#res}-3}
+       if [ $status -ne 200 ]; then
+               __log_test_fail_status_code 200 $status
+               return 1
+       fi
+
+       __log_test_pass
+       return 0
+}
index f80e827..bb4ccf5 100644 (file)
@@ -93,58 +93,49 @@ __PRODSTUB_kube_delete_all() {
 # This function is called for apps managed by the test script.
 # args: <log-dir> <file-prefix>
 __PRODSTUB_store_docker_logs() {
-       docker logs $PROD_STUB_APP_NAME > $1$2_prodstub.log 2>&1
+       if [ $RUNMODE == "KUBE" ]; then
+               kubectl  logs -l "autotest=PRODSTUB" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_prodstub.log 2>&1
+       else
+               docker logs $PROD_STUB_APP_NAME > $1$2_prodstub.log 2>&1
+       fi
 }
-#######################################################
 
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__PRODSTUB_initial_setup() {
+       use_prod_stub_http
+}
 
-## Access to Prod stub sim
-# Direct access
-PROD_STUB_HTTPX="http"
-PROD_STUB_HOST_NAME=$LOCALHOST_NAME
-PROD_STUB_PATH=$PROD_STUB_HTTPX"://"$PROD_STUB_HOST_NAME":"$PROD_STUB_EXTERNAL_PORT
-
-#Docker/Kube internal path
-if [ $RUNMODE == "KUBE" ]; then
-       PROD_STUB_SERVICE_PATH=$PROD_STUB_HTTPX"://"$PROD_STUB_APP_NAME"."$KUBE_SIM_NAMESPACE":"$PROD_STUB_EXTERNAL_PORT
-else
-       PROD_STUB_SERVICE_PATH=$PROD_STUB_HTTPX"://"$PROD_STUB_APP_NAME":"$PROD_STUB_INTERNAL_PORT
-fi
+#######################################################
 
-# Set http as the protocol to use for all communication to the Producer stub
+# Set http as the protocol to use for all communication to the Prod stub sim
 # args: -
 # (Function for test scripts)
 use_prod_stub_http() {
-       echo -e $BOLD"Producer stub protocol setting"$EBOLD
-       echo -e " Using $BOLD http $EBOLD towards Producer stub"
-
-       PROD_STUB_HTTPX="http"
-    PROD_STUB_PATH=$PROD_STUB_HTTPX"://"$PROD_STUB_HOST_NAME":"$PROD_STUB_EXTERNAL_PORT
-
-       if [ $RUNMODE == "KUBE" ]; then
-               PROD_STUB_SERVICE_PATH=$PROD_STUB_HTTPX"://"$PROD_STUB_APP_NAME"."$KUBE_SIM_NAMESPACE":"$PROD_STUB_EXTERNAL_PORT
-       else
-               PROD_STUB_SERVICE_PATH=$PROD_STUB_HTTPX"://"$PROD_STUB_APP_NAME":"$PROD_STUB_INTERNAL_PORT
-       fi
-
-       echo ""
+       __prod_stub_set_protocoll "http" $PROD_STUB_INTERNAL_PORT $PROD_STUB_EXTERNAL_PORT
 }
 
-# Set https as the protocol to use for all communication to the Producer stub
+# Set https as the protocol to use for all communication to the Prod stub sim
 # args: -
 # (Function for test scripts)
 use_prod_stub_https() {
-       echo -e $BOLD"Producer stub protocol setting"$EBOLD
-       echo -e " Using $BOLD https $EBOLD towards Producer stub"
+       __prod_stub_set_protocoll "https" $PROD_STUB_INTERNAL_SECURE_PORT $PROD_STUB_EXTERNAL_SECURE_PORT
+}
+
+# Setup paths to svc/container for internal and external access
+# args: <protocol> <internal-port> <external-port>
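+# Example (mirrors the call made by use_prod_stub_http above):
+#   __prod_stub_set_protocoll "http" $PROD_STUB_INTERNAL_PORT $PROD_STUB_EXTERNAL_PORT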
+__prod_stub_set_protocoll() {
+       echo -e $BOLD"$PROD_STUB_DISPLAY_NAME protocol setting"$EBOLD
+	echo -e " Using $BOLD $1 $EBOLD towards $PROD_STUB_DISPLAY_NAME"
 
-       PROD_STUB_HTTPX="https"
-    PROD_STUB_PATH=$PROD_STUB_HTTPX"://"$PROD_STUB_HOST_NAME":"$PROD_STUB_EXTERNAL_SECURE_PORT
+       ## Access to Prod stub sim
 
+       PROD_STUB_SERVICE_PATH=$1"://"$PROD_STUB_APP_NAME":"$2  # docker access, container->container and script->container via proxy
        if [ $RUNMODE == "KUBE" ]; then
-               PROD_STUB_SERVICE_PATH=$PROD_STUB_HTTPX"://"$PROD_STUB_APP_NAME"."$KUBE_SIM_NAMESPACE":"$PROD_STUB_EXTERNAL_SECURE_PORT
-       else
-               PROD_STUB_SERVICE_PATH=$PROD_STUB_HTTPX"://"$PROD_STUB_APP_NAME":"$PROD_STUB_INTERNAL_SECURE_PORT
+               PROD_STUB_SERVICE_PATH=$1"://"$PROD_STUB_APP_NAME.$KUBE_SIM_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
        fi
+
        echo ""
 }
 
@@ -154,6 +145,24 @@ use_prod_stub_https() {
 ### Producer stub functions
 ###########################
 
+# Export env vars for config files, docker compose and kube resources
+# args:
+__prodstub_export_vars() {
+       export PROD_STUB_APP_NAME
+       export PROD_STUB_APP_NAME_ALIAS
+       export PROD_STUB_DISPLAY_NAME
+
+       export DOCKER_SIM_NWNAME
+       export KUBE_SIM_NAMESPACE
+
+       export PROD_STUB_IMAGE
+       export PROD_STUB_INTERNAL_PORT
+       export PROD_STUB_INTERNAL_SECURE_PORT
+       export PROD_STUB_EXTERNAL_PORT
+       export PROD_STUB_EXTERNAL_SECURE_PORT
+}
+
+
 # Start the Producer stub in the simulator group
 # args: -
 # (Function for test scripts)
@@ -190,16 +199,11 @@ start_prod_stub() {
 
                if [ $retcode_i -eq 0 ]; then
                        echo -e " Creating $PROD_STUB_APP_NAME deployment and service"
-                       export PROD_STUB_APP_NAME
-                       export KUBE_SIM_NAMESPACE
-                       export PROD_STUB_IMAGE
-                       export PROD_STUB_INTERNAL_PORT
-                       export PROD_STUB_INTERNAL_SECURE_PORT
-                       export PROD_STUB_EXTERNAL_PORT
-                       export PROD_STUB_EXTERNAL_SECURE_PORT
 
             __kube_create_namespace $KUBE_SIM_NAMESPACE
 
+                       __prodstub_export_vars
+
                        # Create service
                        input_yaml=$SIM_GROUP"/"$PROD_STUB_COMPOSE_DIR"/"svc.yaml
                        output_yaml=$PWD/tmp/prodstub_svc.yaml
@@ -211,24 +215,10 @@ start_prod_stub() {
                        __kube_create_instance app $PROD_STUB_APP_NAME $input_yaml $output_yaml
                fi
 
-               PROD_STUB_HOST_NAME=$(__kube_get_service_host $PROD_STUB_APP_NAME $KUBE_SIM_NAMESPACE)
-
-               PROD_STUB_EXTERNAL_PORT=$(__kube_get_service_port $PROD_STUB_APP_NAME $KUBE_SIM_NAMESPACE "http")
-               PROD_STUB_EXTERNAL_SECURE_PORT=$(__kube_get_service_port $PROD_STUB_APP_NAME $KUBE_SIM_NAMESPACE "https")
-
-               echo " Host IP, http port, https port: $PROD_STUB_HOST_NAME $PROD_STUB_EXTERNAL_PORT $PROD_STUB_EXTERNAL_SECURE_PORT"
-               if [ $PROD_STUB_HTTPX == "http" ]; then
-            PROD_STUB_PATH=$PROD_STUB_HTTPX"://"$PROD_STUB_HOST_NAME":"$PROD_STUB_EXTERNAL_PORT
-                       PROD_STUB_SERVICE_PATH=$PROD_STUB_HTTPX"://"$PROD_STUB_APP_NAME"."$KUBE_SIM_NAMESPACE":"$PROD_STUB_EXTERNAL_PORT
-               else
-            PROD_STUB_PATH=$PROD_STUB_HTTPX"://"$PROD_STUB_HOST_NAME":"$PROD_STUB_EXTERNAL_SECURE_PORT
-                       PROD_STUB_SERVICE_PATH=$PROD_STUB_HTTPX"://"$PROD_STUB_APP_NAME"."$KUBE_SIM_NAMESPACE":"$PROD_STUB_EXTERNAL_SECURE_PORT
-               fi
-
-               __check_service_start $PROD_STUB_APP_NAME $PROD_STUB_PATH$PROD_STUB_ALIVE_URL
+               __check_service_start $PROD_STUB_APP_NAME $PROD_STUB_SERVICE_PATH$PROD_STUB_ALIVE_URL
 
                echo -ne " Service $PROD_STUB_APP_NAME - reset  "$SAMELINE
-               result=$(__do_curl $PROD_STUB_PATH/reset)
+               result=$(__do_curl $PROD_STUB_SERVICE_PATH/reset)
                if [ $? -ne 0 ]; then
                        echo -e " Service $PROD_STUB_APP_NAME - reset  $RED Failed $ERED - will continue"
                else
@@ -244,19 +234,11 @@ start_prod_stub() {
                        exit
                fi
 
-        export PROD_STUB_APP_NAME
-        export PROD_STUB_APP_NAME_ALIAS
-        export PROD_STUB_INTERNAL_PORT
-        export PROD_STUB_EXTERNAL_PORT
-        export PROD_STUB_INTERNAL_SECURE_PORT
-        export PROD_STUB_EXTERNAL_SECURE_PORT
-        export DOCKER_SIM_NWNAME
-
-               export PROD_STUB_DISPLAY_NAME
+               __prodstub_export_vars
 
                __start_container $PROD_STUB_COMPOSE_DIR "" NODOCKERARGS 1 $PROD_STUB_APP_NAME
 
-        __check_service_start $PROD_STUB_APP_NAME $PROD_STUB_PATH$PROD_STUB_ALIVE_URL
+        __check_service_start $PROD_STUB_APP_NAME $PROD_STUB_SERVICE_PATH$PROD_STUB_ALIVE_URL
        fi
     echo ""
     return 0
@@ -268,13 +250,11 @@ __execute_curl_to_prodstub() {
     TIMESTAMP=$(date "+%Y-%m-%d %H:%M:%S")
     echo "(${BASH_LINENO[0]}) - ${TIMESTAMP}: ${FUNCNAME[0]}" $@ >> $HTTPLOG
        proxyflag=""
-       if [ $RUNMODE == "KUBE" ]; then
-               if [ ! -z "$KUBE_PROXY_PATH" ]; then
-                       if [ $KUBE_PROXY_HTTPX == "http" ]; then
-                               proxyflag=" --proxy $KUBE_PROXY_PATH"
-                       else
-                               proxyflag=" --proxy-insecure --proxy $KUBE_PROXY_PATH"
-                       fi
+       if [ ! -z "$KUBE_PROXY_PATH" ]; then
+               if [ $KUBE_PROXY_HTTPX == "http" ]; then
+                       proxyflag=" --proxy $KUBE_PROXY_PATH"
+               else
+                       proxyflag=" --proxy-insecure --proxy $KUBE_PROXY_PATH"
                fi
        fi
        echo " CMD: $3 $proxyflag" >> $HTTPLOG
@@ -326,7 +306,7 @@ prodstub_arm_producer() {
                return 1
        fi
 
-    curlString="curl -X PUT -skw %{http_code} $PROD_STUB_PATH/arm/supervision/"$2
+    curlString="curl -X PUT -skw %{http_code} $PROD_STUB_SERVICE_PATH/arm/supervision/"$2
        if [ $# -eq 3 ]; then
                curlString=$curlString"?response="$3
        fi
@@ -345,7 +325,7 @@ prodstub_arm_job_create() {
                return 1
        fi
 
-    curlString="curl -X PUT -skw %{http_code} $PROD_STUB_PATH/arm/create/$2/$3"
+    curlString="curl -X PUT -skw %{http_code} $PROD_STUB_SERVICE_PATH/arm/create/$2/$3"
        if [ $# -eq 4 ]; then
                curlString=$curlString"?response="$4
        fi
@@ -364,7 +344,7 @@ prodstub_arm_job_delete() {
                return 1
        fi
 
-    curlString="curl -X PUT -skw %{http_code} $PROD_STUB_PATH/arm/delete/$2/$3"
+    curlString="curl -X PUT -skw %{http_code} $PROD_STUB_SERVICE_PATH/arm/delete/$2/$3"
        if [ $# -eq 4 ]; then
                curlString=$curlString"?response="$4
        fi
@@ -383,7 +363,7 @@ prodstub_arm_type() {
                return 1
        fi
 
-    curlString="curl -X PUT -skw %{http_code} $PROD_STUB_PATH/arm/type/$2/$3"
+    curlString="curl -X PUT -skw %{http_code} $PROD_STUB_SERVICE_PATH/arm/type/$2/$3"
 
     __execute_curl_to_prodstub CONF $1 "$curlString"
     return $?
@@ -399,7 +379,7 @@ prodstub_disarm_type() {
                return 1
        fi
 
-    curlString="curl -X DELETE -skw %{http_code} $PROD_STUB_PATH/arm/type/$2/$3"
+    curlString="curl -X DELETE -skw %{http_code} $PROD_STUB_SERVICE_PATH/arm/type/$2/$3"
 
     __execute_curl_to_prodstub CONF $1 "$curlString"
     return $?
@@ -425,7 +405,7 @@ prodstub_check_jobdata() {
     file="./tmp/.p.json"
        echo "$targetJson" > $file
 
-    curlString="curl -X GET -skw %{http_code} $PROD_STUB_PATH/jobdata/$2/$3"
+    curlString="curl -X GET -skw %{http_code} $PROD_STUB_SERVICE_PATH/jobdata/$2/$3"
 
     __execute_curl_to_prodstub TEST $1 "$curlString" $file
     return $?
@@ -455,7 +435,7 @@ prodstub_check_jobdata_2() {
        file="./tmp/.p.json"
        echo "$targetJson" > $file
 
-    curlString="curl -X GET -skw %{http_code} $PROD_STUB_PATH/jobdata/$2/$3"
+    curlString="curl -X GET -skw %{http_code} $PROD_STUB_SERVICE_PATH/jobdata/$2/$3"
 
     __execute_curl_to_prodstub TEST $1 "$curlString" $file
     return $?
@@ -481,7 +461,7 @@ prodstub_check_jobdata_3() {
     file="./tmp/.p.json"
        echo "$targetJson" > $file
 
-    curlString="curl -X GET -skw %{http_code} $PROD_STUB_PATH/jobdata/$2/$3"
+    curlString="curl -X GET -skw %{http_code} $PROD_STUB_SERVICE_PATH/jobdata/$2/$3"
 
     __execute_curl_to_prodstub TEST $1 "$curlString" $file
     return $?
@@ -496,7 +476,7 @@ prodstub_delete_jobdata() {
                __print_err "<response-code> <producer-id> <job-id> " $@
                return 1
        fi
-    curlString="curl -X DELETE -skw %{http_code} $PROD_STUB_PATH/jobdata/$2/$3"
+    curlString="curl -X DELETE -skw %{http_code} $PROD_STUB_SERVICE_PATH/jobdata/$2/$3"
 
     __execute_curl_to_prodstub CONF $1 "$curlString"
     return $?
@@ -511,7 +491,7 @@ prodstub_delete_jobdata() {
 # (Function for test scripts)
 prodstub_equal() {
        if [ $# -eq 2 ] || [ $# -eq 3 ]; then
-               __var_test "PRODSTUB" "$PROD_STUB_PATH/counter/" $1 "=" $2 $3
+               __var_test "PRODSTUB" "$PROD_STUB_SERVICE_PATH/counter/" $1 "=" $2 $3
        else
                __print_err "Wrong args to prodstub_equal, needs two or three args: <sim-param> <target-value> [ timeout ]" $@
        fi
diff --git a/test/common/pvccleaner_api_functions.sh b/test/common/pvccleaner_api_functions.sh
new file mode 100644 (file)
index 0000000..62c2d43
--- /dev/null
@@ -0,0 +1,95 @@
+#!/bin/bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2020 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+# This is a script that contains container/service management functions
+# for PVCCLEANER
+
+################ Test engine functions ################
+
+# Create the image var used during the test
+# arg: <image-tag-suffix> (selects staging, snapshot, release etc)
+# <image-tag-suffix> is present only for images with staging, snapshot,release tags
+__PVCCLEANER_imagesetup() {
+       __check_and_create_image_var PVCCLEANER "PVC_CLEANER_IMAGE" "PVC_CLEANER_IMAGE_BASE" "PVC_CLEANER_IMAGE_TAG" REMOTE_PROXY "$PVC_CLEANER_DISPLAY_NAME"
+}
+
+# Pull image from remote repo or use locally built image
+# arg: <pull-policy-override> <pull-policy-original>
+# <pull-policy-override> Shall be used for images allowing overriding. For example use a local image when test is started to use released images
+# <pull-policy-original> Shall be used for images that do not allow overriding
+# Both var may contain: 'remote', 'remote-remove' or 'local'
+__PVCCLEANER_imagepull() {
+       __check_and_pull_image $1 "$PVC_CLEANER_DISPLAY_NAME" $PVC_CLEANER_APP_NAME PVC_CLEANER_IMAGE
+}
+
+# Build image (only for simulator or interfaces stubs owned by the test environment)
+# arg: <image-tag-suffix> (selects staging, snapshot, release etc)
+# <image-tag-suffix> is present only for images with staging, snapshot,release tags
+__PVCCLEANER_imagebuild() {
+       echo -e $RED"Image for app PVCCLEANER shall never be built"$ERED
+}
+
+# Generate a string for each included image using the app display name and a docker images format string
+# If a custom image repo is used then also the source image from the local repo is listed
+# arg: <docker-images-format-string> <file-to-append>
+__PVCCLEANER_image_data() {
+       echo -e "$PVC_CLEANER_DISPLAY_NAME\t$(docker images --format $1 $PVC_CLEANER_IMAGE)" >>   $2
+       if [ ! -z "$PVC_CLEANER_IMAGE_SOURCE" ]; then
+               echo -e "-- source image --\t$(docker images --format $1 $PVC_CLEANER_IMAGE_SOURCE)" >>   $2
+       fi
+}
+
+# Scale kubernetes resources to zero
+# All resources shall be ordered to be scaled to 0, if relevant. If not relevant to scale, then do no action.
+# This function is called for apps fully managed by the test script
+__PVCCLEANER_kube_scale_zero() {
+       :
+}
+
+# Scale kubernetes resources to zero and wait until this has been accomplished, if relevant. If not relevant to scale, then do no action.
+# This function is called for prestarted apps not managed by the test script.
+__PVCCLEANER_kube_scale_zero_and_wait() {
+       :
+}
+
+# Delete all kube resources for the app
+# This function is called for apps managed by the test script.
+__PVCCLEANER_kube_delete_all() {
+       :
+}
+
+# Store docker logs
+# This function is called for apps managed by the test script.
+# args: <log-dir> <file-prefix>
+__PVCCLEANER_store_docker_logs() {
+       if [ $RUNMODE == "KUBE" ]; then
+		kubectl  logs -l "autotest=PVCCLEANER" -A --tail=-1 > $1$2_pvc_cleaner.log 2>&1
+       fi
+}
+
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__PVCCLEANER_initial_setup() {
+       :
+}
+
+#######################################################
+
+# This is a system app, all usage in testcase_common.sh
\ No newline at end of file
index 254883e..52416d3 100644 (file)
@@ -57,8 +57,7 @@ __RC_kube_scale_zero() {
 # Scale kubernetes resources to zero and wait until this has been accomplished, if relevant. If not relevant to scale, then do no action.
 # This function is called for prestarted apps not managed by the test script.
 __RC_kube_scale_zero_and_wait() {
-       __kube_scale_and_wait_all_resources $KUBE_NONRTRIC_NAMESPACE app nonrtric-rappcatalogueservice
-       __kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest RC
+       __kube_scale_and_wait_all_resources $KUBE_NONRTRIC_NAMESPACE app "$KUBE_NONRTRIC_NAMESPACE"-rappcatalogueservice
 }
 
 # Delete all kube resources for the app
@@ -71,52 +70,73 @@ __RC_kube_delete_all() {
 # This function is called for apps managed by the test script.
 # args: <log-dir> <file-prefix>
 __RC_store_docker_logs() {
-       docker logs $RAPP_CAT_APP_NAME > $1$2_rc.log 2>&1
+       if [ $RUNMODE == "KUBE" ]; then
+               kubectl  logs -l "autotest=RC" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_rc.log 2>&1
+       else
+               docker logs $RAPP_CAT_APP_NAME > $1$2_rc.log 2>&1
+       fi
 }
 
-#######################################################
-
-## Access to RAPP Catalogue
-# Host name may be changed if app started by kube
-# Direct access from script
-RC_HTTPX="http"
-RC_HOST_NAME=$LOCALHOST_NAME
-RC_PATH=$RC_HTTPX"://"$RC_HOST_NAME":"$RAPP_CAT_EXTERNAL_PORT
-# RC_ADAPTER used for switch between REST and DMAAP (only REST supported currently)
-RC_ADAPTER_TYPE="REST"
-RC_ADAPTER=$RC_PATH
-
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__RC_initial_setup() {
+       use_rapp_catalogue_http
+}
 
-###########################
-### RAPP Catalogue
-###########################
+#######################################################
 
-# Set http as the protocol to use for all communication to the RAPP Catalogue
+# Set http as the protocol to use for all communication to the Rapp catalogue
 # args: -
 # (Function for test scripts)
 use_rapp_catalogue_http() {
-       echo -e $BOLD"RAPP Catalogue protocol setting"$EBOLD
-       echo -e " Using $BOLD http $EBOLD towards the RAPP Catalogue"
-       RC_HTTPX="http"
-       RC_PATH=$RC_HTTPX"://"$RC_HOST_NAME":"$RAPP_CAT_EXTERNAL_PORT
-       RC_ADAPTER_TYPE="REST"
-       RC_ADAPTER=$RC_PATH
-       echo ""
+       __rapp_catalogue_set_protocoll "http" $RAPP_CAT_INTERNAL_PORT $RAPP_CAT_EXTERNAL_PORT
 }
 
-# Set https as the protocol to use for all communication to the RAPP Catalogue
+# Set https as the protocol to use for all communication to the Rapp catalogue
 # args: -
 # (Function for test scripts)
 use_rapp_catalogue_https() {
-       echo -e $BOLD"RAPP Catalogue protocol setting"$EBOLD
-       echo -e " Using $BOLD https $EBOLD towards the RAPP Catalogue"
-       RC_HTTPX="https"
-       RC_PATH=$RC_HTTPX"://"$RC_HOST_NAME":"$RAPP_CAT_EXTERNAL_SECURE_PORT
+       __rapp_catalogue_set_protocoll "https" $RAPP_CAT_INTERNAL_SECURE_PORT $RAPP_CAT_EXTERNAL_SECURE_PORT
+}
+
+# Setup paths to svc/container for internal and external access
+# args: <protocol> <internal-port> <external-port>
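+# Example (mirrors the call made by use_rapp_catalogue_http above):
+#   __rapp_catalogue_set_protocoll "http" $RAPP_CAT_INTERNAL_PORT $RAPP_CAT_EXTERNAL_PORT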
+__rapp_catalogue_set_protocoll() {
+       echo -e $BOLD"$RAPP_CAT_DISPLAY_NAME protocol setting"$EBOLD
+	echo -e " Using $BOLD $1 $EBOLD towards $RAPP_CAT_DISPLAY_NAME"
+
+       ## Access to Rapp catalogue
+
+       RC_SERVICE_PATH=$1"://"$RAPP_CAT_APP_NAME":"$2  # docker access, container->container and script->container via proxy
+       if [ $RUNMODE == "KUBE" ]; then
+               RC_SERVICE_PATH=$1"://"$RAPP_CAT_APP_NAME.$KUBE_NONRTRIC_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
+       fi
+
+       # RC_ADAPTER used for switching between REST and DMAAP (only REST supported currently)
        RC_ADAPTER_TYPE="REST"
-       RC_ADAPTER=$RC_PATH
+       RC_ADAPTER=$RC_SERVICE_PATH
+
        echo ""
 }
 
+# Export env vars for config files, docker compose and kube resources
+# args:
+__rapp_catalogue_export_vars() {
+
+       export RAPP_CAT_APP_NAME
+       export RAPP_CAT_DISPLAY_NAME
+
+       export DOCKER_SIM_NWNAME
+       export KUBE_NONRTRIC_NAMESPACE
+
+       export RAPP_CAT_IMAGE
+       export RAPP_CAT_INTERNAL_PORT
+       export RAPP_CAT_INTERNAL_SECURE_PORT
+       export RAPP_CAT_EXTERNAL_PORT
+       export RAPP_CAT_EXTERNAL_SECURE_PORT
+}
+
 # Start the RAPP Catalogue container
 # args: -
 # (Function for test scripts)
@@ -158,13 +178,7 @@ start_rapp_catalogue() {
                        #Check if nonrtric namespace exists, if not create it
                        __kube_create_namespace $KUBE_NONRTRIC_NAMESPACE
 
-                       export RAPP_CAT_APP_NAME
-                       export KUBE_NONRTRIC_NAMESPACE
-                       export RAPP_CAT_IMAGE
-                       export RAPP_CAT_INTERNAL_PORT
-                       export RAPP_CAT_INTERNAL_SECURE_PORT
-                       export RAPP_CAT_EXTERNAL_PORT
-                       export RAPP_CAT_EXTERNAL_SECURE_PORT
+                       __rapp_catalogue_export_vars
 
                        #Create service
                        input_yaml=$SIM_GROUP"/"$RAPP_CAT_COMPOSE_DIR"/"svc.yaml
@@ -177,25 +191,8 @@ start_rapp_catalogue() {
                        __kube_create_instance app $RAPP_CAT_APP_NAME $input_yaml $output_yaml
                fi
 
-               echo " Retrieving host and ports for service..."
-               RC_HOST_NAME=$(__kube_get_service_host $RAPP_CAT_APP_NAME $KUBE_NONRTRIC_NAMESPACE)
-
-               RAPP_CAT_EXTERNAL_PORT=$(__kube_get_service_port $RAPP_CAT_APP_NAME $KUBE_NONRTRIC_NAMESPACE "http")
-               RAPP_CAT_EXTERNAL_SECURE_PORT=$(__kube_get_service_port $RAPP_CAT_APP_NAME $KUBE_NONRTRIC_NAMESPACE "https")
-
-               echo " Host IP, http port, https port: $RC_HOST_NAME $RAPP_CAT_EXTERNAL_PORT $RAPP_CAT_EXTERNAL_SECURE_PORT"
-               if [ $RC_HTTPX == "http" ]; then
-                       RC_PATH=$RC_HTTPX"://"$RC_HOST_NAME":"$RAPP_CAT_EXTERNAL_PORT
-               else
-                       RC_PATH=$RC_HTTPX"://"$RC_HOST_NAME":"$RAPP_CAT_EXTERNAL_SECURE_PORT
-               fi
-
-               __check_service_start $RAPP_CAT_APP_NAME $RC_PATH$RAPP_CAT_ALIVE_URL
+               __check_service_start $RAPP_CAT_APP_NAME $RC_SERVICE_PATH$RAPP_CAT_ALIVE_URL
 
-               # Update the curl adapter if set to rest, no change if type dmaap
-               if [ $RC_ADAPTER_TYPE == "REST" ]; then
-                       RC_ADAPTER=$RC_PATH
-               fi
        else
                __check_included_image 'RC'
                if [ $? -eq 1 ]; then
@@ -204,18 +201,11 @@ start_rapp_catalogue() {
                        exit
                fi
 
-               export RAPP_CAT_APP_NAME
-        export RAPP_CAT_INTERNAL_PORT
-        export RAPP_CAT_EXTERNAL_PORT
-        export RAPP_CAT_INTERNAL_SECURE_PORT
-        export RAPP_CAT_EXTERNAL_SECURE_PORT
-        export DOCKER_SIM_NWNAME
-
-               export RAPP_CAT_DISPLAY_NAME
+               __rapp_catalogue_export_vars
 
                __start_container $RAPP_CAT_COMPOSE_DIR "" NODOCKERARGS 1 $RAPP_CAT_APP_NAME
 
-               __check_service_start $RAPP_CAT_APP_NAME $RC_PATH$RAPP_CAT_ALIVE_URL
+               __check_service_start $RAPP_CAT_APP_NAME $RC_SERVICE_PATH$RAPP_CAT_ALIVE_URL
        fi
        echo ""
 }
@@ -230,7 +220,7 @@ start_rapp_catalogue() {
 rc_equal() {
        if [ $# -eq 2 ] || [ $# -eq 3 ]; then
                #__var_test RC "$LOCALHOST_HTTP:$RC_EXTERNAL_PORT/" $1 "=" $2 $3
-               __var_test RC "$RC_PATH/" $1 "=" $2 $3
+               __var_test RC "$RC_SERVICE_PATH/" $1 "=" $2 $3
        else
                __print_err "Wrong args to ecs_equal, needs two or three args: <sim-param> <target-value> [ timeout ]" $@
        fi
index 0df800f..f760313 100644 (file)
@@ -51,29 +51,44 @@ __RICSIM_image_data() {
 # All resources shall be ordered to be scaled to 0, if relevant. If not relevant to scale, then do no action.
 # This function is called for apps fully managed by the test script
 __RICSIM_kube_scale_zero() {
-       __kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest RICSIM
+       __kube_scale_all_resources $KUBE_A1SIM_NAMESPACE autotest RICSIM
 }
 
 # Scale kubernetes resources to zero and wait until this has been accomplished, if relevant. If not relevant to scale, then do no action.
 # This function is called for prestarted apps not managed by the test script.
 __RICSIM_kube_scale_zero_and_wait() {
-       __kube_scale_and_wait_all_resources $KUBE_NONRTRIC_NAMESPACE app nonrtric-a1simulator
+       #__kube_scale_and_wait_all_resources $KUBE_A1SIM_NAMESPACE app $KUBE_A1SIM_NAMESPACE"-"$RIC_SIM_PREFIX
+	__kube_scale_and_wait_all_resources $KUBE_A1SIM_NAMESPACE app # the value of the app label is not known
 }
 
 # Delete all kube resources for the app
 # This function is called for apps managed by the test script.
 __RICSIM_kube_delete_all() {
-       __kube_delete_all_resources $KUBE_NONRTRIC_NAMESPACE autotest RICSIM
+       __kube_delete_all_resources $KUBE_A1SIM_NAMESPACE autotest RICSIM
 }
 
 # Store docker logs
 # This function is called for apps managed by the test script.
 # args: <log-dir> <file-prexix>
 __RICSIM_store_docker_logs() {
-       rics=$(docker ps --filter "name=$RIC_SIM_PREFIX" --filter "network=$DOCKER_SIM_NWNAME" --filter "status=running" --format {{.Names}})
-       for ric in $rics; do
-               docker logs $ric > $1$2_$ric.log 2>&1
-       done
+       if [ $RUNMODE == "KUBE" ]; then
+               for podname in $(kubectl get pods -n $KUBE_A1SIM_NAMESPACE -l "autotest=RICSIM" -o custom-columns=":metadata.name"); do
+                       kubectl logs -n $KUBE_A1SIM_NAMESPACE $podname --tail=-1 > $1$2_$podname.log 2>&1
+               done
+       else
+
+               rics=$(docker ps --filter "name=$RIC_SIM_PREFIX" --filter "network=$DOCKER_SIM_NWNAME" --filter "status=running" --format {{.Names}})
+               for ric in $rics; do
+                       docker logs $ric > $1$2_$ric.log 2>&1
+               done
+       fi
+}
+
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__RICSIM_initial_setup() {
+       use_simulator_http
 }
 
 #######################################################
@@ -152,7 +167,7 @@ start_ric_simulators() {
                        echo -e " Using existing $1 statefulset and service"
                        echo " Using existing simulator deployment and service for statefulset $1"
                        echo " Setting $1 replicas=$2"
-                       __kube_scale statefulset $1 $KUBE_NONRTRIC_NAMESPACE $2
+                       __kube_scale statefulset $1 $KUBE_A1SIM_NAMESPACE $2
                        echo ""
                        return
                fi
@@ -199,7 +214,7 @@ start_ric_simulators() {
 
                        #export needed env var for statefulset
                        export RIC_SIM_SET_NAME=$(echo "$1" | tr '_' '-')  #kube does not accept underscore in names
-                       export KUBE_NONRTRIC_NAMESPACE
+                       export KUBE_A1SIM_NAMESPACE
                        export RIC_SIM_IMAGE
                        #Adding 1 more instance, instance 0 is never used. This is done to keep test scripts compatible
                        # with docker that starts instance index on 1.....
@@ -211,7 +226,7 @@ start_ric_simulators() {
                        echo -e " Creating $POLICY_AGENT_APP_NAME app and expose service"
 
                        #Check if nonrtric namespace exists, if not create it
-                       __kube_create_namespace $KUBE_NONRTRIC_NAMESPACE
+                       __kube_create_namespace $KUBE_A1SIM_NAMESPACE
 
                        # Create service
                        input_yaml=$SIM_GROUP"/"$RIC_SIM_COMPOSE_DIR"/"svc.yaml
@@ -266,18 +281,10 @@ start_ric_simulators() {
                cntr=1
                while [ $cntr -le $2 ]; do
                        app=$1"_"$cntr
-                       localport=0
-
-                       while [ $localport -eq 0 ]; do
-                               echo -ne " Waiting for container ${app} to publish its ports...${SAMELINE}"
-                               localport=$(__find_sim_port $app)
-                               sleep 0.5
-                               echo -ne " Waiting for container ${app} to publish its ports...retrying....${SAMELINE}"
-                       done
-                       echo -e " Waiting for container ${app} to publish its ports...retrying....$GREEN OK $EGREEN"
-                       __check_service_start $app $RIC_SIM_HOST":"$localport$RIC_SIM_ALIVE_URL
+                       __check_service_start $app $RIC_SIM_HTTPX"://"$app:$RIC_SIM_PORT$RIC_SIM_ALIVE_URL
                        let cntr=cntr+1
                done
+
        fi
        echo ""
        return 0
@@ -291,21 +298,7 @@ get_kube_sim_host() {
        #example gnb_1_2 -> gnb-1-2
        set_name=$(echo $name | rev | cut -d- -f2- | rev) # Cut index part of ric name to get the name of statefulset
        # example gnb-g1-2 -> gnb-g1 where gnb-g1-2 is the ric name and gnb-g1 is the set name
-       echo $name"."$set_name"."$KUBE_NONRTRIC_NAMESPACE
-}
-
-# Helper function to get a the port of a specific ric simulator
-# args: <ric-id>
-# (Not for test scripts)
-__find_sim_port() {
-    name=$1" " #Space appended to prevent matching 10 if 1 is desired....
-    cmdstr="docker inspect --format='{{(index (index .NetworkSettings.Ports \"$RIC_SIM_PORT/tcp\") 0).HostPort}}' ${name}"
-    res=$(eval $cmdstr)
-       if [[ "$res" =~ ^[0-9]+$ ]]; then
-               echo $res
-       else
-               echo "0"
-    fi
+       echo $name"."$set_name"."$KUBE_A1SIM_NAMESPACE
 }
 
 # Helper function to get a the port and host name of a specific ric simulator
@@ -313,28 +306,13 @@ __find_sim_port() {
 # (Not for test scripts)
 __find_sim_host() {
        if [ $RUNMODE == "KUBE" ]; then
-               ricname=$(echo "$1" | tr '_' '-')
-               for timeout in {1..500}; do   # long waiting time needed in case of starting large number of sims
-                       host=$(kubectl get pod $ricname  -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.status.podIP}' 2> /dev/null)
-                       if [ ! -z "$host" ]; then
-                               echo $RIC_SIM_HTTPX"://"$host":"$RIC_SIM_PORT
-                               return 0
-                       fi
-                       sleep 0.5
-               done
-               echo "host-not-found-fatal-error"
+		ricname=$(echo "$1" | tr '_' '-') # Kube does not accept underscore in names as docker does
+               ric_setname="${ricname%-*}"  #Extract the stateful set name
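+		# Example (illustrative): ric id 'ricsim-g1-2' resolves to $RIC_SIM_HTTPX://ricsim-g1-2.ricsim-g1.$KUBE_A1SIM_NAMESPACE:$RIC_SIM_PORT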
+               echo $RIC_SIM_HTTPX"://"$ricname.$ric_setname.$KUBE_A1SIM_NAMESPACE":"$RIC_SIM_PORT
        else
-               name=$1" " #Space appended to prevent matching 10 if 1 is desired....
-               cmdstr="docker inspect --format='{{(index (index .NetworkSettings.Ports \"$RIC_SIM_PORT/tcp\") 0).HostPort}}' ${name}"
-               res=$(eval $cmdstr)
-               if [[ "$res" =~ ^[0-9]+$ ]]; then
-                       echo $RIC_SIM_HOST:$res
-                       return 0
-               else
-                       echo "0"
-               fi
+               echo $RIC_SIM_HTTPX"://"$1":"$RIC_SIM_PORT
+
        fi
-       return 1
 }
 
 # Generate a UUID to use as prefix for policy ids
@@ -349,13 +327,11 @@ generate_policy_uuid() {
 __execute_curl_to_sim() {
        echo ${FUNCNAME[1]} "line: "${BASH_LINENO[1]} >> $HTTPLOG
        proxyflag=""
-       if [ $RUNMODE == "KUBE" ]; then
-               if [ ! -z "$KUBE_PROXY_PATH" ]; then
-                       if [ $KUBE_PROXY_HTTPX == "http" ]; then
-                               proxyflag=" --proxy $KUBE_PROXY_PATH"
-                       else
-                               proxyflag=" --proxy-insecure --proxy $KUBE_PROXY_PATH"
-                       fi
+       if [ ! -z "$KUBE_PROXY_PATH" ]; then
+               if [ $KUBE_PROXY_HTTPX == "http" ]; then
+                       proxyflag=" --proxy $KUBE_PROXY_PATH"
+               else
+                       proxyflag=" --proxy-insecure --proxy $KUBE_PROXY_PATH"
                fi
        fi
        echo " CMD: $2 $proxyflag" >> $HTTPLOG
index 59acba3..8344f38 100755 (executable)
@@ -138,6 +138,10 @@ KUBE_PROXY_IMAGE_BASE="nodejs-kube-proxy"
 KUBE_PROXY_IMAGE_TAG_LOCAL="latest"
 #No remote image for kube proxy, local image always used
 
+#PVC cleaner remote image and tag
+PVC_CLEANER_IMAGE_BASE="ubuntu"
+PVC_CLEANER_IMAGE_TAG_REMOTE_PROXY="20.10"
+#No local image for pvc cleaner, remote image always used
 
 # List of app short names produced by the project
 PROJECT_IMAGES_APP_NAMES="PA SDNC"
@@ -157,7 +161,9 @@ DOCKER_SIM_NWNAME="nonrtric-docker-net"                  # Name of docker privat
 
 KUBE_NONRTRIC_NAMESPACE="nonrtric"                       # Namespace for all nonrtric components
 KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim"                          # Namespace for a1-p simulators (RICSIM)
 KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
+KUBE_SNDC_NAMESPACE="onap"                               # Namespace for sdnc
 
 POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
 POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
@@ -206,6 +212,7 @@ MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_grou
 MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
 MR_KAFKA_APP_NAME="kafka"                                # Kafka app name
 MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
+MR_DMAAP_HOST_MNT_DIR="/mnt"                              # Config files dir on localhost
 
 
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
@@ -307,6 +314,10 @@ KUBE_PROXY_PATH=""                                       # Proxy url path, will
 KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
 KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
 
+PVC_CLEANER_APP_NAME="pvc-cleaner"                      # Name for Persistent Volume Cleaner container
+PVC_CLEANER_DISPLAY_NAME="Persistent Volume Cleaner"    # Display name for Persistent Volume Cleaner
+PVC_CLEANER_COMPOSE_DIR="pvc-cleaner"                   # Dir in simulator_group for yamls
+
 ########################################
 # Setting for common curl-base function
 ########################################
index 54e6fbe..00e5d4b 100755 (executable)
@@ -161,6 +161,11 @@ KUBE_PROXY_IMAGE_BASE="nodejs-kube-proxy"
 KUBE_PROXY_IMAGE_TAG_LOCAL="latest"
 #No remote image for kube proxy, local image always used
 
+#PVC cleaner remote image and tag
+PVC_CLEANER_IMAGE_BASE="ubuntu"
+PVC_CLEANER_IMAGE_TAG_REMOTE_PROXY="20.10"
+#No local image for pvc cleaner, remote image always used
+
 # List of app short names produced by the project
 PROJECT_IMAGES_APP_NAMES="PA SDNC"
 
@@ -180,7 +185,9 @@ DOCKER_SIM_NWNAME="nonrtric-docker-net"                  # Name of docker privat
 
 KUBE_NONRTRIC_NAMESPACE="nonrtric"                       # Namespace for all nonrtric components
 KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim"                          # Namespace for a1-p simulators (RICSIM)
 KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
+KUBE_SNDC_NAMESPACE="onap"                               # Namespace for sdnc
 
 POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
 POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
@@ -249,6 +256,7 @@ MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_grou
 MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
 MR_KAFKA_APP_NAME="kafka"                                # Kafka app name
 MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
+MR_DMAAP_HOST_MNT_DIR="/mnt"                              # Config files dir on localhost
 
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
 CR_DISPLAY_NAME="Callback Reciever"
@@ -374,6 +382,10 @@ KUBE_PROXY_PATH=""                                       # Proxy url path, will
 KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
 KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
 
+PVC_CLEANER_APP_NAME="pvc-cleaner"                      # Name for Persistent Volume Cleaner container
+PVC_CLEANER_DISPLAY_NAME="Persistent Volume Cleaner"    # Display name for Persistent Volume Cleaner
+PVC_CLEANER_COMPOSE_DIR="pvc-cleaner"                   # Dir in simulator_group for yamls
+
 ########################################
 # Setting for common curl-base function
 ########################################
index 829da8f..f8c411f 100644 (file)
@@ -164,6 +164,11 @@ KUBE_PROXY_IMAGE_BASE="nodejs-kube-proxy"
 KUBE_PROXY_IMAGE_TAG_LOCAL="latest"
 #No remote image for kube proxy, local image always used
 
+#PVC cleaner remote image and tag
+PVC_CLEANER_IMAGE_BASE="ubuntu"
+PVC_CLEANER_IMAGE_TAG_REMOTE_PROXY="20.10"
+#No local image for pvc cleaner, remote image always used
+
 # List of app short names produced by the project
 PROJECT_IMAGES_APP_NAMES="PA SDNC"
 
@@ -183,7 +188,9 @@ DOCKER_SIM_NWNAME="nonrtric-docker-net"                  # Name of docker privat
 
 KUBE_NONRTRIC_NAMESPACE="nonrtric"                       # Namespace for all nonrtric components
 KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim"                          # Namespace for a1-p simulators (RICSIM)
 KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
+KUBE_SNDC_NAMESPACE="onap"                               # Namespace for sdnc
 
 POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
 POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
@@ -252,6 +259,7 @@ MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_grou
 MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
 MR_KAFKA_APP_NAME="kafka"                                # Kafka app name
 MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
+MR_DMAAP_HOST_MNT_DIR="/mnt2"                             # Config files dir on localhost
 
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
 CR_DISPLAY_NAME="Callback Reciever"
@@ -393,6 +401,10 @@ KUBE_PROXY_PATH=""                                       # Proxy url path, will
 KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
 KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
 
+PVC_CLEANER_APP_NAME="pvc-cleaner"                      # Name for Persistent Volume Cleaner container
+PVC_CLEANER_DISPLAY_NAME="Persistent Volume Cleaner"    # Display name for Persistent Volume Cleaner
+PVC_CLEANER_COMPOSE_DIR="pvc-cleaner"                   # Dir in simulator_group for yamls
+
 ########################################
 # Setting for common curl-base function
 ########################################
index 91b153a..43077ea 100755 (executable)
@@ -165,6 +165,11 @@ KUBE_PROXY_IMAGE_BASE="nodejs-kube-proxy"
 KUBE_PROXY_IMAGE_TAG_LOCAL="latest"
 #No remote image for kube proxy, local image always used
 
+#PVC cleaner remote image and tag
+PVC_CLEANER_IMAGE_BASE="ubuntu"
+PVC_CLEANER_IMAGE_TAG_REMOTE_PROXY="20.10"
+#No local image for pvc cleaner, remote image always used
+
 # List of app short names produced by the project
 PROJECT_IMAGES_APP_NAMES="PA ECS CP SDNC RC RICSIM"
 
@@ -183,7 +188,9 @@ DOCKER_SIM_NWNAME="nonrtric-docker-net"                  # Name of docker privat
 
 KUBE_NONRTRIC_NAMESPACE="nonrtric"                       # Namespace for all nonrtric components
 KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim"                          # Namespace for a1-p simulators (RICSIM)
 KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
+KUBE_SNDC_NAMESPACE="onap"                               # Namespace for sdnc
 
 POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
 POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
@@ -252,6 +259,7 @@ MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_grou
 MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
 MR_KAFKA_APP_NAME="kafka"                                # Kafka app name
 MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
+MR_DMAAP_HOST_MNT_DIR="/mnt"                              # Config files dir on localhost
 
 
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
@@ -374,6 +382,11 @@ KUBE_PROXY_PATH=""                                       # Proxy url path, will
 KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
 KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
 
+
+PVC_CLEANER_APP_NAME="pvc-cleaner"                      # Name for Persistent Volume Cleaner container
+PVC_CLEANER_DISPLAY_NAME="Persistent Volume Cleaner"    # Display name for Persistent Volume Cleaner
+PVC_CLEANER_COMPOSE_DIR="pvc-cleaner"                   # Dir in simulator_group for yamls
+
 ########################################
 # Setting for common curl-base function
 ########################################
index 385dafe..cc510d5 100755 (executable)
@@ -184,6 +184,11 @@ KUBE_PROXY_IMAGE_BASE="nodejs-kube-proxy"
 KUBE_PROXY_IMAGE_TAG_LOCAL="latest"
 #No remote image for kube proxy, local image always used
 
+#PVC cleaner remote image and tag
+PVC_CLEANER_IMAGE_BASE="ubuntu"
+PVC_CLEANER_IMAGE_TAG_REMOTE_PROXY="20.10"
+#No local image for pvc cleaner, remote image always used
+
 # List of app short names produced by the project
 PROJECT_IMAGES_APP_NAMES="PA ECS CP RC RICSIM NGW"  # Add SDNC here if oran image is used
 
@@ -202,7 +207,9 @@ DOCKER_SIM_NWNAME="nonrtric-docker-net"                  # Name of docker privat
 
 KUBE_NONRTRIC_NAMESPACE="nonrtric"                       # Namespace for all nonrtric components
 KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim"                          # Namespace for a1-p simulators (RICSIM)
 KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
+KUBE_SNDC_NAMESPACE="onap"                               # Namespace for sdnc
 
 POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
 POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
@@ -227,7 +234,6 @@ POLICY_AGENT_DATA_MOUNT_PATH="/opt/app/policy-agent/data" # Path in container fo
 POLICY_AGENT_CONFIG_FILE="application.yaml"              # Container config file name
 POLICY_AGENT_DATA_FILE="application_configuration.json"  # Container data file name
 POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
-POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
 
 ECS_APP_NAME="enrichmentservice"                         # Name for ECS container
 ECS_DISPLAY_NAME="Enrichment Coordinator Service"        # Display name for ECS container
@@ -272,6 +278,7 @@ MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_grou
 MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
 MR_KAFKA_APP_NAME="kafka"                                # Kafka app name
 MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
+MR_DMAAP_HOST_MNT_DIR="/mnt"                              # Config files dir on localhost
 
 
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
@@ -438,6 +445,11 @@ KUBE_PROXY_PATH=""                                       # Proxy url path, will
 KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
 KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
 
+
+PVC_CLEANER_APP_NAME="pvc-cleaner"                      # Name for Persistent Volume Cleaner container
+PVC_CLEANER_DISPLAY_NAME="Persistent Volume Cleaner"    # Display name for Persistent Volume Cleaner
+PVC_CLEANER_COMPOSE_DIR="pvc-cleaner"                   # Dir in simulator_group for yamls
+
 ########################################
 # Setting for common curl-base function
 ########################################
index 9b83044..e2b53da 100755 (executable)
@@ -129,6 +129,19 @@ RIC_SIM_IMAGE_TAG_REMOTE_SNAPSHOT="2.2.0-SNAPSHOT"
 RIC_SIM_IMAGE_TAG_REMOTE="2.2.0"
 RIC_SIM_IMAGE_TAG_REMOTE_RELEASE="2.2.0"
 
+# DMAAP Mediator Service
+DMAAP_MED_IMAGE_BASE="o-ran-sc/nonrtric-dmaap-mediator-producer"
+DMAAP_MED_IMAGE_TAG_LOCAL="1.0.0-SNAPSHOT"
+DMAAP_MED_IMAGE_TAG_REMOTE_SNAPSHOT="1.0.0-SNAPSHOT"
+DMAAP_MED_IMAGE_TAG_REMOTE="1.0.0"
+DMAAP_MED_IMAGE_TAG_REMOTE_RELEASE="1.0.0"
+
+# DMAAP Adapter Service
+DMAAP_ADP_IMAGE_BASE="o-ran-sc/nonrtric-dmaap-adaptor"
+DMAAP_ADP_IMAGE_TAG_LOCAL="1.0.0-SNAPSHOT"
+DMAAP_ADP_IMAGE_TAG_REMOTE_SNAPSHOT="1.0.0-SNAPSHOT"
+DMAAP_ADP_IMAGE_TAG_REMOTE="1.0.0"
+DMAAP_ADP_IMAGE_TAG_REMOTE_RELEASE="1.0.0"
 
 #Consul remote image and tag
 CONSUL_IMAGE_BASE="consul"
@@ -166,17 +179,17 @@ HTTP_PROXY_IMAGE_TAG_LOCAL="latest"
 
 #ONAP Zookeeper remote image and tag
 ONAP_ZOOKEEPER_IMAGE_BASE="onap/dmaap/zookeeper"
-ONAP_ZOOKEEPER_IMAGE_TAG_REMOTE_RELEASE_ONAP="6.0.3"
+ONAP_ZOOKEEPER_IMAGE_TAG_REMOTE_RELEASE_ONAP="6.1.0"
 #No local image for ONAP Zookeeper, remote image always used
 
 #ONAP Kafka remote image and tag
 ONAP_KAFKA_IMAGE_BASE="onap/dmaap/kafka111"
-ONAP_KAFKA_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.0.4"
+ONAP_KAFKA_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.1.1"
 #No local image for ONAP Kafka, remote image always used
 
 #ONAP DMAAP-MR remote image and tag
 ONAP_DMAAPMR_IMAGE_BASE="onap/dmaap/dmaap-mr"
-ONAP_DMAAPMR_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.1.18"
+ONAP_DMAAPMR_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.3.0"
 #No local image for ONAP DMAAP-MR, remote image always used
 
 #Kube proxy remote image and tag
@@ -184,8 +197,13 @@ KUBE_PROXY_IMAGE_BASE="nodejs-kube-proxy"
 KUBE_PROXY_IMAGE_TAG_LOCAL="latest"
 #No remote image for kube proxy, local image always used
 
+#PVC cleaner remote image and tag
+PVC_CLEANER_IMAGE_BASE="ubuntu"
+PVC_CLEANER_IMAGE_TAG_REMOTE_PROXY="20.10"
+#No local image for pvc cleaner, remote image always used
+
 # List of app short names produced by the project
-PROJECT_IMAGES_APP_NAMES="PA ECS CP RC RICSIM NGW"  # Add SDNC here if oran image is used
+PROJECT_IMAGES_APP_NAMES="PA ECS CP RC RICSIM NGW DMAAPADP DMAAPMED"  # Add SDNC here if oran image is used
 
 # List of app short names which images pulled from ORAN
 ORAN_IMAGES_APP_NAMES=""  # Not used
@@ -198,11 +216,26 @@ ONAP_IMAGES_APP_NAMES="CBS DMAAPMR SDNC"   # SDNC added as ONAP image
 # Detailed settings per app
 ########################################
 
+# Port number variables
+# =====================
+# Port number vars <name>_INTERNAL_PORT and <name>_INTERNAL_SECURE_PORT are set as pod/container port in kube and container port in docker
+#
+# Port number vars <name>_EXTERNAL_PORT and <name>_EXTERNAL_SECURE_PORT are set as svc port in kube and localhost port in docker
+#
+# Some components, e.g. MR, can be represented by the MR-STUB and/or the DMAAP MR. For these components,
+# special vars named <name>_LOCALHOST_PORT and <name>_LOCALHOST_SECURE_PORT are used as localhost ports instead of
+# <name>_EXTERNAL_PORT and <name>_EXTERNAL_SECURE_PORT in docker, in order to prevent overlapping ports on the local host
+#
+# For KUBE PROXY there is a special set of external ports for docker, since the proxy also exposes the kube svc port on localhost;
+# these are <name>_DOCKER_EXTERNAL_PORT and <name>_DOCKER_EXTERNAL_SECURE_PORT
+
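As an illustration of how these variables end up being used (a sketch only; the actual URL composition is done by the common scripts, and the container host name below is an example):

```bash
# Sketch: base URLs for a component in docker mode, after sourcing this env file
# EXTERNAL ports are published on localhost, INTERNAL ports are used container-to-container
PA_LOCALHOST_URL="http://localhost:${POLICY_AGENT_EXTERNAL_PORT}"       # host -> container
PA_CONTAINER_URL="http://policy-agent:${POLICY_AGENT_INTERNAL_PORT}"    # container -> container (name is illustrative)
echo "$PA_LOCALHOST_URL $PA_CONTAINER_URL"
```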
 DOCKER_SIM_NWNAME="nonrtric-docker-net"                  # Name of docker private network
 
 KUBE_NONRTRIC_NAMESPACE="nonrtric"                       # Namespace for all nonrtric components
 KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim"                            # Namespace for a1-p simulators (RICSIM)
 KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
+KUBE_SNDC_NAMESPACE="onap"                               # Namespace for sdnc
 
 POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
 POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
@@ -227,7 +260,6 @@ POLICY_AGENT_DATA_MOUNT_PATH="/opt/app/policy-agent/data" # Path in container fo
 POLICY_AGENT_CONFIG_FILE="application.yaml"              # Container config file name
 POLICY_AGENT_DATA_FILE="application_configuration.json"  # Container data file name
 POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
-POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
 
 ECS_APP_NAME="enrichmentservice"                         # Name for ECS container
 ECS_DISPLAY_NAME="Enrichment Coordinator Service"        # Display name for ECS container
@@ -272,6 +304,7 @@ MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_grou
 MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
 MR_KAFKA_APP_NAME="kafka"                                # Kafka app name
 MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
+MR_DMAAP_HOST_MNT_DIR="/mnt2"                            # Config files dir on localhost
 
 
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
@@ -281,6 +314,7 @@ CR_INTERNAL_PORT=8090                                    # Callback receiver con
 CR_EXTERNAL_SECURE_PORT=8091                             # Callback receiver container external secure port (host -> container)
 CR_INTERNAL_SECURE_PORT=8091                             # Callback receiver container internal secure port (container -> container)
 CR_APP_CALLBACK="/callbacks"                             # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr"                       # Url for callbacks (data from mr: a json array of string encoded json objects)
 CR_ALIVE_URL="/"                                         # Base path for alive check
 CR_COMPOSE_DIR="cr"                                      # Dir in simulator_group for docker-compose
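A hedged usage sketch of the new MR-style callback path served by the callback receiver: the payload is a JSON array where every element is itself a string-encoded JSON object, mirroring how the message router delivers messages (host and port below are examples):

```bash
# Example only - assumes the callback receiver is reachable on localhost:8090
curl -s -X POST "http://localhost:8090/callbacks-mr/job1" \
     -H "Content-Type: application/json" \
     -d '["{\"event\":\"e1\"}", "{\"event\":\"e2\"}"]'
# The decoded objects can then be read back via /get-event/job1 or /get-all-events/job1
```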
 
@@ -434,10 +468,58 @@ KUBE_PROXY_WEB_EXTERNAL_PORT=8731                        # Kube Http Proxy conta
 KUBE_PROXY_WEB_INTERNAL_PORT=8081                        # Kube Http Proxy container internal port (container -> container)
 KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783                 # Kube Proxy container external secure port (host -> container)
 KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434                 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732                     # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784              # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733                 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785          # Kube Proxy container external secure port, docker (host -> container)
+
 KUBE_PROXY_PATH=""                                       # Proxy url path, will be set if proxy is started
 KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
 KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
 
+DMAAP_ADP_APP_NAME="dmaapadapterservice"                 # Name for Dmaap Adapter container
+DMAAP_ADP_DISPLAY_NAME="Dmaap Adapter Service"           # Display name for Dmaap Adapter container
+DMAAP_ADP_EXTERNAL_PORT=9087                             # Dmaap Adapter container external port (host -> container)
+DMAAP_ADP_INTERNAL_PORT=8084                             # Dmaap Adapter container internal port (container -> container)
+DMAAP_ADP_EXTERNAL_SECURE_PORT=9088                      # Dmaap Adapter container external secure port (host -> container)
+DMAAP_ADP_INTERNAL_SECURE_PORT=8435                      # Dmaap Adapter container internal secure port (container -> container)
+
+#DMAAP_ADP_LOGPATH="/var/log/dmaap-adaptor-service/application.log" # Path to the application log in the Dmaap Adapter container
+DMAAP_ADP_HOST_MNT_DIR="./mnt"                           # Mounted db dir, relative to compose file, on the host
+#MAAP_ADP_CONTAINER_MNT_DIR="/var/dmaap-adaptor-service" # Mounted dir in the container
+DMAAP_ADP_ACTUATOR="/actuator/loggers/org.oran.dmaapadapter"   # Url for trace/debug
+#DMAAP_ADP_CERT_MOUNT_DIR="./cert"
+DMAAP_ADP_ALIVE_URL="/actuator/info"                     # Base path for alive check
+DMAAP_ADP_COMPOSE_DIR="dmaapadp"                         # Dir in simulator_group for docker-compose
+DMAAP_ADP_CONFIG_MOUNT_PATH="/opt/app/dmaap-adaptor-service/config" # Internal container path for configuration
+DMAAP_ADP_DATA_MOUNT_PATH="/opt/app/dmaap-adaptor-service/data" # Path in container for data file
+DMAAP_ADP_DATA_FILE="application_configuration.json"  # Container data file name
+DMAAP_ADP_CONFIG_FILE=application.yaml                   # Config file name
+
+DMAAP_MED_APP_NAME="dmaapmediatorservice"                # Name for Dmaap Mediator container
+DMAAP_MED_DISPLAY_NAME="Dmaap Mediator Service"          # Display name for Dmaap Mediator container
+DMAAP_MED_EXTERNAL_PORT=8085                             # Dmaap Mediator container external port (host -> container)
+DMAAP_MED_INTERNAL_PORT=8085                             # Dmaap Mediator container internal port (container -> container)
+DMAAP_MED_EXTERNAL_SECURE_PORT=8185                      # Dmaap Mediator container external secure port (host -> container)
+DMAAP_MED_INTERNAL_SECURE_PORT=8185                      # Dmaap Mediator container internal secure port (container -> container)
+
+DMAAP_MED_LOGPATH="/var/log/dmaap-adaptor-service/application.log" # Path to the application log in the Dmaap Mediator container
+DMAAP_MED_HOST_MNT_DIR="./mnt"                          # Mounted db dir, relative to compose file, on the host
+#MAAP_ADP_CONTAINER_MNT_DIR="/var/dmaap-adaptor-service" # Mounted dir in the container
+#DMAAP_MED_ACTUATOR="/actuator/loggers/org.oransc.enrichment"   # Url for trace/debug
+#DMAAP_MED_CERT_MOUNT_DIR="./cert"
+DMAAP_MED_ALIVE_URL="/status"                            # Base path for alive check
+DMAAP_MED_COMPOSE_DIR="dmaapmed"                         # Dir in simulator_group for docker-compose
+#MAAP_MED_CONFIG_MOUNT_PATH="/app" # Internal container path for configuration
+DMAAP_MED_DATA_MOUNT_PATH="/configs" # Path in container for data file
+DMAAP_MED_DATA_FILE="type_config.json"  # Container data file name
+#DMAAP_MED_CONFIG_FILE=application.yaml                   # Config file name
+
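As a rough sanity check of the new adapter and mediator settings, the alive URLs can be probed in docker mode roughly as below (direct localhost access is assumed here for illustration; the test suite itself normally routes all access through the http proxy):

```bash
# Sketch only - assumes the containers are running, the env file has been sourced,
# and the external ports above are published on localhost
curl -s "http://localhost:${DMAAP_ADP_EXTERNAL_PORT}${DMAAP_ADP_ALIVE_URL}"   # Dmaap Adapter:  /actuator/info
curl -s "http://localhost:${DMAAP_MED_EXTERNAL_PORT}${DMAAP_MED_ALIVE_URL}"   # Dmaap Mediator: /status
```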
+PVC_CLEANER_APP_NAME="pvc-cleaner"                      # Name for Persistent Volume Cleaner container
+PVC_CLEANER_DISPLAY_NAME="Persistent Volume Cleaner"    # Display name for Persistent Volume Cleaner
+PVC_CLEANER_COMPOSE_DIR="pvc-cleaner"                   # Dir in simulator_group for yamls
+
 ########################################
 # Setting for common curl-base function
 ########################################
index b63d4d1..8d832d7 100755 (executable)
@@ -28,6 +28,7 @@ __print_args() {
        echo "      [--ricsim-prefix <prefix> ] [--use-local-image <app-nam>+]  [--use-snapshot-image <app-nam>+]"
        echo "      [--use-staging-image <app-nam>+] [--use-release-image <app-nam>+] [--image-repo <repo-address]"
        echo "      [--repo-policy local|remote] [--cluster-timeout <timeout-in seconds>] [--print-stats]"
+       echo "      [--override <override-environment-filename>] [--pre-clean]"
 }
 
 if [ $# -eq 1 ] && [ "$1" == "help" ]; then
@@ -43,7 +44,7 @@ if [ $# -eq 1 ] && [ "$1" == "help" ]; then
        echo "remote-remove         -  Same as 'remote' but will also try to pull fresh images from remote repositories"
        echo "docker                -  Test executed in docker environment"
        echo "kube                  -  Test executed in kubernetes environment - requires an already started kubernetes environment"
-       echo "--env-file            -  The script will use the supplied file to read environment variables from"
+       echo "--env-file  <file>    -  The script will use the supplied file to read environment variables from"
        echo "release               -  If this flag is given the script will use release version of the images"
        echo "auto-clean            -  If the function 'auto_clean_containers' is present in the end of the test script then all containers will be stopped and removed. If 'auto-clean' is not given then the function has no effect."
     echo "--stop-at-error       -  The script will stop when the first failed test or configuration"
@@ -56,6 +57,8 @@ if [ $# -eq 1 ] && [ "$1" == "help" ]; then
        echo "--repo-policy         -  Policy controlling which images to re-tag and push if param --image-repo is set. Default is 'local'"
        echo "--cluster-timeout     -  Optional timeout for cluster where it takes time to obtain external ip/host-name. Timeout in seconds. "
        echo "--print-stats         -  Print current test stats after each test."
+       echo "--override <file>     -  Override settings from the file supplied by --env-file"
+       echo "--pre-clean           -  Will clean kube resources when running docker and vice versa"
 
        echo ""
        echo "List of app short names supported: "$APP_SHORT_NAMES
@@ -87,6 +90,8 @@ echo -ne $EBOLD
 
 # default test environment variables
 TEST_ENV_VAR_FILE=""
+#Override env file, will be added on top of the above file
+TEST_ENV_VAR_FILE_OVERRIDE=""
 
 echo "Test case started as: ${BASH_SOURCE[$i+1]} "$@
 
@@ -98,6 +103,9 @@ LOCALHOST_HTTPS="https://localhost"
 # Var to hold 'auto' in case containers shall be stopped when test case ends
 AUTO_CLEAN=""
 
+# Var to indicate pre-clean. If the flag --pre-clean is set, the script will clean kube resources when running docker and vice versa
+PRE_CLEAN="0"
+
 # Var to hold the app names to use local images for
 USE_LOCAL_IMAGES=""
 
@@ -606,6 +614,36 @@ while [ $paramerror -eq 0 ] && [ $foundparm -eq 0 ]; do
                        fi
                fi
        fi
+       if [ $paramerror -eq 0 ]; then
+               if [ "$1" == "--override" ]; then
+                       shift;
+                       TEST_ENV_VAR_FILE_OVERRIDE=$1
+                       if [ -z "$1" ]; then
+                               paramerror=1
+                               if [ -z "$paramerror_str" ]; then
+                                       paramerror_str="No env file found for flag: '--override'"
+                               fi
+                       else
+                               if [ ! -f $TEST_ENV_VAR_FILE_OVERRIDE ]; then
+                                       paramerror=1
+                                       if [ -z "$paramerror_str" ]; then
+                                               paramerror_str="File for '--override' does not exist : "$TEST_ENV_VAR_FILE_OVERRIDE
+                                       fi
+                               fi
+                               echo "Option set - Override env from: "$1
+                               shift;
+                               foundparm=0
+                       fi
+               fi
+       fi
+       if [ $paramerror -eq 0 ]; then
+               if [ "$1" == "--pre-clean" ]; then
+                       PRE_CLEAN=1
+                       echo "Option set - Pre-clean of kube/docker resources"
+                       shift;
+                       foundparm=0
+               fi
+       fi
        if [ $paramerror -eq 0 ]; then
                if [ "$1" == "--print-stats" ]; then
                        PRINT_CURRENT_STATS=1
@@ -635,6 +673,10 @@ fi
 if [ -f "$TEST_ENV_VAR_FILE" ]; then
        echo -e $BOLD"Sourcing env vars from: "$TEST_ENV_VAR_FILE$EBOLD
        . $TEST_ENV_VAR_FILE
+       if [ ! -z "$TEST_ENV_VAR_FILE_OVERRIDE" ]; then
+               echo -e $BOLD"Sourcing override env vars from: "$TEST_ENV_VAR_FILE_OVERRIDE$EBOLD
+               . $TEST_ENV_VAR_FILE_OVERRIDE
+       fi
 
        if [ -z "$TEST_ENV_PROFILE" ] || [ -z "$SUPPORTED_PROFILES" ]; then
                echo -e $YELLOW"This test case may not work with selected test env file. TEST_ENV_PROFILE is missing in test_env file or SUPPORTED_PROFILES is missing in test case file"$EYELLOW
@@ -684,6 +726,28 @@ else
        INCLUDED_IMAGES=$DOCKER_INCLUDED_IMAGES
 fi
 
+echo ""
+# auto adding system apps
+echo -e $BOLD"Auto adding system apps"$EBOLD
+if [ $RUNMODE == "KUBE" ]; then
+       INCLUDED_IMAGES=$INCLUDED_IMAGES" "$TESTENV_KUBE_SYSTEM_APPS
+       TMP_APPS=$TESTENV_KUBE_SYSTEM_APPS
+else
+       INCLUDED_IMAGES=$INCLUDED_IMAGES" "$TESTENV_DOCKER_SYSTEM_APPS
+       TMP_APPS=$TESTENV_DOCKER_SYSTEM_APPS
+fi
+if [ ! -z "$TMP_APPS" ]; then
+       for iapp in $TMP_APPS; do
+               file_pointer=$(echo $iapp | tr '[:upper:]' '[:lower:]')
+               file_pointer="../common/"$file_pointer"_api_functions.sh"
+               echo " Auto-adding system app $iapp.  Sourcing $file_pointer"
+               . $file_pointer
+       done
+else
+       echo " None"
+fi
+echo ""
+
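The short-name-to-file mapping used by the loop above is a plain lowercase conversion, so for the kube system app PVCCLEANER it resolves as follows:

```bash
# PVCCLEANER -> pvccleaner -> ../common/pvccleaner_api_functions.sh
echo "PVCCLEANER" | tr '[:upper:]' '[:lower:]'   # prints: pvccleaner
```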
 # Check needed installed sw
 tmp=$(which python3)
 if [ $? -ne 0 ] || [ -z tmp ]; then
@@ -703,6 +767,15 @@ if [ $? -ne 0 ] || [ -z tmp ]; then
                exit 1
        fi
 fi
+if [ $RUNMODE == "DOCKER" ]; then
+       tmp=$(docker-compose version | grep -i 'Docker Compose version')
+       if [[ "$tmp" == *'v2'* ]]; then
+               echo -e $RED"docker-compose is using docker-compose version 2"$ERED
+               echo -e $RED"The test environment only supports version 1"$ERED
+               echo -e $RED"Disable version 2 by cmd 'docker-compose disable-v2' and re-run the script "$ERED
+               exit 1
+       fi
+fi
 
 tmp=$(which kubectl)
 if [ $? -ne 0 ] || [ -z tmp ]; then
@@ -1376,6 +1449,24 @@ setup_testenvironment() {
        echo -e $BOLD"======================================================="$EBOLD
        echo ""
 
+       for imagename in $APP_SHORT_NAMES; do
+               __check_included_image $imagename
+               retcode_i=$?
+               __check_prestarted_image $imagename
+               retcode_p=$?
+               if [ $retcode_i -eq 0 ] || [ $retcode_p -eq 0 ]; then
+                       # A function name is created from the app short name
+                       # for example app short name 'RICMSIM' -> produce the function
+                       # name __RICSIM__initial_setup
+                       # This function is called and is expected to exist in the imported
+                       # file for the ricsim test functions
+                       # The resulting function impl shall perform initial setup of port, host etc
+
+                       function_pointer="__"$imagename"_initial_setup"
+                       $function_pointer
+               fi
+       done
+
 }
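A minimal, hypothetical sketch of such a per-app function (the real implementations live in the corresponding *_api_functions.sh files and set up hosts, ports and paths for the selected run mode; the body and variable below are illustrative only):

```bash
# Hypothetical example - not the project's actual implementation
__PVCCLEANER_initial_setup() {
        if [ $RUNMODE == "KUBE" ]; then
                PVC_CLEANER_HOST=$PVC_CLEANER_APP_NAME"."$KUBE_SIM_NAMESPACE   # illustrative variable
        else
                PVC_CLEANER_HOST="localhost"                                   # illustrative variable
        fi
}
```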
 
 # Function to print the test result, shall be the last cmd in a test script
@@ -1577,6 +1668,9 @@ __clean_containers() {
                docker ps -a --filter "label=nrttest_app=$imagename"  --filter "network=$DOCKER_SIM_NWNAME" --format ' {{.Label "nrttest_dp"}}\n{{.Label "nrttest_app"}}\n{{.Names}}' >> $running_contr_file
        done
 
+       # Kill all containers started by the test env - to speed up shut down
+    docker kill $(docker ps -a  --filter "label=nrttest_app" --format '{{.Names}}') &> /dev/null
+
        tab_heading1="App display name"
        tab_heading2="App short name"
        tab_heading3="Container name"
@@ -1767,39 +1861,47 @@ __kube_scale_all_resources() {
        for restype in $resources; do
                result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
                if [ $? -eq 0 ] && [ ! -z "$result" ]; then
-                       deleted_resourcetypes=$deleted_resourcetypes" "$restype
                        for resid in $result; do
-                               echo -ne "  Ordered caling $restype $resid from namespace $namespace with label $labelname=$labelid to 0"$SAMELINE
+                               echo -ne "  Ordered scaling $restype $resid in namespace $namespace with label $labelname=$labelid to 0"$SAMELINE
                                kubectl scale  $restype $resid  -n $namespace --replicas=0 1> /dev/null 2> ./tmp/kubeerr
-                               echo -e "  Ordered scaling $restype $resid from namespace $namespace with label $labelname=$labelid to 0 $GREEN OK $EGREEN"
+                               echo -e "  Ordered scaling $restype $resid in namespace $namespace with label $labelname=$labelid to 0 $GREEN OK $EGREEN"
                        done
                fi
        done
 }
 
-# Scale all kube resource sets to 0 in a namespace for resources having a certain lable and label-id
+# Scale all kube resource sets to 0 in a namespace for resources having a certain label and an optional label-id
 # This function do wait for the resource to reach 0
-# args: <namespace> <label-name> <label-id>
+# args: <namespace> <label-name> [ <label-id> ]
 # (Not for test scripts)
 __kube_scale_and_wait_all_resources() {
        namespace=$1
        labelname=$2
        labelid=$3
+       if [ -z "$3" ]; then
+               echo "  Attempt to scale - deployment replicaset statefulset - in namespace $namespace with label $labelname"
+       else
+               echo "  Attempt to scale - deployment replicaset statefulset - in namespace $namespace with label $labelname=$labelid"
+       fi
        resources="deployment replicaset statefulset"
        scaled_all=1
        while [ $scaled_all -ne 0 ]; do
                scaled_all=0
                for restype in $resources; do
-                       result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
+                   if [ -z "$3" ]; then
+                               result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname')].metadata.name}')
+                       else
+                               result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
+                       fi
                        if [ $? -eq 0 ] && [ ! -z "$result" ]; then
                                for resid in $result; do
-                                       echo -e "  Ordered scaling $restype $resid from namespace $namespace with label $labelname=$labelid to 0"
+                                       echo -e "   Ordered scaling $restype $resid in namespace $namespace with label $labelname=$labelid to 0"
                                        kubectl scale  $restype $resid  -n $namespace --replicas=0 1> /dev/null 2> ./tmp/kubeerr
                                        count=1
                                        T_START=$SECONDS
                                        while [ $count -ne 0 ]; do
                                                count=$(kubectl get $restype $resid  -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null)
-                                               echo -ne "  Scaling $restype $resid from namespace $namespace with label $labelname=$labelid to 0,count=$count"$SAMELINE
+                                               echo -ne "    Scaling $restype $resid in namespace $namespace with label $labelname=$labelid to 0, current count=$count"$SAMELINE
                                                if [ $? -eq 0 ] && [ ! -z "$count" ]; then
                                                        sleep 0.5
                                                else
@@ -1812,7 +1914,7 @@ __kube_scale_and_wait_all_resources() {
                                                        count=0
                                                fi
                                        done
-                                       echo -e "  Scaled $restype $resid from namespace $namespace with label $labelname=$labelid to 0,count=$count $GREEN OK $EGREEN"
+                                       echo -e "    Scaled $restype $resid in namespace $namespace with label $labelname=$labelid to 0, current count=$count $GREEN OK $EGREEN"
                                done
                        fi
                done
@@ -1830,29 +1932,35 @@ __kube_delete_all_resources() {
        resources="deployments replicaset statefulset services pods configmaps persistentvolumeclaims persistentvolumes"
        deleted_resourcetypes=""
        for restype in $resources; do
-               result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
+               ns_flag="-n $namespace"
+               ns_text="in namespace $namespace"
+               if [ $restype == "persistentvolumes" ]; then
+                       ns_flag=""
+                       ns_text=""
+               fi
+               result=$(kubectl get $restype $ns_flag -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
                if [ $? -eq 0 ] && [ ! -z "$result" ]; then
                        deleted_resourcetypes=$deleted_resourcetypes" "$restype
                        for resid in $result; do
                                if [ $restype == "replicaset" ] || [ $restype == "statefulset" ]; then
                                        count=1
                                        while [ $count -ne 0 ]; do
-                                               count=$(kubectl get $restype $resid  -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null)
-                                               echo -ne "  Scaling $restype $resid from namespace $namespace with label $labelname=$labelid to 0,count=$count"$SAMELINE
+                                               count=$(kubectl get $restype $resid  $ns_flag -o jsonpath='{.status.replicas}' 2> /dev/null)
+                                               echo -ne "  Scaling $restype $resid $ns_text with label $labelname=$labelid to 0, current count=$count"$SAMELINE
                                                if [ $? -eq 0 ] && [ ! -z "$count" ]; then
                                                        sleep 0.5
                                                else
                                                        count=0
                                                fi
                                        done
-                                       echo -e "  Scaled $restype $resid from namespace $namespace with label $labelname=$labelid to 0,count=$count $GREEN OK $EGREEN"
+                                       echo -e "  Scaled $restype $resid $ns_text with label $labelname=$labelid to 0, current count=$count $GREEN OK $EGREEN"
                                fi
-                               echo -ne "  Deleting $restype $resid from namespace $namespace with label $labelname=$labelid "$SAMELINE
-                               kubectl delete $restype $resid -n $namespace 1> /dev/null 2> ./tmp/kubeerr
+                               echo -ne "  Deleting $restype $resid $ns_text with label $labelname=$labelid "$SAMELINE
+                               kubectl delete $restype $resid $ns_flag 1> /dev/null 2> ./tmp/kubeerr
                                if [ $? -eq 0 ]; then
-                                       echo -e "  Deleted $restype $resid from namespace $namespace with label $labelname=$labelid $GREEN OK $EGREEN"
+                                       echo -e "  Deleted $restype $resid $ns_text with label $labelname=$labelid $GREEN OK $EGREEN"
                                else
-                                       echo -e "  Deleted $restype $resid from namespace $namespace with label $labelname=$labelid $GREEN Does not exist - OK $EGREEN"
+                                       echo -e "  Deleted $restype $resid $ns_text with label $labelname=$labelid $GREEN Does not exist - OK $EGREEN"
                                fi
                                #fi
                        done
@@ -1860,17 +1968,23 @@ __kube_delete_all_resources() {
        done
        if [ ! -z "$deleted_resourcetypes" ]; then
                for restype in $deleted_resources; do
-                       echo -ne "  Waiting for $restype in namespace $namespace with label $labelname=$labelid to be deleted..."$SAMELINE
+                       ns_flag="-n $namespace"
+                       ns_text="in namespace $namespace"
+                       if [ $restype == "persistentvolumes" ]; then
+                               ns_flag=""
+                               ns_text=""
+                       fi
+                       echo -ne "  Waiting for $restype $ns_text with label $labelname=$labelid to be deleted..."$SAMELINE
                        T_START=$SECONDS
                        result="dummy"
                        while [ ! -z "$result" ]; do
                                sleep 0.5
-                               result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
-                               echo -ne "  Waiting for $restype in namespace $namespace with label $labelname=$labelid to be deleted...$(($SECONDS-$T_START)) seconds "$SAMELINE
+                               result=$(kubectl get $restype $ns_flag -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
+                               echo -ne "  Waiting for $restype $ns_text with label $labelname=$labelid to be deleted...$(($SECONDS-$T_START)) seconds "$SAMELINE
                                if [ -z "$result" ]; then
-                                       echo -e " Waiting for $restype in namespace $namespace with label $labelname=$labelid to be deleted...$(($SECONDS-$T_START)) seconds $GREEN OK $EGREEN"
+                                       echo -e " Waiting for $restype $ns_text with label $labelname=$labelid to be deleted...$(($SECONDS-$T_START)) seconds $GREEN OK $EGREEN"
                                elif [ $(($SECONDS-$T_START)) -gt 300 ]; then
-                                       echo -e " Waiting for $restype in namespace $namespace with label $labelname=$labelid to be deleted...$(($SECONDS-$T_START)) seconds $RED Failed $ERED"
+                                       echo -e " Waiting for $restype $ns_text with label $labelname=$labelid to be deleted...$(($SECONDS-$T_START)) seconds $RED Failed $ERED"
                                        result=""
                                fi
                        done
@@ -2028,6 +2142,41 @@ __kube_create_configmap() {
        return 0
 }
 
+# Function to create a configmap in kubernetes
+# args: <configmap-name> <namespace> <labelname> <labelid> <path-to-data-file> <path-to-output-yaml>
+# (Not for test scripts)
+__kube_create_configmapXXXXXXXXXXXXX() {
+       echo -ne " Creating configmap $1 "$SAMELINE
+       #envsubst < $5 > $5"_tmp"
+       #cp $5"_tmp" $5  #Need to copy back to orig file name since create configmap neeed the original file name
+       kubectl create configmap $1  -n $2 --from-file=$5 --dry-run=client -o yaml > $6
+       if [ $? -ne 0 ]; then
+               echo -e " Creating configmap $1 $RED Failed $ERED"
+               ((RES_CONF_FAIL++))
+               return 1
+       fi
+
+       kubectl apply -f $6 1> /dev/null 2> ./tmp/kubeerr
+       if [ $? -ne 0 ]; then
+               echo -e " Creating configmap $1 $RED Apply failed $ERED"
+               echo "  Message: $(<./tmp/kubeerr)"
+               ((RES_CONF_FAIL++))
+               return 1
+       fi
+       kubectl label configmap $1 -n $2 $3"="$4 --overwrite 1> /dev/null 2> ./tmp/kubeerr
+       if [ $? -ne 0 ]; then
+               echo -e " Creating configmap $1 $RED Labeling failed $ERED"
+               echo "  Message: $(<./tmp/kubeerr)"
+               ((RES_CONF_FAIL++))
+               return 1
+       fi
+       # Log the resulting map
+       kubectl get configmap $1 -n $2 -o yaml > $6
+
+       echo -e " Creating configmap $1 $GREEN OK $EGREEN"
+       return 0
+}
+
 # This function runs a kubectl cmd where a single output value is expected, for example get ip with jsonpath filter.
 # The function retries up to the timeout given in the cmd flag '--cluster-timeout'
 # args: <full kubectl cmd with parameters>
@@ -2055,17 +2204,19 @@ __kube_cmd_with_timeout() {
 # (Not for test scripts)
 __kube_clean_pvc() {
 
+       #using env vars setup in pvccleaner_api_functions.sh
+
        export PVC_CLEANER_NAMESPACE=$2
        export PVC_CLEANER_CLAIMNAME=$3
        export PVC_CLEANER_RM_PATH=$4
-       input_yaml=$SIM_GROUP"/pvc-cleaner/"pvc-cleaner.yaml
+       input_yaml=$SIM_GROUP"/"$PVC_CLEANER_COMPOSE_DIR"/"pvc-cleaner.yaml
        output_yaml=$PWD/tmp/$2-pvc-cleaner.yaml
 
        envsubst < $input_yaml > $output_yaml
 
        kubectl delete -f $output_yaml 1> /dev/null 2> /dev/null   # Delete the previous terminated pod - if existing
 
-       __kube_create_instance pod pvc-cleaner $input_yaml $output_yaml
+       __kube_create_instance pod $PVC_CLEANER_APP_NAME $input_yaml $output_yaml
        if [ $? -ne 0 ]; then
                echo $YELLOW" Could not clean pvc for app: $1 - persistent storage not clean - tests may not work"
                return 1
@@ -2085,7 +2236,7 @@ __kube_clean_pvc() {
 # args: -
 # (Not for test scripts)
 __clean_kube() {
-       echo -e $BOLD"Initialize kube services//pods/statefulsets/replicaset to initial state"$EBOLD
+       echo -e $BOLD"Initialize kube pods/statefulsets/replicaset to initial state"$EBOLD
 
        # Scale prestarted or managed apps
        for imagename in $APP_SHORT_NAMES; do
@@ -2140,8 +2291,16 @@ __clean_kube() {
 clean_environment() {
        if [ $RUNMODE == "KUBE" ]; then
                __clean_kube
+               if [ $PRE_CLEAN -eq 1 ]; then
+                       echo " Clean docker resources to free up host resources, may take time..."
+                       ../common/clean_docker.sh > /dev/null 2>&1
+               fi
        else
                __clean_containers
+               if [ $PRE_CLEAN -eq 1 ]; then
+                       echo " Clean kubernetes resources to free up host resources, may take time..."
+                       ../common/clean_kube.sh > /dev/null 2>&1
+               fi
        fi
 }
 
@@ -2452,13 +2611,11 @@ store_logs() {
 __do_curl() {
        echo ${FUNCNAME[1]} "line: "${BASH_LINENO[1]} >> $HTTPLOG
        proxyflag=""
-       if [ $RUNMODE == "KUBE" ]; then
-               if [ ! -z "$KUBE_PROXY_PATH" ]; then
-                       if [ $KUBE_PROXY_HTTPX == "http" ]; then
-                               proxyflag=" --proxy $KUBE_PROXY_PATH"
-                       else
-                               proxyflag=" --proxy-insecure --proxy $KUBE_PROXY_PATH"
-                       fi
+       if [ ! -z "$KUBE_PROXY_PATH" ]; then
+               if [ $KUBE_PROXY_HTTPX == "http" ]; then
+                       proxyflag=" --proxy $KUBE_PROXY_PATH"
+               else
+                       proxyflag=" --proxy-insecure --proxy $KUBE_PROXY_PATH"
                fi
        fi
        curlString="curl -skw %{http_code} $proxyflag $@"
index e914f72..3513464 100644 (file)
 #
 
 # List of short names for all supported apps, including simulators etc
-APP_SHORT_NAMES="PA ECS SDNC CP NGW RC RICSIM HTTPPROXY CBS CONSUL DMAAPMR MR CR PRODSTUB KUBEPROXY"
+APP_SHORT_NAMES="PA ECS SDNC CP NGW RC RICSIM HTTPPROXY CBS CONSUL DMAAPMR MR CR PRODSTUB KUBEPROXY DMAAPMED DMAAPADP PVCCLEANER"
 
 # List of available apps that built and released of the project
-PROJECT_IMAGES="PA ECS SDNC CP NGW RICSIM RC"
+PROJECT_IMAGES="PA ECS SDNC CP NGW RICSIM RC DMAAPMED DMAAPADP"
 
 # List of available apps to override with local or remote staging/snapshot/release image
-AVAILABLE_IMAGES_OVERRIDE="PA ECS SDNC CP NGW RICSIM RC"
+AVAILABLE_IMAGES_OVERRIDE="PA ECS SDNC CP NGW RICSIM RC DMAAPMED DMAAPADP"
 
 # List of available apps where the image is built by the test environment
 LOCAL_IMAGE_BUILD="MR CR PRODSTUB KUBEPROXY HTTPPROXY"
 
+# List of system apps used only by the test env - kubernetes
+TESTENV_KUBE_SYSTEM_APPS="PVCCLEANER"
+
+# List of system apps used only by the test env - docker
+TESTENV_DOCKER_SYSTEM_APPS=""
+
 
 #Integrate a new app into the test environment
 # 1 Choose a short name for the app
@@ -41,6 +47,7 @@ LOCAL_IMAGE_BUILD="MR CR PRODSTUB KUBEPROXY HTTPPROXY"
 #   This is default...so normally images shall be possible to override
 # 5 If the image is built by the test script,
 #   add the short name to LOCAL_IMAGE_BUILD
+# 6 Special apps used only by the test env are added to TESTENV_KUBE_SYSTEM_APPS and/or TESTENV_DOCKER_SYSTEM_APPS
 # Summary:
 # All app short name shall exist in APP_SHORT_NAMES
 # Then the app short name be added to both PROJECT_IMAGES and AVAILABLE_IMAGES_OVERRIDE
index 24c9033..e66d30f 100644 (file)
@@ -20,8 +20,7 @@ ARG NEXUS_PROXY_REPO
 FROM ${NEXUS_PROXY_REPO}python:3.8-slim-buster
 
 #install nginx
-RUN apt-get update
-RUN apt-get install -y nginx=1.14.*
+RUN apt-get update; apt-get install -y nginx=1.14.*
 
 COPY app/ /usr/src/app/
 COPY cert/ /usr/src/app/cert/
index c00f539..55c5cc5 100644 (file)
@@ -48,7 +48,8 @@ There are a number of counters that can be read to monitor the message processin
 ```/counter/fetched_callbacks``` - The total number of fetched callbacks<br>
 ```/counter/current_messages``` - The current number of callback messages waiting to be fetched<br>
 All counters also support the query parameter "id" to fetch counter for one individual id, eg ```/counter/current_messages?id=my-id```
-
+An additional counter is available to log the remote hosts calling the server:<br>
+```/counter/remote_hosts``` - Lists all unique IPs/host names that have sent messages to the callback endpoints<br>
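Example (host, port and output below are illustrative):

```
curl localhost:8090/counter/remote_hosts      # -> my-test-host,10.244.0.17
```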
 
 ### Build and start
 
index 9e04d57..4b4d8da 100644 (file)
@@ -24,6 +24,7 @@ import json
 import traceback
 import logging
 import socket
+from threading import RLock
 
 # Disable all logging of GET on reading counters and db
 class AjaxFilter(logging.Filter):
@@ -35,6 +36,8 @@ log.addFilter(AjaxFilter())
 
 app = Flask(__name__)
 
+lock = RLock()
+
 # list of callback messages
 msg_callbacks={}
 
@@ -46,9 +49,11 @@ HOST_PORT = 2222
 cntr_msg_callbacks=0
 cntr_msg_fetched=0
 cntr_callbacks={}
+hosts_set=set()
 
 # Request and response constants
 CALLBACK_URL="/callbacks/<string:id>"
+CALLBACK_MR_URL="/callbacks-mr/<string:id>" #Json list with string encoded items
 APP_READ_URL="/get-event/<string:id>"
 APP_READ_ALL_URL="/get-all-events/<string:id>"
 DUMP_ALL_URL="/db"
@@ -59,6 +64,10 @@ CAUGHT_EXCEPTION="Caught exception: "
 SERVER_ERROR="Server error :"
 TIME_STAMP="cr-timestamp"
 
+forced_settings={}
+forced_settings['delay']=None
+
+
 # Remote host lookup and print host name
 def remote_host_logging(request):
 
@@ -72,8 +81,10 @@ def remote_host_logging(request):
     try:
         name, alias, addresslist = socket.gethostbyaddr(host_ip)
         print("Calling host: "+str(name))
+        hosts_set.add(name)
     except Exception:
         print("Calling host not possible to retrieve IP: "+str(host_ip))
+        hosts_set.add(host_ip)
 
 
 #I'm alive function
@@ -93,22 +104,23 @@ def receiveresponse(id):
     global msg_callbacks
     global cntr_msg_fetched
 
-    try:
-        if ((id in msg_callbacks.keys()) and (len(msg_callbacks[id]) > 0)):
-            cntr_msg_fetched+=1
-            cntr_callbacks[id][1]+=1
-            msg=msg_callbacks[id][0]
-            print("Fetching msg for id: "+id+", msg="+str(msg))
-            del msg[TIME_STAMP]
-            del msg_callbacks[id][0]
-            return json.dumps(msg),200
-        print("No messages for id: "+id)
-    except Exception as e:
-        print(CAUGHT_EXCEPTION+str(e))
-        traceback.print_exc()
-        return "",500
+    with lock:
+        try:
+            if ((id in msg_callbacks.keys()) and (len(msg_callbacks[id]) > 0)):
+                cntr_msg_fetched+=1
+                cntr_callbacks[id][1]+=1
+                msg=msg_callbacks[id][0]
+                print("Fetching msg for id: "+id+", msg="+str(msg))
+                del msg[TIME_STAMP]
+                del msg_callbacks[id][0]
+                return json.dumps(msg),200
+            print("No messages for id: "+id)
+        except Exception as e:
+            print(CAUGHT_EXCEPTION+str(e))
+            traceback.print_exc()
+            return "",500
 
-    return "",204
+        return "",204
 
 # Fetch all callback message for an id in an array
 # URI and parameter, (GET): /get-all-events/<id>
@@ -119,24 +131,25 @@ def receiveresponse_all(id):
     global msg_callbacks
     global cntr_msg_fetched
 
-    try:
-        if ((id in msg_callbacks.keys()) and (len(msg_callbacks[id]) > 0)):
-            cntr_msg_fetched+=len(msg_callbacks[id])
-            cntr_callbacks[id][1]+=len(msg_callbacks[id])
-            msg=msg_callbacks[id]
-            print("Fetching all msgs for id: "+id+", msg="+str(msg))
-            for sub_msg in msg:
-                del sub_msg[TIME_STAMP]
-            del msg_callbacks[id]
-            return json.dumps(msg),200
-        print("No messages for id: "+id)
-    except Exception as e:
-        print(CAUGHT_EXCEPTION+str(e))
-        traceback.print_exc()
-        return "",500
+    with lock:
+        try:
+            if ((id in msg_callbacks.keys()) and (len(msg_callbacks[id]) > 0)):
+                cntr_msg_fetched+=len(msg_callbacks[id])
+                cntr_callbacks[id][1]+=len(msg_callbacks[id])
+                msg=msg_callbacks[id]
+                print("Fetching all msgs for id: "+id+", msg="+str(msg))
+                for sub_msg in msg:
+                    del sub_msg[TIME_STAMP]
+                del msg_callbacks[id]
+                return json.dumps(msg),200
+            print("No messages for id: "+id)
+        except Exception as e:
+            print(CAUGHT_EXCEPTION+str(e))
+            traceback.print_exc()
+            return "",500
 
-    msg=[]
-    return json.dumps(msg),200
+        msg=[]
+        return json.dumps(msg),200
 
 # Receive a callback message
 # URI and payload, (PUT or POST): /callbacks/<id> <json messages>
@@ -150,6 +163,8 @@ def events_write(id):
     try:
         print("Received callback for id: "+id +", content-type="+request.content_type)
         remote_host_logging(request)
+        print("raw data: str(request.data): "+str(request.data))
+        do_delay()
         try:
             if (request.content_type == MIME_JSON):
                 data = request.data
@@ -163,20 +178,79 @@ def events_write(id):
             print("(Exception) Payload does not contain any json, setting empty json as payload")
             traceback.print_exc()
 
-        cntr_msg_callbacks += 1
-        msg[TIME_STAMP]=str(datetime.now())
-        if (id in msg_callbacks.keys()):
-            msg_callbacks[id].append(msg)
-        else:
-            msg_callbacks[id]=[]
-            msg_callbacks[id].append(msg)
-
-        if (id in cntr_callbacks.keys()):
-            cntr_callbacks[id][0] += 1
-        else:
-            cntr_callbacks[id]=[]
-            cntr_callbacks[id].append(1)
-            cntr_callbacks[id].append(0)
+        with lock:
+            cntr_msg_callbacks += 1
+            msg[TIME_STAMP]=str(datetime.now())
+            if (id in msg_callbacks.keys()):
+                msg_callbacks[id].append(msg)
+            else:
+                msg_callbacks[id]=[]
+                msg_callbacks[id].append(msg)
+
+            if (id in cntr_callbacks.keys()):
+                cntr_callbacks[id][0] += 1
+            else:
+                cntr_callbacks[id]=[]
+                cntr_callbacks[id].append(1)
+                cntr_callbacks[id].append(0)
+
+    except Exception as e:
+        print(CAUGHT_EXCEPTION+str(e))
+        traceback.print_exc()
+        return 'NOTOK',500
+
+    return 'OK',200
+
+
+# Receive a json callback message with a payload formatted according to the output from the message router
+# URI and payload, (PUT or POST): /callbacks-mr/<id> <json messages>
+# json is a list of string encoded json items
+# response: OK 200 or 500 for other errors
+@app.route(CALLBACK_MR_URL,
+    methods=['PUT','POST'])
+def events_write_mr(id):
+    global msg_callbacks
+    global cntr_msg_callbacks
+
+    try:
+        print("Received callback (mr) for id: "+id +", content-type="+request.content_type)
+        remote_host_logging(request)
+        print("raw data: str(request.data): "+str(request.data))
+        do_delay()
+        try:
+            #if (request.content_type == MIME_JSON):
+            if (MIME_JSON in request.content_type):
+                data = request.data
+                msg_list = json.loads(data)
+                print("Payload(json): "+str(msg_list))
+            else:
+                msg_list=[]
+                print("Payload(content-type="+request.content_type+"). Setting empty json as payload")
+        except Exception as e:
+            msg_list=[]
+            print("(Exception) Payload does not contain any json, setting empty json as payload")
+            traceback.print_exc()
+
+        with lock:
+            remote_host_logging(request)
+            for msg in msg_list:
+                print("msg (str): "+str(msg))
+                msg=json.loads(msg)
+                print("msg (json): "+str(msg))
+                cntr_msg_callbacks += 1
+                msg[TIME_STAMP]=str(datetime.now())
+                if (id in msg_callbacks.keys()):
+                    msg_callbacks[id].append(msg)
+                else:
+                    msg_callbacks[id]=[]
+                    msg_callbacks[id].append(msg)
+
+                if (id in cntr_callbacks.keys()):
+                    cntr_callbacks[id][0] += 1
+                else:
+                    cntr_callbacks[id]=[]
+                    cntr_callbacks[id].append(1)
+                    cntr_callbacks[id].append(0)
 
     except Exception as e:
         print(CAUGHT_EXCEPTION+str(e))
@@ -233,7 +307,37 @@ def current_messages():
     else:
         return Response(str("0"), status=200, mimetype=MIME_TEXT)
 
+@app.route('/counter/remote_hosts',
+    methods=['GET'])
+def remote_hosts():
+    global hosts_set
 
+    hosts=",".join(hosts_set)
+    return Response(str(hosts), status=200, mimetype=MIME_TEXT)
+
+
+#Set a forced delay, in seconds, applied to the response of all callbacks
+#/forcedelay?delay=<seconds>
+@app.route('/forcedelay', methods=['POST'])
+def forcedelay():
+
+  try:
+    forced_settings['delay']=int(request.args.get('delay'))
+  except Exception:
+    forced_settings['delay']=None
+  return Response("Force delay: " + str(forced_settings['delay']) + " sec set for all callback responses", 200, mimetype=MIME_TEXT)
+
+# Helper: Delay the response if a forced delay is set
+def do_delay():
+  if (forced_settings['delay'] is not None):
+    try:
+      val=int(forced_settings['delay'])
+      if (val < 1):
+          return Response("Force delay too short: " + str(forced_settings['delay']) + " sec", 500, mimetype=MIME_TEXT)
+      print("Delaying "+str(val)+ " sec.")
+      time.sleep(val)
+    except Exception:
+      return Response("Force delay : " + str(forced_settings['delay']) + " sec failed", 500, mimetype=MIME_TEXT)
 ### Admin ###
 
 # Reset all messsages and counters
@@ -244,13 +348,16 @@ def reset():
     global cntr_msg_fetched
     global cntr_msg_callbacks
     global cntr_callbacks
+    global forced_settings
 
-    msg_callbacks={}
-    cntr_msg_fetched=0
-    cntr_msg_callbacks=0
-    cntr_callbacks={}
+    with lock:
+        msg_callbacks={}
+        cntr_msg_fetched=0
+        cntr_msg_callbacks=0
+        cntr_callbacks={}
+        forced_settings['delay']=None
 
-    return Response('OK', status=200, mimetype=MIME_TEXT)
+        return Response('OK', status=200, mimetype=MIME_TEXT)
 
 ### Main function ###
 
index d0bf25d..44e8526 100755 (executable)
@@ -65,6 +65,10 @@ echo "=== Get counter - current events ==="
 RESULT="0"
 do_curl GET /counter/current_messages 200
 
+echo "=== Get counter - remote hosts ==="
+RESULT="*"
+do_curl GET /counter/remote_hosts 200
+
 echo "=== Send a request non json ==="
 RESULT="*"
 #create payload
@@ -235,6 +239,32 @@ RESULT="0"
 do_curl GET /counter/current_messages 200
 
 
+# Check delay
+
+echo "=== Set delay 10 sec==="
+RESULT="*"
+do_curl POST /forcedelay?delay=10 200
+
+TSECONDS=$SECONDS
+echo "=== Send a request, dealyed ==="
+RESULT="*"
+#create payload
+echo "{\"DATA-MSG\":\"msg-del1\"}" > .tmp.json
+do_curl POST '/callbacks/test' 200 .tmp.json
+
+if [ $(($SECONDS-$TSECONDS)) -lt 10 ]; then
+    echo "  Delay failed $(($SECONDS-$TSECONDS))"
+    echo "  Exiting...."
+    exit 1
+else
+    echo "  Delay OK $(($SECONDS-$TSECONDS))"
+fi
+
+
+echo "=== Fetch an event ==="
+RESULT="json:{\"DATA-MSG\":\"msg-del1\"}"
+do_curl GET '/get-event/test' 200
+
 echo "********************"
 echo "*** All tests ok ***"
 echo "********************"
index 90924b1..fb6d674 100644 (file)
@@ -43,6 +43,8 @@ lock = RLock()
 msg_requests=[]
 msg_responses={}
 
+generic_messages={}
+
 # Server info
 HOST_IP = "::"
 HOST_PORT = 2222
@@ -105,7 +107,7 @@ def dmaap_uploader():
         sleep(0.01)
 
 
-# Function to download messages from dmaap
+# Function to upload messages to dmaap
 def dmaap_downloader():
     global msg_responses
     global cntr_msg_responses_submitted
@@ -365,6 +367,97 @@ def oru_read():
         return Response(json.dumps(res), status=200, mimetype=MIME_JSON)
     return Response("[]", status=200, mimetype=MIME_JSON)
 
+# Generic POST/PUT catching all urls starting with /events/<topic>.
+# Writes the message to a queue for that topic
+@app.route("/events/<path>",
+    methods=['PUT','POST'])
+def generic_write(path):
+    global generic_messages
+    global cntr_msg_responses_submitted
+    urlkey="/events/"+str(path)
+    write_method=str(request.method)
+    with lock:
+        try:
+            payload=request.json
+            print(write_method+" on "+urlkey+" json=" + json.dumps(payload))
+            topicmsgs=[]
+            if (urlkey in generic_messages.keys()):
+                topicmsgs=generic_messages[urlkey]
+            else:
+                generic_messages[urlkey]=topicmsgs
+
+            if isinstance(payload, list):
+                for listitem in payload:
+                    topicmsgs.append(listitem)
+            else:
+                topicmsgs.append(payload)
+
+            cntr_msg_responses_submitted += 1
+        except Exception as e:
+            print(write_method + " on "+urlkey+"-"+CAUGHT_EXCEPTION+" "+str(e) + " "+traceback.format_exc())
+            return Response('{"message": "' + SERVER_ERROR + ' ' + str(e) + '","status":"500"}', status=200, mimetype=MIME_JSON)
+
+        return Response('{}', status=200, mimetype=MIME_JSON)
+
+# Generic GET catching all urls starting with /events/. Returns max 4096 json msgs in an array.
+# Returns only the messages previously written to the same topic
+@app.route("/events/<path:path>",
+    methods=['GET'])
+def generic_read(path):
+    global generic_messages
+    global cntr_msg_requests_fetched
+
+    urlpath="/events/"+str(path)
+    urlkey="/events/"+str(path).split("/")[0] #Extract topic
+    print("GET on topic"+urlkey)
+    limit=request.args.get('limit')
+    if (limit is None):
+        limit=4096
+    else:
+        limit=int(limit)
+    if (limit<0):
+        limit=0
+    if (limit>4096):
+        limit=4096
+    print("Limting number of returned messages to: "+str(limit))
+
+    timeout=request.args.get('timeout')
+    if (timeout is None):
+        timeout=10000
+    else:
+        timeout=min(int(timeout),60000)
+
+    start_time=int(round(time.time() * 1000))
+    current_time=int(round(time.time() * 1000))
+    topicmsgs=[]
+    if (urlkey in generic_messages.keys()):
+        topicmsgs=generic_messages[urlkey]
+
+    while(current_time<start_time+int(timeout)):
+        with lock:
+            if(len(topicmsgs)>0):
+                try:
+                    msgs=''
+                    cntr=0
+                    while(cntr<limit and len(topicmsgs)>0):
+                        if (len(msgs)>1):
+                            msgs=msgs+','
+                        msgs=msgs+json.dumps(json.dumps(topicmsgs.pop(0)))
+                        cntr_msg_requests_fetched += 1
+                        cntr=cntr+1
+                    msgs='['+msgs+']'
+                    print("GET on "+urlpath+" MSGs: "+msgs)
+                    return Response(msgs, status=200, mimetype=MIME_JSON)
+                except Exception as e:
+                    print("GET on "+urlpath+"-"+CAUGHT_EXCEPTION+" "+str(e) + " "+traceback.format_exc())
+                    return Response(SERVER_ERROR+" "+str(e), status=500, mimetype=MIME_TEXT)
+        sleep(0.025) # sleep 25 milliseconds
+        current_time=int(round(time.time() * 1000))
+
+    print("timeout: "+str(timeout)+", start_time: "+str(start_time)+", current_time: "+str(current_time))
+    return Response("[]", status=200, mimetype=MIME_JSON)
+
+
 ### Functions for metrics read out ###
 
 @app.route('/counter/requests_submitted',
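The generic /events/<topic> endpoints above mimic the DMaaP MR message-router semantics (write with POST/PUT, read with GET and optional limit/timeout query parameters). A minimal curl sketch, assuming the mrstub is reached on its container-internal port 2222 (use the externally mapped port if different); the topic name is just an example:

    MRSTUB=http://localhost:2222   # HOST_PORT from main.py; substitute the mapped port
    # write one json message to a topic (a json array of messages is also accepted)
    echo '{"msg":"hello"}' > .tmp.json
    curl -X POST -H 'Content-Type: application/json' --data-binary @.tmp.json "$MRSTUB/events/mytopic"
    # read back at most 10 messages, waiting up to 5000 ms if the topic is empty
    curl -X GET "$MRSTUB/events/mytopic/users/consumer-id?limit=10&timeout=5000"

Note that the write route matches a single path segment (the topic), while the read route accepts the full consumer path and uses only the first segment after /events/ as the topic key.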
index dbd4297..582c36f 100755 (executable)
@@ -164,6 +164,17 @@ echo "=== Fetch a response ==="
 RESULT="test2-response200"
 do_curl GET '/receive-response?correlationid='$CORRID 200
 
+
+echo "=== Send a json response ==="
+# Create a minimal json message (same shape as a response message)
+echo "{\"correlationId\": \""$CORRID"\", \"message\": {\"test\":\"testresponse\"}, \"status\": \"200\"}" > .tmp.json
+RESULT="{}"
+do_curl POST /events/generic-path 200 .tmp.json
+
+echo "=== Fetch a request ==="
+RESULT="json:[{\"correlationId\": \""$CORRID"\", \"message\": {\"test\":\"testresponse\"}, \"status\": \"200\"}]"
+do_curl GET '/events/generic-path' 200
+
 echo "********************"
 echo "*** All tests ok ***"
 echo "********************"
\ No newline at end of file
index 4bf9271..105c875 100644 (file)
@@ -15,7 +15,7 @@
 #  ============LICENSE_END=================================================
 #
 
-version: '3'
+version: '3.0'
 
 networks:
   default:
diff --git a/test/simulator-group/dmaapadp/.gitignore b/test/simulator-group/dmaapadp/.gitignore
new file mode 100644 (file)
index 0000000..4aa3a7a
--- /dev/null
@@ -0,0 +1,2 @@
+.tmp.json
+.dockererr
\ No newline at end of file
diff --git a/test/simulator-group/dmaapadp/app.yaml b/test/simulator-group/dmaapadp/app.yaml
new file mode 100644 (file)
index 0000000..1376f38
--- /dev/null
@@ -0,0 +1,45 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: $DMAAP_ADP_APP_NAME
+  namespace: $KUBE_NONRTRIC_NAMESPACE
+  labels:
+    run: $DMAAP_ADP_APP_NAME
+    autotest: DMAAPADP
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      run: $DMAAP_ADP_APP_NAME
+  template:
+    metadata:
+      labels:
+        run: $DMAAP_ADP_APP_NAME
+        autotest: DMAAPADP
+    spec:
+      containers:
+      - name: $DMAAP_ADP_APP_NAME
+        image: $DMAAP_ADP_IMAGE
+        imagePullPolicy: $KUBE_IMAGE_PULL_POLICY
+        ports:
+        - name: http
+          containerPort: $DMAAP_ADP_INTERNAL_PORT
+        - name: https
+          containerPort: $DMAAP_ADP_INTERNAL_SECURE_PORT
+        volumeMounts:
+        - mountPath: $DMAAP_ADP_CONFIG_MOUNT_PATH
+          name: dmaapadp-conf-name
+        - mountPath: $DMAAP_ADP_DATA_MOUNT_PATH
+          name: dmaapadp-data-name
+      volumes:
+      - configMap:
+          defaultMode: 420
+          name: $DMAAP_ADP_CONFIG_CONFIGMAP_NAME
+        name: dmaapadp-conf-name
+      - configMap:
+          defaultMode: 420
+          name: $DMAAP_ADP_DATA_CONFIGMAP_NAME
+        name: dmaapadp-data-name
+# Selector will be set when pod is started first time
+      nodeSelector:
+
diff --git a/test/simulator-group/dmaapadp/application.yaml b/test/simulator-group/dmaapadp/application.yaml
new file mode 100644 (file)
index 0000000..b20a9d7
--- /dev/null
@@ -0,0 +1,71 @@
+################################################################################
+#   Copyright (c) 2021 Nordix Foundation.                                      #
+#                                                                              #
+#   Licensed under the Apache License, Version 2.0 (the "License");            #
+#   you may not use this file except in compliance with the License.           #
+#   You may obtain a copy of the License at                                    #
+#                                                                              #
+#       http://www.apache.org/licenses/LICENSE-2.0                             #
+#                                                                              #
+#   Unless required by applicable law or agreed to in writing, software        #
+#   distributed under the License is distributed on an "AS IS" BASIS,          #
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+#   See the License for the specific language governing permissions and        #
+#   limitations under the License.                                             #
+################################################################################
+
+spring:
+  profiles:
+    active: prod
+  main:
+    allow-bean-definition-overriding: true
+  aop:
+    auto: false
+management:
+  endpoints:
+    web:
+      exposure:
+        # Enabling of springboot actuator features. See springboot documentation.
+        include: "loggers,logfile,health,info,metrics,threaddump,heapdump"
+springdoc:
+  show-actuator: true
+logging:
+  # Configuration of logging
+  level:
+    ROOT: ERROR
+    org.springframework: ERROR
+    org.springframework.data: ERROR
+    org.springframework.web.reactive.function.client.ExchangeFunctions: ERROR
+    org.oran.dmaapadapter: INFO
+  file:
+    name: /var/log/dmaap-adaptor-service/application.log
+server:
+   # Configuration of the HTTP/REST server. The parameters are defined and handled by the springboot framework.
+   # See springboot documentation.
+   port : $DMAAP_ADP_INTERNAL_SECURE_PORT
+   http-port: $DMAAP_ADP_INTERNAL_PORT
+   ssl:
+      key-store-type: JKS
+      key-store-password: policy_agent
+      key-store: /opt/app/dmaap-adaptor-service/etc/cert/keystore.jks
+      key-password: policy_agent
+      key-alias: policy_agent
+app:
+  webclient:
+    # Configuration of the trust store used for the HTTP client (outgoing requests)
+    # The file location and the password for the truststore are only relevant if trust-store-used == true
+    # Note that the same keystore as for the server is used.
+    trust-store-used: false
+    trust-store-password: policy_agent
+    trust-store: /opt/app/dmaap-adaptor-service/etc/cert/truststore.jks
+    # Configuration of usage of HTTP Proxy for the southbound accesses.
+    # The HTTP proxy (if configured) will only be used for accessing Near-RT RICs
+    http.proxy-host: $DMAAP_ADP_HTTP_PROXY_CONFIG_HOST_NAME
+    http.proxy-port: $DMAAP_ADP_HTTP_PROXY_CONFIG_PORT
+  ecs-base-url: $ECS_SERVICE_PATH
+  # Location of the component configuration file. The file will only be used if the Consul database is not used;
+  # configuration from the Consul will override the file.
+  configuration-filepath: /opt/app/dmaap-adaptor-service/data/application_configuration.json
+  dmaap-base-url: $MR_SERVICE_PATH
+  # The URL used to address this component. It is used as a callback URL sent to other components.
+  dmaap-adapter-base-url: $DMAAP_ADP_SERVICE_PATH
\ No newline at end of file
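With the actuator endpoints exposed above (health, info, metrics, ...), a quick liveness check of the adapter can be made over plain HTTP; a minimal sketch assuming the standard Spring Boot actuator base path and the externally mapped HTTP port used by the compose file further down:

    curl "http://localhost:${DMAAP_ADP_EXTERNAL_PORT}/actuator/health"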
diff --git a/test/simulator-group/dmaapadp/application_configuration.json b/test/simulator-group/dmaapadp/application_configuration.json
new file mode 100644 (file)
index 0000000..b6605e3
--- /dev/null
@@ -0,0 +1,9 @@
+{
+  "types": [
+     {
+        "id": "ExampleInformationType",
+        "dmaapTopicUrl": "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs",
+        "useHttpProxy": ${DMMAAP_ADP_PROXY_FLAG}
+     }
+  ]
+}
\ No newline at end of file
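This file is a template; the test framework substitutes the ${...} variables before the configuration is mounted into the adapter. The same expansion can be reproduced locally with envsubst to inspect the result (a sketch, keeping the variable name exactly as it appears in the template; the value shown is just an example):

    export DMMAAP_ADP_PROXY_FLAG=false   # example value, normally set by the test scripts
    envsubst < application_configuration.json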
diff --git a/test/simulator-group/dmaapadp/docker-compose.yml b/test/simulator-group/dmaapadp/docker-compose.yml
new file mode 100644 (file)
index 0000000..f9dee41
--- /dev/null
@@ -0,0 +1,34 @@
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2020 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+version: '3.0'
+networks:
+  default:
+    external:
+      name: ${DOCKER_SIM_NWNAME}
+services:
+  dmaap-adapter-service:
+    image: ${DMAAP_ADP_IMAGE}
+    container_name: ${DMAAP_ADP_APP_NAME}
+    ports:
+    - ${DMAAP_ADP_EXTERNAL_PORT}:${DMAAP_ADP_INTERNAL_PORT}
+    - ${DMAAP_ADP_EXTERNAL_SECURE_PORT}:${DMAAP_ADP_INTERNAL_SECURE_PORT}
+    volumes:
+    - ${DMAAP_ADP_HOST_MNT_DIR}/$DMAAP_ADP_CONFIG_FILE:${DMAAP_ADP_CONFIG_MOUNT_PATH}/$DMAAP_ADP_CONFIG_FILE
+    - ${DMAAP_ADP_HOST_MNT_DIR}/$DMAAP_ADP_DATA_FILE:${DMAAP_ADP_DATA_MOUNT_PATH}/$DMAAP_ADP_DATA_FILE
+    labels:
+      - "nrttest_app=DMAAPADP"
+      - "nrttest_dp=${DMAAP_ADP_DISPLAY_NAME}"
diff --git a/test/simulator-group/dmaapadp/svc.yaml b/test/simulator-group/dmaapadp/svc.yaml
new file mode 100644 (file)
index 0000000..62a7f7a
--- /dev/null
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: $DMAAP_ADP_APP_NAME
+  namespace: $KUBE_NONRTRIC_NAMESPACE
+  labels:
+    run: $DMAAP_ADP_APP_NAME
+    autotest: DMAAPADP
+spec:
+  type: ClusterIP
+  ports:
+  - port: $DMAAP_ADP_EXTERNAL_PORT
+    targetPort: $DMAAP_ADP_INTERNAL_PORT
+    protocol: TCP
+    name: http
+  - port: $DMAAP_ADP_EXTERNAL_SECURE_PORT
+    targetPort: $DMAAP_ADP_INTERNAL_SECURE_PORT
+    protocol: TCP
+    name: https
+  selector:
+    run: $DMAAP_ADP_APP_NAME
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmed/.gitignore b/test/simulator-group/dmaapmed/.gitignore
new file mode 100644 (file)
index 0000000..4aa3a7a
--- /dev/null
@@ -0,0 +1,2 @@
+.tmp.json
+.dockererr
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmed/app.yaml b/test/simulator-group/dmaapmed/app.yaml
new file mode 100644 (file)
index 0000000..e0296fa
--- /dev/null
@@ -0,0 +1,51 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: $DMAAP_MED_APP_NAME
+  namespace: $KUBE_NONRTRIC_NAMESPACE
+  labels:
+    run: $DMAAP_MED_APP_NAME
+    autotest: DMAAPMED
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      run: $DMAAP_MED_APP_NAME
+  template:
+    metadata:
+      labels:
+        run: $DMAAP_MED_APP_NAME
+        autotest: DMAAPMED
+    spec:
+      containers:
+      - name: $DMAAP_MED_APP_NAME
+        image: $DMAAP_MED_IMAGE
+        imagePullPolicy: $KUBE_IMAGE_PULL_POLICY
+        ports:
+        - name: http
+          containerPort: $DMAAP_MED_INTERNAL_PORT
+        - name: https
+          containerPort: $DMAAP_MED_INTERNAL_SECURE_PORT
+        volumeMounts:
+        - mountPath: $DMAAP_MED_DATA_MOUNT_PATH/$DMAAP_MED_DATA_FILE
+          subPath: $DMAAP_MED_DATA_FILE
+          name: dmaapadp-data-name
+        env:
+        - name: INFO_PRODUCER_HOST
+          value: "$DMAAP_MED_CONF_SELF_HOST"
+        - name: INFO_PRODUCER_PORT
+          value: "$DMAAP_MED_CONF_SELF_PORT"
+        - name: INFO_COORD_ADDR
+          value: "$ECS_SERVICE_PATH"
+        - name: DMAAP_MR_ADDR
+          value: "$MR_SERVICE_PATH"
+        - name: LOG_LEVEL
+          value: "Debug"
+      volumes:
+      - configMap:
+          defaultMode: 420
+          name: $DMAAP_MED_DATA_CONFIGMAP_NAME
+        name: dmaapadp-data-name
+# Selector will be set when pod is started first time
+      nodeSelector:
+
diff --git a/test/simulator-group/dmaapmed/docker-compose.yml b/test/simulator-group/dmaapmed/docker-compose.yml
new file mode 100644 (file)
index 0000000..21fe551
--- /dev/null
@@ -0,0 +1,40 @@
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2020 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+version: '3.0'
+networks:
+  default:
+    external:
+      name: ${DOCKER_SIM_NWNAME}
+services:
+  dmaap-mediator-service:
+    image: ${DMAAP_MED_IMAGE}
+    container_name: ${DMAAP_MED_APP_NAME}
+
+    ports:
+    - ${DMAAP_MED_EXTERNAL_PORT}:${DMAAP_MED_INTERNAL_PORT}
+    - ${DMAAP_MED_EXTERNAL_SECURE_PORT}:${DMAAP_MED_INTERNAL_SECURE_PORT}
+    environment:
+      - INFO_PRODUCER_HOST=${DMAAP_MED_CONF_SELF_HOST}
+      - INFO_PRODUCER_PORT=${DMAAP_MED_CONF_SELF_PORT}
+      - INFO_COORD_ADDR=${ECS_SERVICE_PATH}
+      - DMAAP_MR_ADDR=${MR_SERVICE_PATH}
+      - LOG_LEVEL="Debug"
+    volumes:
+    - ${DMAAP_MED_HOST_MNT_DIR}/$DMAAP_MED_DATA_FILE:${DMAAP_MED_DATA_MOUNT_PATH}/$DMAAP_MED_DATA_FILE
+    labels:
+      - "nrttest_app=DMAAPMED"
+      - "nrttest_dp=${DMAAP_MED_DISPLAY_NAME}"
diff --git a/test/simulator-group/dmaapmed/svc.yaml b/test/simulator-group/dmaapmed/svc.yaml
new file mode 100644 (file)
index 0000000..a064cfa
--- /dev/null
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: $DMAAP_MED_APP_NAME
+  namespace: $KUBE_NONRTRIC_NAMESPACE
+  labels:
+    run: $DMAAP_MED_APP_NAME
+    autotest: DMAAPMED
+spec:
+  type: ClusterIP
+  ports:
+  - port: $DMAAP_MED_EXTERNAL_PORT
+    targetPort: $DMAAP_MED_INTERNAL_PORT
+    protocol: TCP
+    name: http
+  - port: $DMAAP_MED_EXTERNAL_SECURE_PORT
+    targetPort: $DMAAP_MED_INTERNAL_SECURE_PORT
+    protocol: TCP
+    name: https
+  selector:
+    run: $DMAAP_MED_APP_NAME
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmed/type_config.json b/test/simulator-group/dmaapmed/type_config.json
new file mode 100644 (file)
index 0000000..8a67226
--- /dev/null
@@ -0,0 +1,9 @@
+{
+   "types":
+     [
+       {
+         "id": "STD_Fault_Messages",
+         "dmaapTopicUrl": "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages"
+       }
+   ]
+ }
\ No newline at end of file
index 39756bb..2b39d15 100644 (file)
@@ -80,7 +80,7 @@ spec:
         imagePullPolicy: $KUBE_IMAGE_PULL_POLICY
         ports:
         - name: http
-          containerPort: 9092
+          containerPort: 9095
         env:
         - name: enableCadi
           value: 'false'
@@ -92,12 +92,12 @@ spec:
           value: '40000'
         - name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP
           value: 'INTERNAL_PLAINTEXT:PLAINTEXT,EXTERNAL_PLAINTEXT:PLAINTEXT'
-#        - name: KAFKA_ADVERTISED_LISTENERS
-#          value: 'INTERNAL_PLAINTEXT://akfak-bwds.onap:9092'
         - name: KAFKA_ADVERTISED_LISTENERS
-          value: 'INTERNAL_PLAINTEXT://localhost:9092'
+          value: 'INTERNAL_PLAINTEXT://kaka:9092'
+#        - name: KAFKA_ADVERTISED_LISTENERS
+#          value: 'INTERNAL_PLAINTEXT://localhost:9092'
         - name: KAFKA_LISTENERS
-          value: 'INTERNAL_PLAINTEXT://0.0.0.0:9092'
+          value: 'EXTERNAL_PLAINTEXT://0.0.0.0:9095,INTERNAL_PLAINTEXT://0.0.0.0:9092'
         - name: KAFKA_INTER_BROKER_LISTENER_NAME
           value: INTERNAL_PLAINTEXT
         - name: KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE
@@ -110,6 +110,7 @@ spec:
           value: '1'
         - name: KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS
           value: '1'
+
         volumeMounts:
         - mountPath: /etc/kafka/secrets/jaas/zk_client_jaas.conf
           subPath: zk_client_jaas.conf
@@ -167,6 +168,8 @@ spec:
           value: '-Djava.security.auth.login.config=/etc/zookeeper/secrets/jaas/zk_server_jaas.conf -Dzookeeper.kerberos.removeHostFromPrincipal=true -Dzookeeper.kerberos.removeRealmFromPrincipal=true -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider -Dzookeeper.requireClientAuthScheme=sasl'
         - name: ZOOKEEPER_SERVER_ID
           value: '1'
+        - name: ZOOKEEPER_SASL_ENABLED
+          value: 'false'
         volumeMounts:
         - mountPath: /etc/zookeeper/secrets/jaas/zk_server_jaas.conf
           subPath: zk_server_jaas.conf
index c468b2f..6b5c9c2 100644 (file)
@@ -15,7 +15,7 @@
 #  ============LICENSE_END=================================================
 #
 
-version: '3.5'
+version: '3.0'
 networks:
   default:
     external:
@@ -36,10 +36,10 @@ services:
      ZOOKEEPER_AUTOPURGE_SNAP_RETAIN_COUNT: 3
      ZOOKEEPER_AUTOPURGE_PURGE_INTERVAL: 24
      ZOOKEEPER_CLIENT_PORT: 2181
-     KAFKA_OPTS: -Djava.security.auth.login.config=/etc/zookeeper/secrets/jaas/zk_server_jaas.conf -Dzookeeper.kerberos.removeHostFromPrincipal=true -Dzookeeper.kerberos.removeRealmFromPrincipal=true -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider -Dzookeeper.requireClientAuthScheme=sasl
+     KAFKA_OPTS: -Djava.security.auth.login.config=/etc/zookeeper/secrets/jaas/zk_server_jaas.conf -Dzookeeper.kerberos.removeHostFromPrincipal=true -Dzookeeper.kerberos.removeRealmFromPrincipal=true -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider -Dzookeeper.requireClientAuthScheme=sasl -Dzookeeper.4lw.commands.whitelist=*
      ZOOKEEPER_SERVER_ID: 1
     volumes:
-      -  ./mnt/zk/zk_server_jaas.conf:/etc/zookeeper/secrets/jaas/zk_server_jaas.conf
+      -  .${MR_DMAAP_HOST_MNT_DIR}/zk/zk_server_jaas.conf:/etc/zookeeper/secrets/jaas/zk_server_jaas.conf
     networks:
       - default
     labels:
@@ -67,7 +67,7 @@ services:
     # Reduced the number of partitions only to avoid the timeout error for the first subscribe call in slow environment
     KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS: 1
    volumes:
-     -  ./mnt/kafka/zk_client_jaas.conf:/etc/kafka/secrets/jaas/zk_client_jaas.conf
+     -  .${MR_DMAAP_HOST_MNT_DIR}/kafka/zk_client_jaas.conf:/etc/kafka/secrets/jaas/zk_client_jaas.conf
    networks:
     - default
    depends_on:
@@ -85,9 +85,9 @@ services:
     environment:
      enableCadi: 'false'
     volumes:
-      - ./mnt/mr/MsgRtrApi.properties:/appl/dmaapMR1/bundleconfig/etc/appprops/MsgRtrApi.properties
-      - ./mnt/mr/logback.xml:/appl/dmaapMR1/bundleconfig/etc/logback.xml
-      - ./mnt/mr/cadi.properties:/appl/dmaapMR1/etc/cadi.properties
+      - .${MR_DMAAP_HOST_MNT_DIR}/mr/MsgRtrApi.properties:/appl/dmaapMR1/bundleconfig/etc/appprops/MsgRtrApi.properties
+      - .${MR_DMAAP_HOST_MNT_DIR}/mr/logback.xml:/appl/dmaapMR1/bundleconfig/etc/logback.xml
+      - .${MR_DMAAP_HOST_MNT_DIR}/mr/cadi.properties:/appl/dmaapMR1/etc/cadi.properties
     networks:
       - default
     depends_on:
diff --git a/test/simulator-group/dmaapmr/mnt2/kafka/zk_client_jaas.conf b/test/simulator-group/dmaapmr/mnt2/kafka/zk_client_jaas.conf
new file mode 100644 (file)
index 0000000..79a7601
--- /dev/null
@@ -0,0 +1,5 @@
+Client {
+  org.apache.zookeeper.server.auth.DigestLoginModule required
+  username="kafka"
+  password="kafka_secret";
+ };
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmr/mnt2/mr/KUBE-MsgRtrApi.properties b/test/simulator-group/dmaapmr/mnt2/mr/KUBE-MsgRtrApi.properties
new file mode 100644 (file)
index 0000000..7f7bc41
--- /dev/null
@@ -0,0 +1,166 @@
+# LICENSE_START=======================================================
+#  org.onap.dmaap
+#  ================================================================================
+#  Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+#  ================================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=========================================================
+#
+#  ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+###############################################################################
+###############################################################################
+##
+## Cambria API Server config
+##
+## Default values are shown as commented settings.
+##
+###############################################################################
+##
+## HTTP service
+##
+## 3904 is standard as of 7/29/14.
+#
+## Zookeeper Connection
+##
+## Both Cambria and Kafka make use of Zookeeper.
+##
+config.zk.servers=zookeeper:2181
+
+###############################################################################
+##
+## Kafka Connection
+##
+##        Items below are passed through to Kafka's producer and consumer
+##        configurations (after removing "kafka.")
+##        if you want to change request.required.acks it can take this one value
+#kafka.metadata.broker.list=localhost:9092,localhost:9093
+#kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
+kafka.metadata.broker.list=kaka:9092
+##kafka.request.required.acks=-1
+#kafka.client.zookeeper=${config.zk.servers}
+consumer.timeout.ms=100
+zookeeper.connection.timeout.ms=6000
+zookeeper.session.timeout.ms=20000
+zookeeper.sync.time.ms=2000
+auto.commit.interval.ms=1000
+fetch.message.max.bytes =1000000
+auto.commit.enable=false
+
+#(backoff*retries > zksessiontimeout)
+kafka.rebalance.backoff.ms=10000
+kafka.rebalance.max.retries=6
+
+
+###############################################################################
+##
+##        Secured Config
+##
+##        Some data stored in the config system is sensitive -- API keys and secrets,
+##        for example. to protect it, we use an encryption layer for this section
+##        of the config.
+##
+## The key is a base64 encode AES key. This must be created/configured for
+## each installation.
+#cambria.secureConfig.key=
+##
+## The initialization vector is a 16 byte value specific to the secured store.
+## This must be created/configured for each installation.
+#cambria.secureConfig.iv=
+
+## Southfield Sandbox
+cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
+cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
+authentication.adminSecret=fe3cCompound
+
+
+###############################################################################
+##
+## Consumer Caching
+##
+##        Kafka expects live connections from the consumer to the broker, which
+##        obviously doesn't work over connectionless HTTP requests. The Cambria
+##        server proxies HTTP requests into Kafka consumer sessions that are kept
+##        around for later re-use. Not doing so is costly for setup per request,
+##        which would substantially impact a high volume consumer's performance.
+##
+##        This complicates Cambria server failover, because we often need server
+##        A to close its connection before server B brings up the replacement.
+##
+
+## The consumer cache is normally enabled.
+#cambria.consumer.cache.enabled=true
+
+## Cached consumers are cleaned up after a period of disuse. The server inspects
+## consumers every sweepFreqSeconds and will clean up any connections that are
+## dormant for touchFreqMs.
+#cambria.consumer.cache.sweepFreqSeconds=15
+cambria.consumer.cache.touchFreqMs=120000
+##stickforallconsumerrequests=false
+## The cache is managed through ZK. The default value for the ZK connection
+## string is the same as config.zk.servers.
+#cambria.consumer.cache.zkConnect=${config.zk.servers}
+
+##
+## Shared cache information is associated with this node's name. The default
+## name is the hostname plus the HTTP service port this host runs on. (The
+## hostname is determined via InetAddress.getLocalHost ().getCanonicalHostName(),
+## which is not always adequate.) You can set this value explicitly here.
+##
+#cambria.api.node.identifier=<use-something-unique-to-this-instance>
+
+#cambria.rateLimit.maxEmptyPollsPerMinute=30
+#cambria.rateLimitActual.delay.ms=10
+
+###############################################################################
+##
+## Metrics Reporting
+##
+##        This server can report its metrics periodically on a topic.
+##
+#metrics.send.cambria.enabled=true
+#metrics.send.cambria.topic=cambria.apinode.metrics
+#msgrtr.apinode.metrics.dmaap
+#metrics.send.cambria.sendEverySeconds=60
+
+cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
+consumer.timeout=17
+default.partitions=3
+default.replicas=3
+##############################################################################
+#100mb
+maxcontentlength=10000
+
+
+##############################################################################
+#AAF Properties
+msgRtr.namespace.aaf=org.onap.dmaap.mr.topic
+msgRtr.topicfactory.aaf=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
+enforced.topic.name.AAF=org.onap.dmaap.mr
+forceAAF=false
+transidUEBtopicreqd=false
+defaultNSforUEB=org.onap.dmaap.mr
+##############################################################################
+#Mirror Maker Agent
+
+msgRtr.mirrormakeradmin.aaf=org.onap.dmaap.mr.mirrormaker|*|admin
+msgRtr.mirrormakeruser.aaf=org.onap.dmaap.mr.mirrormaker|*|user
+msgRtr.mirrormakeruser.aaf.create=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
+msgRtr.mirrormaker.timeout=15000
+msgRtr.mirrormaker.topic=org.onap.dmaap.mr.mirrormakeragent
+msgRtr.mirrormaker.consumergroup=mmagentserver
+msgRtr.mirrormaker.consumerid=1
+
+kafka.max.poll.interval.ms=300000
+kafka.heartbeat.interval.ms=60000
+kafka.session.timeout.ms=240000
+kafka.max.poll.records=1000
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmr/mnt2/mr/MsgRtrApi.properties b/test/simulator-group/dmaapmr/mnt2/mr/MsgRtrApi.properties
new file mode 100644 (file)
index 0000000..4764321
--- /dev/null
@@ -0,0 +1,166 @@
+# LICENSE_START=======================================================
+#  org.onap.dmaap
+#  ================================================================================
+#  Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+#  ================================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=========================================================
+#
+#  ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+###############################################################################
+###############################################################################
+##
+## Cambria API Server config
+##
+## Default values are shown as commented settings.
+##
+###############################################################################
+##
+## HTTP service
+##
+## 3904 is standard as of 7/29/14.
+#
+## Zookeeper Connection
+##
+## Both Cambria and Kafka make use of Zookeeper.
+##
+config.zk.servers=zookeeper:2181
+
+###############################################################################
+##
+## Kafka Connection
+##
+##        Items below are passed through to Kafka's producer and consumer
+##        configurations (after removing "kafka.")
+##        if you want to change request.required.acks it can take this one value
+#kafka.metadata.broker.list=localhost:9092,localhost:9093
+#kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
+kafka.metadata.broker.list=kafka:9092
+##kafka.request.required.acks=-1
+#kafka.client.zookeeper=${config.zk.servers}
+consumer.timeout.ms=100
+zookeeper.connection.timeout.ms=6000
+zookeeper.session.timeout.ms=20000
+zookeeper.sync.time.ms=2000
+auto.commit.interval.ms=1000
+fetch.message.max.bytes =1000000
+auto.commit.enable=false
+
+#(backoff*retries > zksessiontimeout)
+kafka.rebalance.backoff.ms=10000
+kafka.rebalance.max.retries=6
+
+
+###############################################################################
+##
+##        Secured Config
+##
+##        Some data stored in the config system is sensitive -- API keys and secrets,
+##        for example. to protect it, we use an encryption layer for this section
+##        of the config.
+##
+## The key is a base64 encode AES key. This must be created/configured for
+## each installation.
+#cambria.secureConfig.key=
+##
+## The initialization vector is a 16 byte value specific to the secured store.
+## This must be created/configured for each installation.
+#cambria.secureConfig.iv=
+
+## Southfield Sandbox
+cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
+cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
+authentication.adminSecret=fe3cCompound
+
+
+###############################################################################
+##
+## Consumer Caching
+##
+##        Kafka expects live connections from the consumer to the broker, which
+##        obviously doesn't work over connectionless HTTP requests. The Cambria
+##        server proxies HTTP requests into Kafka consumer sessions that are kept
+##        around for later re-use. Not doing so is costly for setup per request,
+##        which would substantially impact a high volume consumer's performance.
+##
+##        This complicates Cambria server failover, because we often need server
+##        A to close its connection before server B brings up the replacement.
+##
+
+## The consumer cache is normally enabled.
+#cambria.consumer.cache.enabled=true
+
+## Cached consumers are cleaned up after a period of disuse. The server inspects
+## consumers every sweepFreqSeconds and will clean up any connections that are
+## dormant for touchFreqMs.
+#cambria.consumer.cache.sweepFreqSeconds=15
+cambria.consumer.cache.touchFreqMs=120000
+##stickforallconsumerrequests=false
+## The cache is managed through ZK. The default value for the ZK connection
+## string is the same as config.zk.servers.
+#cambria.consumer.cache.zkConnect=${config.zk.servers}
+
+##
+## Shared cache information is associated with this node's name. The default
+## name is the hostname plus the HTTP service port this host runs on. (The
+## hostname is determined via InetAddress.getLocalHost ().getCanonicalHostName(),
+## which is not always adequate.) You can set this value explicitly here.
+##
+#cambria.api.node.identifier=<use-something-unique-to-this-instance>
+
+#cambria.rateLimit.maxEmptyPollsPerMinute=30
+#cambria.rateLimitActual.delay.ms=10
+
+###############################################################################
+##
+## Metrics Reporting
+##
+##        This server can report its metrics periodically on a topic.
+##
+#metrics.send.cambria.enabled=true
+#metrics.send.cambria.topic=cambria.apinode.metrics
+#msgrtr.apinode.metrics.dmaap
+#metrics.send.cambria.sendEverySeconds=60
+
+cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
+consumer.timeout=17
+default.partitions=3
+default.replicas=3
+##############################################################################
+#100mb
+maxcontentlength=10000
+
+
+##############################################################################
+#AAF Properties
+msgRtr.namespace.aaf=org.onap.dmaap.mr.topic
+msgRtr.topicfactory.aaf=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
+enforced.topic.name.AAF=org.onap.dmaap.mr
+forceAAF=false
+transidUEBtopicreqd=false
+defaultNSforUEB=org.onap.dmaap.mr
+##############################################################################
+#Mirror Maker Agent
+
+msgRtr.mirrormakeradmin.aaf=org.onap.dmaap.mr.mirrormaker|*|admin
+msgRtr.mirrormakeruser.aaf=org.onap.dmaap.mr.mirrormaker|*|user
+msgRtr.mirrormakeruser.aaf.create=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
+msgRtr.mirrormaker.timeout=15000
+msgRtr.mirrormaker.topic=org.onap.dmaap.mr.mirrormakeragent
+msgRtr.mirrormaker.consumergroup=mmagentserver
+msgRtr.mirrormaker.consumerid=1
+
+kafka.max.poll.interval.ms=300000
+kafka.heartbeat.interval.ms=60000
+kafka.session.timeout.ms=240000
+kafka.max.poll.records=1000
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmr/mnt2/mr/cadi.properties b/test/simulator-group/dmaapmr/mnt2/mr/cadi.properties
new file mode 100644 (file)
index 0000000..3cd26ad
--- /dev/null
@@ -0,0 +1,19 @@
+#aaf_locate_url=https://aaf-onap-test.osaaf.org:8095\
+aaf_url=https://AAF_LOCATE_URL/onap.org.osaaf.aaf.service:2.1
+aaf_env=DEV
+aaf_lur=org.onap.aaf.cadi.aaf.v2_0.AAFLurPerm
+
+cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks
+cadi_truststore_password=8FyfX+ar;0$uZQ0h9*oXchNX
+
+cadi_keyfile=/appl/dmaapMR1/etc/org.onap.dmaap.mr.keyfile
+
+cadi_alias=dmaapmr@mr.dmaap.onap.org
+cadi_keystore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.p12
+cadi_keystore_password=GDQttV7)BlOvWMf6F7tz&cjy
+cadi_x509_issuers=CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_7, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US
+
+cadi_loglevel=INFO
+cadi_protocols=TLSv1.1,TLSv1.2
+cadi_latitude=37.78187
+cadi_longitude=-122.26147
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmr/mnt2/mr/logback.xml b/test/simulator-group/dmaapmr/mnt2/mr/logback.xml
new file mode 100644 (file)
index 0000000..f02a2db
--- /dev/null
@@ -0,0 +1,208 @@
+<!--
+     ============LICENSE_START=======================================================
+     Copyright © 2019 AT&T Intellectual Property. All rights reserved.
+     ================================================================================
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+           http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+     ============LICENSE_END=========================================================
+ -->
+
+<configuration scan="true" scanPeriod="3 seconds" debug="false">
+  <contextName>${module.ajsc.namespace.name}</contextName>
+  <jmxConfigurator />
+  <property name="logDirectory" value="${AJSC_HOME}/log" />
+  <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+    <filter class="ch.qos.logback.classic.filter.LevelFilter">
+      <level>ERROR</level>
+      <onMatch>ACCEPT</onMatch>
+      <onMismatch>DENY</onMismatch>
+    </filter>
+    <encoder>
+      <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} - %msg%n
+      </pattern>
+    </encoder>
+  </appender>
+
+  <appender name="INFO" class="ch.qos.logback.core.ConsoleAppender">
+    <filter class="ch.qos.logback.classic.filter.LevelFilter">
+      <level>INFO</level>
+      <onMatch>ACCEPT</onMatch>
+      <onMismatch>DENY</onMismatch>
+    </filter>
+  </appender>
+
+  <appender name="DEBUG" class="ch.qos.logback.core.ConsoleAppender">
+
+    <encoder>
+      <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+    </encoder>
+  </appender>
+
+  <appender name="ERROR" class="ch.qos.logback.core.ConsoleAppender"> class="ch.qos.logback.core.ConsoleAppender">
+    <filter class="ch.qos.logback.classic.filter.LevelFilter">
+      <level>ERROR</level>
+      <onMatch>ACCEPT</onMatch>
+      <onMismatch>DENY</onMismatch>
+    </filter>
+    <encoder>
+      <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+    </encoder>
+  </appender>
+
+
+  <!-- Msgrtr related loggers -->
+  <logger name="org.onap.dmaap.dmf.mr.service" level="INFO" />
+  <logger name="org.onap.dmaap.dmf.mr.service.impl" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.resources" level="INFO" />
+  <logger name="org.onap.dmaap.dmf.mr.resources.streamReaders" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.backends" level="INFO" />
+  <logger name="org.onap.dmaap.dmf.mr.backends.kafka" level="INFO" />
+  <logger name="org.onap.dmaap.dmf.mr.backends.memory" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.beans" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.constants" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.exception" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.listener" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.metabroker" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.metrics.publisher" level="INFO" />
+  <logger name="org.onap.dmaap.dmf.mr.metrics.publisher.impl" level="INFO" />
+
+
+
+  <logger name="org.onap.dmaap.dmf.mr.security" level="INFO" />
+  <logger name="org.onap.dmaap.dmf.mr.security.impl" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.transaction" level="INFO" />
+  <logger name="com.att.dmf.mr.transaction.impl" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.metabroker" level="INFO" />
+  <logger name="org.onap.dmaap.dmf.mr.metabroker" level="INFO" />
+
+  <logger name="org.onap.dmaap.dmf.mr.utils" level="INFO" />
+  <logger name="org.onap.dmaap.mr.filter" level="INFO" />
+
+  <!--<logger name="com.att.nsa.cambria.*" level="INFO" />-->
+
+  <!-- Msgrtr loggers in ajsc -->
+  <logger name="org.onap.dmaap.service" level="INFO" />
+  <logger name="org.onap.dmaap" level="INFO" />
+
+
+  <!-- Spring related loggers -->
+  <logger name="org.springframework" level="WARN" additivity="false"/>
+  <logger name="org.springframework.beans" level="WARN" additivity="false"/>
+  <logger name="org.springframework.web" level="WARN" additivity="false" />
+  <logger name="com.blog.spring.jms" level="WARN" additivity="false" />
+
+  <!-- AJSC Services (bootstrap services) -->
+  <logger name="ajsc" level="WARN" additivity="false"/>
+  <logger name="ajsc.RouteMgmtService" level="INFO" additivity="false"/>
+  <logger name="ajsc.ComputeService" level="INFO" additivity="false" />
+  <logger name="ajsc.VandelayService" level="WARN" additivity="false"/>
+  <logger name="ajsc.FilePersistenceService" level="WARN" additivity="false"/>
+  <logger name="ajsc.UserDefinedJarService" level="WARN" additivity="false" />
+  <logger name="ajsc.UserDefinedBeansDefService" level="WARN" additivity="false" />
+  <logger name="ajsc.LoggingConfigurationService" level="WARN" additivity="false" />
+
+  <!-- AJSC related loggers (DME2 Registration, csi logging, restlet, servlet
+    logging) -->
+  <logger name="ajsc.utils" level="WARN" additivity="false"/>
+  <logger name="ajsc.utils.DME2Helper" level="INFO" additivity="false" />
+  <logger name="ajsc.filters" level="DEBUG" additivity="false" />
+  <logger name="ajsc.beans.interceptors" level="DEBUG" additivity="false" />
+  <logger name="ajsc.restlet" level="DEBUG" additivity="false" />
+  <logger name="ajsc.servlet" level="DEBUG" additivity="false" />
+  <logger name="com.att" level="WARN" additivity="false" />
+  <logger name="com.att.ajsc.csi.logging" level="WARN" additivity="false" />
+  <logger name="com.att.ajsc.filemonitor" level="WARN" additivity="false"/>
+
+  <logger name="com.att.nsa.dmaap.util" level="INFO" additivity="false"/>
+  <logger name="com.att.cadi.filter" level="INFO" additivity="false" />
+
+
+  <!-- Other Loggers that may help troubleshoot -->
+  <logger name="net.sf" level="WARN" additivity="false" />
+  <logger name="org.apache.commons.httpclient" level="WARN" additivity="false"/>
+  <logger name="org.apache.commons" level="WARN" additivity="false" />
+  <logger name="org.apache.coyote" level="WARN" additivity="false"/>
+  <logger name="org.apache.jasper" level="WARN" additivity="false"/>
+
+  <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging.
+    May aid in troubleshooting) -->
+  <logger name="org.apache.camel" level="WARN" additivity="false" />
+  <logger name="org.apache.cxf" level="WARN" additivity="false" />
+  <logger name="org.apache.camel.processor.interceptor" level="WARN" additivity="false"/>
+  <logger name="org.apache.cxf.jaxrs.interceptor" level="WARN" additivity="false" />
+  <logger name="org.apache.cxf.service" level="WARN" additivity="false" />
+  <logger name="org.restlet" level="DEBUG" additivity="false" />
+  <logger name="org.apache.camel.component.restlet" level="DEBUG" additivity="false" />
+  <logger name="org.apache.kafka" level="DEBUG" additivity="false" />
+  <logger name="org.apache.zookeeper" level="INFO" additivity="false" />
+  <logger name="org.I0Itec.zkclient" level="DEBUG" additivity="false" />
+
+  <!-- logback internals logging -->
+  <logger name="ch.qos.logback.classic" level="INFO" additivity="false"/>
+  <logger name="ch.qos.logback.core" level="INFO" additivity="false" />
+
+  <!-- logback jms appenders & loggers definition starts here -->
+  <!-- logback jms appenders & loggers definition starts here -->
+  <appender name="auditLogs" class="ch.qos.logback.core.ConsoleAppender">
+    <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+    </filter>
+    <encoder>
+      <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+    </encoder>
+  </appender>
+  <appender name="perfLogs" class="ch.qos.logback.core.ConsoleAppender">
+    <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+    </filter>
+    <encoder>
+      <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+    </encoder>
+  </appender>
+  <appender name="ASYNC-audit" class="ch.qos.logback.classic.AsyncAppender">
+    <queueSize>1000</queueSize>
+    <discardingThreshold>0</discardingThreshold>
+    <appender-ref ref="Audit-Record-Queue" />
+  </appender>
+
+  <logger name="AuditRecord" level="INFO" additivity="FALSE">
+    <appender-ref ref="STDOUT" />
+  </logger>
+  <logger name="AuditRecord_DirectCall" level="INFO" additivity="FALSE">
+    <appender-ref ref="STDOUT" />
+  </logger>
+  <appender name="ASYNC-perf" class="ch.qos.logback.classic.AsyncAppender">
+    <queueSize>1000</queueSize>
+    <discardingThreshold>0</discardingThreshold>
+    <appender-ref ref="Performance-Tracker-Queue" />
+  </appender>
+  <logger name="PerfTrackerRecord" level="INFO" additivity="FALSE">
+    <appender-ref ref="ASYNC-perf" />
+    <appender-ref ref="perfLogs" />
+  </logger>
+  <!-- logback jms appenders & loggers definition ends here -->
+
+  <root level="DEBUG">
+    <appender-ref ref="DEBUG" />
+    <appender-ref ref="ERROR" />
+    <appender-ref ref="INFO" />
+    <appender-ref ref="STDOUT" />
+  </root>
+
+</configuration>
diff --git a/test/simulator-group/dmaapmr/mnt2/zk/zk_server_jaas.conf b/test/simulator-group/dmaapmr/mnt2/zk/zk_server_jaas.conf
new file mode 100644 (file)
index 0000000..3d2767f
--- /dev/null
@@ -0,0 +1,4 @@
+Server {
+       org.apache.zookeeper.server.auth.DigestLoginModule required
+       user_kafka="kafka_secret";
+};
\ No newline at end of file
index 0a02b4f..e5d5d8e 100644 (file)
@@ -32,7 +32,7 @@ spec:
   type: ClusterIP
   ports:
   - port: 9092
-    targetPort: 9092
+    targetPort: 9095
     protocol: TCP
     name: http
   selector:
@@ -54,4 +54,82 @@ spec:
     protocol: TCP
     name: http
   selector:
+    run: $MR_ZOOKEEPER_APP_NAME
+
+
+# ---
+# apiVersion: v1
+# kind: Service
+# metadata:
+#   name: dmaap-mr
+#   namespace: $KUBE_ONAP_NAMESPACE
+#   labels:
+#     run: $MR_DMAAP_KUBE_APP_NAME
+#     autotest: DMAAPMR
+# spec:
+#   type: ClusterIP
+#   ports:
+#   - port: $MR_EXTERNAL_PORT
+#     targetPort: $MR_INTERNAL_PORT
+#     protocol: TCP
+#     name: http
+#   - port: $MR_EXTERNAL_SECURE_PORT
+#     targetPort: $MR_INTERNAL_SECURE_PORT
+#     protocol: TCP
+#     name: https
+#   selector:
+#     run: $MR_DMAAP_KUBE_APP_NAME
+# ---
+# apiVersion: v1
+# kind: Service
+# metadata:
+#   name: dmaap-kafka
+#   namespace: $KUBE_ONAP_NAMESPACE
+#   labels:
+#     run: $MR_KAFKA_BWDS_NAME
+#     autotest: DMAAPMR
+# spec:
+#   type: ClusterIP
+#   ports:
+#   - port: 9092
+#     targetPort: 9092
+#     protocol: TCP
+#     name: http
+#   selector:
+#     run: $MR_KAFKA_BWDS_NAME
+# ---
+# apiVersion: v1
+# kind: Service
+# metadata:
+#   name: kafka
+#   namespace: $KUBE_ONAP_NAMESPACE
+#   labels:
+#     run: $MR_KAFKA_BWDS_NAME
+#     autotest: DMAAPMR
+# spec:
+#   type: ClusterIP
+#   ports:
+#   - port: 9092
+#     targetPort: 9092
+#     protocol: TCP
+#     name: http
+#   selector:
+#     run: $MR_KAFKA_BWDS_NAME
+# ---
+# apiVersion: v1
+# kind: Service
+# metadata:
+#   name: dmaap-zookeeper
+#   namespace: $KUBE_ONAP_NAMESPACE
+#   labels:
+#     run: $MR_ZOOKEEPER_APP_NAME
+#     autotest: DMAAPMR
+# spec:
+#   type: ClusterIP
+#   ports:
+#   - port: 2181
+#     targetPort: 2181
+#     protocol: TCP
+#     name: http
+#   selector:
     run: $MR_ZOOKEEPER_APP_NAME
\ No newline at end of file
diff --git a/test/simulator-group/kubeproxy/docker-compose.yml b/test/simulator-group/kubeproxy/docker-compose.yml
new file mode 100644 (file)
index 0000000..7f0f349
--- /dev/null
@@ -0,0 +1,33 @@
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2020 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+version: '3.0'
+networks:
+  default:
+    external:
+      name: ${DOCKER_SIM_NWNAME}
+services:
+  kubeproxy:
+    image: ${KUBE_PROXY_IMAGE}
+    container_name: ${KUBE_PROXY_APP_NAME}
+    ports:
+    - ${KUBE_PROXY_DOCKER_EXTERNAL_PORT}:${KUBE_PROXY_INTERNAL_PORT}
+    - ${KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT}:${KUBE_PROXY_INTERNAL_SECURE_PORT}
+    - ${KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT}:${KUBE_PROXY_WEB_INTERNAL_PORT}
+    - ${KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT}:${KUBE_PROXY_WEB_INTERNAL_SECURE_PORT}
+    labels:
+      - "nrttest_app=KUBEPROXY"
+      - "nrttest_dp=${KUBE_PROXY_DISPLAY_NAME}"
index 790ee87..35cf6ea 100644 (file)
@@ -3,11 +3,14 @@ kind: Pod
 metadata:
   name: pvc-cleaner
   namespace: $PVC_CLEANER_NAMESPACE
+  labels:
+    run: $PVC_CLEANER_APP_NAME
+    autotest: PVCCLEANER
 spec:
   restartPolicy: Never
   containers:
   - name: pvc-cleaner
-    image: "ubuntu:20.10"
+    image: $PVC_CLEANER_IMAGE
     command: ["/bin/sh","-c"]
     args: ["rm -rf $PVC_CLEANER_RM_PATH/*"]
     volumeMounts:
index c139730..36e4af1 100644 (file)
@@ -2,7 +2,7 @@ apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   name: $RIC_SIM_SET_NAME
-  namespace: $KUBE_NONRTRIC_NAMESPACE
+  namespace: $KUBE_A1SIM_NAMESPACE
   labels:
     run: $RIC_SIM_SET_NAME
     autotest: RICSIM
index 37fad42..a6358c7 100644 (file)
@@ -23,6 +23,7 @@ networks:
   default:
     external:
       name: ${DOCKER_SIM_NWNAME}
+
 services:
   g1:
     image: ${RIC_SIM_IMAGE}
@@ -77,3 +78,39 @@ services:
     labels:
       - "nrttest_app=RICSIM"
       - "nrttest_dp=${RIC_SIM_DISPLAY_NAME}"
+
+  g4:
+    image: ${RIC_SIM_IMAGE}
+    networks:
+      - default
+    ports:
+      - ${RIC_SIM_INTERNAL_PORT}/tcp
+      - ${RIC_SIM_INTERNAL_SECURE_PORT}/tcp
+    environment:
+      - A1_VERSION=${G4_A1_VERSION}
+      - REMOTE_HOSTS_LOGGING=1
+      - ALLOW_HTTP=true
+      - DUPLICATE_CHECK=1
+    volumes:
+      - ${RIC_SIM_CERT_MOUNT_DIR}:/usr/src/app/cert:ro
+    labels:
+      - "nrttest_app=RICSIM"
+      - "nrttest_dp=${RIC_SIM_DISPLAY_NAME}"
+
+  g5:
+    image: ${RIC_SIM_IMAGE}
+    networks:
+      - default
+    ports:
+      - ${RIC_SIM_INTERNAL_PORT}/tcp
+      - ${RIC_SIM_INTERNAL_SECURE_PORT}/tcp
+    environment:
+      - A1_VERSION=${G5_A1_VERSION}
+      - REMOTE_HOSTS_LOGGING=1
+      - ALLOW_HTTP=true
+      - DUPLICATE_CHECK=1
+    volumes:
+      - ${RIC_SIM_CERT_MOUNT_DIR}:/usr/src/app/cert:ro
+    labels:
+      - "nrttest_app=RICSIM"
+      - "nrttest_dp=${RIC_SIM_DISPLAY_NAME}"
\ No newline at end of file
index 902bb64..a1a3f04 100644 (file)
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: $RIC_SIM_SET_NAME
-  namespace: $KUBE_NONRTRIC_NAMESPACE
+  namespace: $KUBE_A1SIM_NAMESPACE
   labels:
     run: $RIC_SIM_SET_NAME
     autotest: RICSIM
index 13d4739..45f0f08 100644 (file)
@@ -2,7 +2,7 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: $SDNC_APP_NAME
-  namespace: $KUBE_NONRTRIC_NAMESPACE
+  namespace: $KUBE_SNDC_NAMESPACE
   labels:
     run: $SDNC_APP_NAME
     autotest: SDNC
@@ -46,7 +46,7 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: $SDNC_DB_APP_NAME
-  namespace: $KUBE_NONRTRIC_NAMESPACE
+  namespace: $KUBE_SNDC_NAMESPACE
   labels:
     run: $SDNC_DB_APP_NAME
     autotest: SDNC
index 48aeaf9..8861fe0 100644 (file)
@@ -2,7 +2,7 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: $SDNC_APP_NAME
-  namespace: $KUBE_NONRTRIC_NAMESPACE
+  namespace: $KUBE_SNDC_NAMESPACE
   labels:
     run: $SDNC_APP_NAME
     autotest: SDNC
@@ -62,7 +62,7 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: $SDNC_DB_APP_NAME
-  namespace: $KUBE_NONRTRIC_NAMESPACE
+  namespace: $KUBE_SNDC_NAMESPACE
   labels:
     run: $SDNC_DB_APP_NAME
     autotest: SDNC
index 9d81df5..03483a0 100644 (file)
@@ -13,7 +13,7 @@
 #   See the License for the specific language governing permissions and
 #   limitations under the License.
 # ==================================================================================
-version: '3'
+version: '3.0'
 
 networks:
   default:
index 6a9cd1d..45af8b6 100644 (file)
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: $SDNC_APP_NAME
-  namespace: $KUBE_NONRTRIC_NAMESPACE
+  namespace: $KUBE_SNDC_NAMESPACE
   labels:
     run: $SDNC_APP_NAME
     autotest: SDNC
@@ -24,7 +24,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: dbhost
-  namespace: $KUBE_NONRTRIC_NAMESPACE
+  namespace: $KUBE_SNDC_NAMESPACE
   labels:
     run: $SDNC_DB_APP_NAME
     autotest: SDNC
@@ -42,7 +42,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: sdnctldb01
-  namespace: $KUBE_NONRTRIC_NAMESPACE
+  namespace: $KUBE_SNDC_NAMESPACE
   labels:
     run: $SDNC_DB_APP_NAME
     autotest: SDNC