Merge "NONRTRIC - ECS Persistent storage of EI Jobs"
authorJohn Keeney <john.keeney@est.tech>
Fri, 30 Apr 2021 09:23:32 +0000 (09:23 +0000)
committerGerrit Code Review <gerrit@o-ran-sc.org>
Fri, 30 Apr 2021 09:23:32 +0000 (09:23 +0000)
45 files changed:
test/auto-test/.gitignore
test/auto-test/FTC1.sh
test/auto-test/FTC10.sh
test/auto-test/FTC100.sh
test/auto-test/FTC110.sh
test/auto-test/FTC1100.sh
test/auto-test/FTC150.sh
test/auto-test/FTC1800.sh
test/auto-test/FTC2001.sh
test/auto-test/FTC300.sh
test/auto-test/FTC310.sh
test/auto-test/FTC350.sh
test/auto-test/FTC800.sh
test/auto-test/FTC805.sh [new file with mode: 0755]
test/auto-test/FTC810.sh
test/auto-test/FTC850.sh
test/auto-test/FTC900.sh
test/auto-test/ONAP_UC.sh
test/auto-test/PM_DEMO.sh
test/auto-test/PM_EI_DEMO.sh
test/auto-test/README.md
test/common/.gitignore
test/common/README.md
test/common/agent_api_functions.sh
test/common/cr_api_functions.sh
test/common/delete_policies_process.py
test/common/ecs_api_functions.sh
test/common/kube_proxy_api_functions.sh
test/common/mr_api_functions.sh
test/common/prodstub_api_functions.sh
test/common/ricsimulator_api_functions.sh
test/common/test_env-onap-guilin.sh
test/common/test_env-onap-honolulu.sh
test/common/test_env-onap-istanbul.sh [new file with mode: 0644]
test/common/test_env-oran-cherry.sh
test/common/test_env-oran-dawn.sh
test/common/testcase_common.sh
test/simulator-group/ecs/app.yaml
test/simulator-group/ecs/pvc.yaml
test/simulator-group/kubeproxy/svc.yaml
test/simulator-group/policy_agent/app.yaml
test/simulator-group/policy_agent/application.yaml
test/simulator-group/policy_agent/docker-compose.yml
test/simulator-group/policy_agent/pv.yaml [new file with mode: 0644]
test/simulator-group/policy_agent/pvc.yaml [new file with mode: 0644]

index 6009a54..c40137d 100644 (file)
@@ -16,3 +16,4 @@ logs
 .result*
 tmp
 NO-CHECKIN
+TEST_TMP
index 6ab87e3..f39ebfb 100755 (executable)
@@ -34,7 +34,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU  ORAN-CHERRY ORAN-DAWN"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-DAWN"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index c8a65ce..2e443cb 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU  ORAN-CHERRY ORAN-DAWN"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-DAWN"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index fd6b9a4..f58ba16 100755 (executable)
@@ -34,7 +34,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU  ORAN-CHERRY ORAN-DAWN"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-DAWN"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index 1107193..c9108f8 100755 (executable)
@@ -34,7 +34,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU  ORAN-CHERRY ORAN-DAWN"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-DAWN"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index 0334246..6c74daf 100755 (executable)
@@ -34,7 +34,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-HONOLULU  ORAN-CHERRY ORAN-DAWN"
+SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-DAWN"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index abeac6a..51bcbe4 100755 (executable)
@@ -28,7 +28,7 @@ KUBE_INCLUDED_IMAGES=" RICSIM SDNC KUBEPROXY"
 KUBE_PRESTARTED_IMAGES=" "
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU  ORAN-CHERRY ORAN-DAWN"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-DAWN"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index bbee77b..f3d9501 100755 (executable)
@@ -34,7 +34,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-HONOLULU  ORAN-CHERRY ORAN-DAWN"
+SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-DAWN"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
@@ -215,7 +215,9 @@ else
     ecs_equal json:A1-EI/v1/eijobs?eiTypeId=type1 $(($NUM_JOBS/5))
 fi
 
-restart_ecs
+stop_ecs
+
+start_stopped_ecs
 
 set_ecs_trace
 
index 7ebd682..c62a135 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-HONOLULU  ORAN-CHERRY ORAN-DAWN"
+SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-DAWN"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index ca10032..1efdfde 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU  ORAN-CHERRY ORAN-DAWN"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-DAWN"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index 90bc05d..616751b 100755 (executable)
@@ -24,7 +24,7 @@ TC_ONELINE_DESCR="Resync of RIC via changes in the consul config or pushed confi
 DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU  ORAN-CHERRY ORAN-DAWN"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-DAWN"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER"
 
index 9d47caa..4390cdb 100755 (executable)
@@ -28,7 +28,7 @@ KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
 KUBE_PRESTARTED_IMAGES=""
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU  ORAN-CHERRY ORAN-DAWN"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-DAWN"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index 7398481..40c4f13 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU  ORAN-CHERRY ORAN-DAWN"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-DAWN"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
diff --git a/test/auto-test/FTC805.sh b/test/auto-test/FTC805.sh
new file mode 100755 (executable)
index 0000000..13f534e
--- /dev/null
@@ -0,0 +1,282 @@
+#!/bin/bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2020 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+TC_ONELINE_DESCR="PMS Create 10000 policies and restart, test policies persistency"
+
+#App names to include in the test when running docker, space separated list
+DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR PA RICSIM SDNC NGW"
+
+#App names to include in the test when running kubernetes, space separated list
+KUBE_INCLUDED_IMAGES="CP CR PA RICSIM SDNC KUBEPROXY NGW"
+#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
+KUBE_PRESTARTED_IMAGES=""
+
+#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
+#the image is not configured in the supplied env_file
+#Used for images not applicable to all supported profile
+CONDITIONALLY_IGNORED_IMAGES="NGW"
+
+#Supported test environment profiles
+SUPPORTED_PROFILES="ONAP-ISTANBUL ORAN-DAWN"
+#Supported run modes
+SUPPORTED_RUNMODES="DOCKER KUBE"
+
+. ../common/testcase_common.sh  $@
+. ../common/agent_api_functions.sh
+. ../common/ricsimulator_api_functions.sh
+. ../common/control_panel_api_functions.sh
+. ../common/controller_api_functions.sh
+. ../common/consul_cbs_functions.sh
+. ../common/cr_api_functions.sh
+. ../common/kube_proxy_api_functions.sh
+. ../common/gateway_api_functions.sh
+
+setup_testenvironment
+
+#### TEST BEGIN ####
+
+#Local vars in test script
+##########################
+
+# Tested variants of REST/DMAAP/SDNC config
+TESTED_VARIANTS="REST"
+
+#Test agent and simulator protocol versions (others are http only)
+TESTED_PROTOCOLS="HTTP"
+
+NUM_RICS=5
+NUM_POLICIES_PER_RIC=2000
+
+
+generate_policy_uuid
+
+if [ "$PMS_VERSION" == "V2" ]; then
+    notificationurl=$CR_SERVICE_PATH"/test"
+else
+    notificationurl=""
+fi
+
+for __httpx in $TESTED_PROTOCOLS ; do
+    for interface in $TESTED_VARIANTS ; do
+
+        echo "#####################################################################"
+        echo "#####################################################################"
+        echo "### Testing agent: "$interface" and "$__httpx
+        echo "#####################################################################"
+        echo "#####################################################################"
+
+        if [ $__httpx == "HTTPS" ]; then
+            use_cr_https
+            use_simulator_https
+            if [[ $interface = *"SDNC"* ]]; then
+                use_sdnc_https
+            fi
+            use_agent_rest_https
+        else
+            use_cr_http
+            use_simulator_http
+            if [[ $interface = *"SDNC"* ]]; then
+                use_sdnc_http
+            fi
+            use_agent_rest_http
+        fi
+
+        # Clean container and start all needed containers #
+        clean_environment
+
+        if [ $RUNMODE == "KUBE" ]; then
+            start_kube_proxy
+        fi
+
+        start_ric_simulators ricsim_g1 $NUM_RICS STD_2.0.0
+
+        start_control_panel $SIM_GROUP/$CONTROL_PANEL_COMPOSE_DIR/$CONTROL_PANEL_CONFIG_FILE
+
+        if [ ! -z "$NRT_GATEWAY_APP_NAME" ]; then
+            start_gateway $SIM_GROUP/$NRT_GATEWAY_COMPOSE_DIR/$NRT_GATEWAY_CONFIG_FILE
+        fi
+
+        start_policy_agent NOPROXY $SIM_GROUP/$POLICY_AGENT_COMPOSE_DIR/$POLICY_AGENT_CONFIG_FILE
+
+        set_agent_debug
+
+        if [ $RUNMODE == "DOCKER" ]; then
+            start_consul_cbs
+        fi
+
+        if [[ $interface = *"SDNC"* ]]; then
+            start_sdnc
+            prepare_consul_config      SDNC  ".consul_config.json"
+        else
+            prepare_consul_config      NOSDNC  ".consul_config.json"
+        fi
+
+        if [ $RUNMODE == "KUBE" ]; then
+            agent_load_config                       ".consul_config.json"
+        else
+            consul_config_app                      ".consul_config.json"
+        fi
+
+        start_cr
+
+        api_get_status 200
+
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            sim_print ricsim_g1_$i interface
+        done
+
+        echo "Load policy type in group 1 simulators"
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            sim_put_policy_type 201 ricsim_g1_$i STD_QOS_0_2_0 testdata/STD2/sim_qos.json
+        done
+
+        if [ "$PMS_VERSION" == "V2" ]; then
+            api_equal json:policy-types 1 300  #Wait for the agent to refresh types from the simulator
+        else
+            api_equal json:policy_types 1 300  #Wait for the agent to refresh types from the simulator
+        fi
+
+        api_put_service 201 "serv1" 600 "$CR_SERVICE_PATH/1"
+
+        echo "Check the number of types in the agent for each ric is 1"
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            if [ "$PMS_VERSION" == "V2" ]; then
+                api_equal json:policy-types?ric_id=ricsim_g1_$i 1 120
+            else
+                api_equal json:policy_types?ric=ricsim_g1_$i 1 120
+            fi
+        done
+
+        START_ID=2000
+
+        start_timer "Create $((NUM_POLICIES_PER_RIC*$NUM_RICS)) policies over $interface using "$__httpx
+
+        api_put_policy_parallel 201 "serv1" ricsim_g1_ $NUM_RICS STD_QOS_0_2_0 $START_ID NOTRANSIENT $notificationurl testdata/STD/pi1_template.json $NUM_POLICIES_PER_RIC 7
+
+        print_timer "Create $((NUM_POLICIES_PER_RIC*$NUM_RICS)) policies over $interface using "$__httpx
+
+        INSTANCES=$(($NUM_RICS*$NUM_POLICIES_PER_RIC))
+        api_equal json:policies $INSTANCES
+
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            sim_equal ricsim_g1_$i num_instances $NUM_POLICIES_PER_RIC
+        done
+
+        stop_policy_agent
+
+        start_stopped_policy_agent
+
+        set_agent_debug
+
+        api_equal json:policies $INSTANCES 500
+
+        stop_policy_agent
+
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            sim_post_delete_instances 200 ricsim_g1_$i
+        done
+
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            sim_equal ricsim_g1_$i num_instances 0
+        done
+
+        start_stopped_policy_agent
+
+        set_agent_debug
+
+        start_timer "Restore $((NUM_POLICIES_PER_RIC*$NUM_RICS)) policies after restart over $interface using "$__httpx
+
+
+        api_equal json:policies $INSTANCES 500
+
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            sim_equal ricsim_g1_$i num_instances $NUM_POLICIES_PER_RIC 500
+        done
+
+        print_timer "Restore $((NUM_POLICIES_PER_RIC*$NUM_RICS)) policies after restart over $interface using "$__httpx
+
+        start_timer "Delete $((NUM_POLICIES_PER_RIC*$NUM_RICS)) policies over $interface using "$__httpx
+
+        api_delete_policy_parallel 204 $NUM_RICS $START_ID $NUM_POLICIES_PER_RIC 7
+
+        print_timer "Delete $((NUM_POLICIES_PER_RIC*$NUM_RICS)) policies over $interface using "$__httpx
+
+        api_equal json:policies 0
+
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            sim_equal ricsim_g1_$i num_instances 0
+        done
+
+        stop_policy_agent
+
+        start_stopped_policy_agent
+
+        set_agent_debug
+
+        api_equal json:policies 0
+
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            sim_equal ricsim_g1_$i num_instances 0
+        done
+
+        sleep_wait 200
+
+        api_equal json:policies 0
+
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            sim_equal ricsim_g1_$i num_instances 0
+        done
+
+
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            if [ $interface == "REST+SDNC" ]; then
+                sim_contains_str ricsim_g1_$i remote_hosts $SDNC_APP_NAME
+            else
+                sim_contains_str ricsim_g1_$i remote_hosts $POLICY_AGENT_APP_NAME
+            fi
+        done
+
+        check_policy_agent_logs
+        if [[ $interface = *"SDNC"* ]]; then
+            check_sdnc_logs
+        fi
+
+        store_logs          "${__httpx}__${interface}"
+
+    done
+
+done
+
+
+#### TEST COMPLETE ####
+
+print_result
+
+auto_clean_environment
\ No newline at end of file
index 7ff71d0..44015d6 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU  ORAN-CHERRY ORAN-DAWN"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-DAWN"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index 20ece58..451da3f 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU  ORAN-CHERRY ORAN-DAWN"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-DAWN"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
@@ -64,10 +64,6 @@ TESTED_PROTOCOLS="HTTP HTTPS"
 NUM_RICS=20
 NUM_POLICIES_PER_RIC=500
 
-
-NUM_RICS=8
-NUM_POLICIES_PER_RIC=11
-
 generate_policy_uuid
 
 if [ "$PMS_VERSION" == "V2" ]; then
index 7f5ca20..33ebd69 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU  ORAN-CHERRY ORAN-DAWN"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-DAWN"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index 1001655..1edefbd 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-HONOLULU"
+SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index 34a8165..f9d08b5 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU  ORAN-CHERRY ORAN-DAWN"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-DAWN"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index 9d9d559..66b6454 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-HONOLULU  ORAN-CHERRY ORAN-DAWN"
+SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-DAWN"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index 871eb98..8882ce7 100644 (file)
@@ -49,9 +49,14 @@ Note that ECS was not available before oran cherry so a test script without ECS
 
 ONAP HONOLULU
 =============
-./PM_EI_DEMO.sh remote-remove  docker   --env-file ../common/test_env-onap-honolulu.sh
-./PM_EI_DEMO.sh remote-remove  kube   --env-file ../common/test_env-onap-honolulu.sh
-Note: When honolulu is released, add the 'release' arg to run released images.
+./PM_EI_DEMO.sh remote-remove  docker release  --env-file ../common/test_env-onap-honolulu.sh
+./PM_EI_DEMO.sh remote-remove  kube  release --env-file ../common/test_env-onap-honolulu.sh
+
+ONAP ISTANBUL
+=============
+./PM_EI_DEMO.sh remote-remove  docker   --env-file ../common/test_env-onap-istanbul.sh
+./PM_EI_DEMO.sh remote-remove  kube   --env-file ../common/test_env-onap-istanbul.sh
+Note: When istanbul is released, add the 'release' arg to run released images.
 ```
 ## Test case categories
 The test script are number using these basic categories where 0-999 are releated to the policy managment and 1000-1999 are related to enrichment management. 2000-2999 are for southbound http proxy. There are also demo test cases that test more or less all components. These test scripts does not use the numbering scheme below.
index bc68808..bdc6c7d 100644 (file)
@@ -1 +1,2 @@
 NO-CHECKIN
+TEST_TMP
index 99ee015..8553519 100644 (file)
@@ -146,8 +146,8 @@ The script can be started with these arguments
 | `--use-snapshot-image` | The script will use images from the nexus snapshot repo for the supplied apps, space separated list of app short names |
 | `--use-staging-image` | The script will use images from the nexus staging repo for the supplied apps, space separated list of app short names |
 | `--use-release-image` | The script will use images from the nexus release repo for the supplied apps, space separated list of app short names |
-| `--image-repo` |  Url to image repo. Only required in when running in multi-node kube cluster, otherwise optional. All used images will be re-tagged and pushed to this repo
-
+| `--image-repo` |  Url to optional image repo. Only locally built images will be re-tagged and pushed to this repo |
+| `--cluster-timeout` |  Optional timeout for cluster where it takes time to obtain external ip/host-name. Timeout in seconds |
 | `help` | Print this info along with the test script description and the list of app short names supported |
 
 ## Function: setup_testenvironment
@@ -1004,8 +1004,14 @@ Start the ECS container in docker or kube depending on running mode.
 |--|
 | None |
 
-## Function: restart_ecs ##
-Restart the ECS container.
+## Function: stop_ecs ##
+Stop the ECS container.
+| arg list |
+|--|
+| None |
+
+## Function: start_stopped_ecs ##
+Start a previously stopped ecs.
 | arg list |
 |--|
 | None |
index 0c2e48a..cb48d78 100644 (file)
@@ -96,6 +96,9 @@ PA_ADAPTER=$PA_PATH
 # Make curl retries towards the agent for http response codes set in this env var, space separated list of codes
 AGENT_RETRY_CODES=""
 
+#Save first worker node the pod is started on
+__PA_WORKER_NODE=""
+
 ###########################
 ### Policy Agents functions
 ###########################
@@ -203,6 +206,13 @@ start_policy_agent() {
                        export POLICY_AGENT_CONFIG_CONFIGMAP_NAME=$POLICY_AGENT_APP_NAME"-config"
                        export POLICY_AGENT_DATA_CONFIGMAP_NAME=$POLICY_AGENT_APP_NAME"-data"
                        export POLICY_AGENT_PKG_NAME
+
+                       export POLICY_AGENT_DATA_PV_NAME=$POLICY_AGENT_APP_NAME"-pv"
+                       export POLICY_AGENT_DATA_PVC_NAME=$POLICY_AGENT_APP_NAME"-pvc"
+                       ##Create a unique path for the pv each time to prevent a previous volume to be reused
+                       export POLICY_AGENT_PV_PATH="padata-"$(date +%s)
+                       export POLICY_AGENT_CONTAINER_MNT_DIR
+
                        if [ $1 == "PROXY" ]; then
                                AGENT_HTTP_PROXY_CONFIG_PORT=$HTTP_PROXY_CONFIG_PORT  #Set if proxy is started
                                AGENT_HTTP_PROXY_CONFIG_HOST_NAME=$HTTP_PROXY_CONFIG_HOST_NAME #Set if proxy is started
@@ -237,6 +247,16 @@ start_policy_agent() {
                        output_yaml=$PWD/tmp/pa_cfd.yaml
                        __kube_create_configmap $POLICY_AGENT_DATA_CONFIGMAP_NAME $KUBE_NONRTRIC_NAMESPACE autotest PA $data_json $output_yaml
 
+                       ## Create pv
+                       input_yaml=$SIM_GROUP"/"$POLICY_AGENT_COMPOSE_DIR"/"pv.yaml
+                       output_yaml=$PWD/tmp/pa_pv.yaml
+                       __kube_create_instance pv $POLICY_AGENT_APP_NAME $input_yaml $output_yaml
+
+                       ## Create pvc
+                       input_yaml=$SIM_GROUP"/"$POLICY_AGENT_COMPOSE_DIR"/"pvc.yaml
+                       output_yaml=$PWD/tmp/pa_pvc.yaml
+                       __kube_create_instance pvc $POLICY_AGENT_APP_NAME $input_yaml $output_yaml
+
                        # Create service
                        input_yaml=$SIM_GROUP"/"$POLICY_AGENT_COMPOSE_DIR"/"svc.yaml
                        output_yaml=$PWD/tmp/pa_svc.yaml
@@ -249,6 +269,12 @@ start_policy_agent() {
 
                fi
 
+               # Keep the initial worker node in case the pod need to be "restarted" - must be made to the same node due to a volume mounted on the host
+               __PA_WORKER_NODE=$(kubectl get pod -l "autotest=PA" -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.items[*].spec.nodeName}')
+               if [ -z "$__PA_WORKER_NODE" ]; then
+                       echo -e $YELLOW" Cannot find worker node for pod for $POLICY_AGENT_APP_NAME, persistency may not work"$EYELLOW
+               fi
+
                echo " Retrieving host and ports for service..."
                PA_HOST_NAME=$(__kube_get_service_host $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE)
                POLICY_AGENT_EXTERNAL_PORT=$(__kube_get_service_port $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE "http")
@@ -274,6 +300,25 @@ start_policy_agent() {
                        exit
                fi
 
+               curdir=$PWD
+               cd $SIM_GROUP
+               cd policy_agent
+               cd $POLICY_AGENT_HOST_MNT_DIR
+               #cd ..
+               if [ -d db ]; then
+                       if [ "$(ls -A db)" ]; then
+                               echo -e $BOLD" Cleaning files in mounted dir: $PWD/db"$EBOLD
+                               rm -rf db/*  &> /dev/null
+                               if [ $? -ne 0 ]; then
+                                       echo -e $RED" Cannot remove database files in: $PWD"$ERED
+                                       exit 1
+                               fi
+                       fi
+               else
+                       echo " No files in mounted dir or dir does not exist"
+               fi
+               cd $curdir
+
                #Export all vars needed for docker-compose
                export POLICY_AGENT_APP_NAME
                export POLICY_AGENT_APP_NAME_ALIAS
@@ -291,6 +336,7 @@ start_policy_agent() {
                export POLICY_AGENT_CONFIG_FILE
                export POLICY_AGENT_PKG_NAME
                export POLICY_AGENT_DISPLAY_NAME
+               export POLICY_AGENT_CONTAINER_MNT_DIR
 
                if [ $1 == "PROXY" ]; then
                        AGENT_HTTP_PROXY_CONFIG_PORT=$HTTP_PROXY_CONFIG_PORT  #Set if proxy is started
@@ -320,6 +366,79 @@ start_policy_agent() {
        return 0
 }
 
+# Stop the policy agent
+# args: -
+# args: -
+# (Function for test scripts)
+stop_policy_agent() {
+       echo -e $BOLD"Stopping $POLICY_AGENT_DISPLAY_NAME"$EBOLD
+
+       if [ $RUNMODE == "KUBE" ]; then
+               __kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest PA
+               echo "  Deleting the replica set - a new will be started when the app is started"
+               tmp=$(kubectl delete rs -n $KUBE_NONRTRIC_NAMESPACE -l "autotest=PA")
+               if [ $? -ne 0 ]; then
+                       echo -e $RED" Could not delete replica set "$ERED
+                       ((RES_CONF_FAIL++))
+                       return 1
+               fi
+       else
+               docker stop $POLICY_AGENT_APP_NAME &> ./tmp/.dockererr
+               if [ $? -ne 0 ]; then
+                       __print_err "Could not stop $POLICY_AGENT_APP_NAME" $@
+                       cat ./tmp/.dockererr
+                       ((RES_CONF_FAIL++))
+                       return 1
+               fi
+       fi
+       echo -e $BOLD$GREEN"Stopped"$EGREEN$EBOLD
+       echo ""
+       return 0
+}
+
+# Start a previously stopped policy agent
+# args: -
+# (Function for test scripts)
+start_stopped_policy_agent() {
+       echo -e $BOLD"Starting (the previously stopped) $POLICY_AGENT_DISPLAY_NAME"$EBOLD
+
+       if [ $RUNMODE == "KUBE" ]; then
+
+               # Tie the PMS to the same worker node it was initially started on
+               # A PVC of type hostPath is mounted to PMS, for persistent storage, so the PMS must always be on the node which mounted the volume
+               if [ -z "$__PA_WORKER_NODE" ]; then
+                       echo -e $RED" No initial worker node found for pod "$ERED
+                       ((RES_CONF_FAIL++))
+                       return 1
+               else
+                       echo -e $BOLD" Setting nodeSelector kubernetes.io/hostname=$__PA_WORKER_NODE to deployment for $POLICY_AGENT_APP_NAME. Pod will always run on this worker node: $__PA_WORKER_NODE"$EBOLD
+                       echo -e $BOLD" The mounted volume is mounted as hostPath and only available on that worker node."$EBOLD
+                       tmp=$(kubectl patch deployment $POLICY_AGENT_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE --patch '{"spec": {"template": {"spec": {"nodeSelector": {"kubernetes.io/hostname": "'$__PA_WORKER_NODE'"}}}}}')
+                       if [ $? -ne 0 ]; then
+                               echo -e $YELLOW" Cannot set nodeSelector to deployment for $POLICY_AGENT_APP_NAME, persistency may not work"$EYELLOW
+                       fi
+                       __kube_scale deployment $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
+               fi
+
+       else
+               docker start $POLICY_AGENT_APP_NAME &> ./tmp/.dockererr
+               if [ $? -ne 0 ]; then
+                       __print_err "Could not start (the stopped) $POLICY_AGENT_APP_NAME" $@
+                       cat ./tmp/.dockererr
+                       ((RES_CONF_FAIL++))
+                       return 1
+               fi
+       fi
+       __check_service_start $POLICY_AGENT_APP_NAME $PA_PATH$POLICY_AGENT_ALIVE_URL
+       if [ $? -ne 0 ]; then
+               return 1
+       fi
+       echo ""
+       return 0
+}
+
+
+
 # Load the the appl config for the agent into a config map
 agent_load_config() {
        echo -e $BOLD"Agent - load config from "$EBOLD$1
index 437b207..134f50c 100644 (file)
@@ -35,7 +35,7 @@ __CR_imagesetup() {
 # <pull-policy-original> Shall be used for images that does not allow overriding
 # Both var may contain: 'remote', 'remote-remove' or 'local'
 __CR_imagepull() {
-       echo -e $RED" Image for app CR shall never be pulled from remove repo"$ERED
+       echo -e $RED" Image for app CR shall never be pulled from remote repo"$ERED
 }
 
 # Build image (only for simulator or interfaces stubs owned by the test environment)
index 4ce8bc4..ec69e13 100644 (file)
@@ -84,7 +84,7 @@ try:
                         retry_cnt -= 1
                         total_retry_count += 1
                     else:
-                        print("1Delete failed for id:"+uuid+str(i)+ ", expected response code: "+str(responsecode)+", got: "+str(resp.status_code))
+                        print("1Delete failed for id:"+uuid+str(i)+ ", expected response code: "+str(responsecode)+", got: "+str(resp.status_code)+str(resp.raw))
                         sys.exit()
                 else:
                     retry_cnt=-1
index 525ac8b..ba6af92 100644 (file)
@@ -96,6 +96,9 @@ ECS_ADAPTER=$ECS_PATH
 # Make curl retries towards ECS for http response codes set in this env var, space separated list of codes
 ECS_RETRY_CODES=""
 
+#Save first worker node the pod is started on
+__ECS_WORKER_NODE=""
+
 ###########################
 ### ECS functions
 ###########################
@@ -205,6 +208,7 @@ start_ecs() {
                        export ECS_CONTAINER_MNT_DIR
 
                        export ECS_DATA_PV_NAME=$ECS_APP_NAME"-pv"
+                       export ECS_DATA_PVC_NAME=$ECS_APP_NAME"-pvc"
                        #Create a unique path for the pv each time to prevent a previous volume to be reused
                        export ECS_PV_PATH="ecsdata-"$(date +%s)
 
@@ -251,6 +255,15 @@ start_ecs() {
                        __kube_create_instance app $ECS_APP_NAME $input_yaml $output_yaml
                fi
 
+               # Tie the ECS to a worker node so that ECS will always be scheduled to the same worker node if the ECS pod is restarted
+               # A PVC of type hostPath is mounted to ECS, for persistent storage, so the ECS must always be on the node which mounted the volume
+
+               # Keep the initial worker node in case the pod need to be "restarted" - must be made to the same node due to a volume mounted on the host
+               __ECS_WORKER_NODE=$(kubectl get pod -l "autotest=ECS" -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.items[*].spec.nodeName}')
+               if [ -z "$__ECS_WORKER_NODE" ]; then
+                       echo -e $YELLOW" Cannot find worker node for pod for $ECS_APP_NAME, persistency may not work"$EYELLOW
+               fi
+
                echo " Retrieving host and ports for service..."
                ECS_HOST_NAME=$(__kube_get_service_host $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE)
                ECS_EXTERNAL_PORT=$(__kube_get_service_port $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE "http")
@@ -337,20 +350,73 @@ start_ecs() {
        return 0
 }
 
-# Restart ECS
+# Stop the ecs
+# args: -
 # args: -
 # (Function for test scripts)
-restart_ecs() {
-       echo -e $BOLD"Re-starting ECS"$EBOLD
-       docker restart $ECS_APP_NAME &> ./tmp/.dockererr
-       if [ $? -ne 0 ]; then
-               __print_err "Could not restart $ECS_APP_NAME" $@
-               cat ./tmp/.dockererr
-               ((RES_CONF_FAIL++))
-               return 1
+stop_ecs() {
+       echo -e $BOLD"Stopping $ECS_DISPLAY_NAME"$EBOLD
+
+       if [ $RUNMODE == "KUBE" ]; then
+               __kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest ECS
+               echo "  Deleting the replica set - a new will be started when the app is started"
+               tmp=$(kubectl delete rs -n $KUBE_NONRTRIC_NAMESPACE -l "autotest=ECS")
+               if [ $? -ne 0 ]; then
+                       echo -e $RED" Could not delete replica set "$ERED
+                       ((RES_CONF_FAIL++))
+                       return 1
+               fi
+       else
+               docker stop $ECS_APP_NAME &> ./tmp/.dockererr
+               if [ $? -ne 0 ]; then
+                       __print_err "Could not stop $ECS_APP_NAME" $@
+                       cat ./tmp/.dockererr
+                       ((RES_CONF_FAIL++))
+                       return 1
+               fi
        fi
+       echo -e $BOLD$GREEN"Stopped"$EGREEN$EBOLD
+       echo ""
+       return 0
+}
 
+# Start a previously stopped ecs
+# args: -
+# (Function for test scripts)
+start_stopped_ecs() {
+       echo -e $BOLD"Starting (the previously stopped) $ECS_DISPLAY_NAME"$EBOLD
+
+       if [ $RUNMODE == "KUBE" ]; then
+
+               # Tie the ECS to the same worker node it was initially started on
+               # A PVC of type hostPath is mounted to ECS, for persistent storage, so the ECS must always be on the node which mounted the volume
+               if [ -z "$__ECS_WORKER_NODE" ]; then
+                       echo -e $RED" No initial worker node found for pod "$ERED
+                       ((RES_CONF_FAIL++))
+                       return 1
+               else
+                       echo -e $BOLD" Setting nodeSelector kubernetes.io/hostname=$__ECS_WORKER_NODE to deployment for $ECS_APP_NAME. Pod will always run on this worker node: $__ECS_WORKER_NODE"$EBOLD
+                       echo -e $BOLD" The mounted volume is mounted as hostPath and only available on that worker node."$EBOLD
+                       tmp=$(kubectl patch deployment $ECS_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE --patch '{"spec": {"template": {"spec": {"nodeSelector": {"kubernetes.io/hostname": "'$__ECS_WORKER_NODE'"}}}}}')
+                       if [ $? -ne 0 ]; then
+                               echo -e $YELLOW" Cannot set nodeSelector to deployment for $ECS_APP_NAME, persistency may not work"$EYELLOW
+                       fi
+                       __kube_scale deployment $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
+               fi
+
+       else
+               docker start $ECS_APP_NAME &> ./tmp/.dockererr
+               if [ $? -ne 0 ]; then
+                       __print_err "Could not start (the stopped) $ECS_APP_NAME" $@
+                       cat ./tmp/.dockererr
+                       ((RES_CONF_FAIL++))
+                       return 1
+               fi
+       fi
        __check_service_start $ECS_APP_NAME $ECS_PATH$ECS_ALIVE_URL
+       if [ $? -ne 0 ]; then
+               return 1
+       fi
        echo ""
        return 0
 }
index 374f3ec..5cac74c 100644 (file)
@@ -83,14 +83,6 @@ __KUBEPROXY_store_docker_logs() {
 
 #######################################################
 
-
-## Access to Kube Http Proxy
-# Host name may be changed if app started by kube
-# Direct access from script
-#BMXX  KUBE_PROXY_HTTPX="http"
-#BMXX KUBE_PROXY_HOST_NAME=$LOCALHOST_NAME
-#BMXX KUBE_PROXY_PATH=$KUBE_PROXY_HTTPX"://"$KUBE_PROXY_HOST_NAME":"$KUBE_PROXY_WEB_EXTERNAL_PORT
-
 #########################
 ### Http Proxy functions
 #########################
@@ -157,27 +149,56 @@ start_kube_proxy() {
                echo " Retrieving host and ports for service..."
 
                CLUSTER_KUBE_PROXY="http"
-               CLUSTER_KUBE_PROXY_HOST=$(kubectl config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}')
-               if [[ $CLUSTER_KUBE_PROXY_HOST == *"kubernetes"* ]]; then
-                       echo -e $YELLOW" The cluster host is: $CLUSTER_KUBE_PROXY_HOST. The proxy (mitmproxy) used by test script requires an ip so the ip is assumed and set to 127.0.0.1"
+
+               #Finding host of the proxy
+               echo "  Trying to find svc hostname..."
+               CLUSTER_KUBE_PROXY_HOST=$(__kube_cmd_with_timeout "kubectl get svc $KUBE_PROXY_APP_NAME -n $KUBE_SIM_NAMESPACE  -o jsonpath={.status.loadBalancer.ingress[0].hostname}")
+
+
+               if [ "$CLUSTER_KUBE_PROXY_HOST" == "localhost" ]; then
+                       #Local host found
+                       echo -e $YELLOW" The test environment svc $KUBE_PROXY_APP_NAME host is: $CLUSTER_KUBE_PROXY_HOST. The proxy (mitmproxy) used by test script requires an ip so the ip is assumed and set to 127.0.0.1"$EYELLOW
                        CLUSTER_KUBE_PROXY_HOST="127.0.0.1"
+               else
+                       if [ -z "$CLUSTER_KUBE_PROXY_HOST" ]; then
+                               #Host of proxy not found, trying to find the ip....
+                               echo "  Trying to find svc ip..."
+                               CLUSTER_KUBE_PROXY_HOST=$(__kube_cmd_with_timeout "kubectl get svc $KUBE_PROXY_APP_NAME -n $KUBE_SIM_NAMESPACE  -o jsonpath={.status.loadBalancer.ingress[0].ip}")
+                               if [ ! -z "$CLUSTER_KUBE_PROXY_HOST" ]; then
+                                       #Host ip found
+                                       echo -e $YELLOW" The test environment svc $KUBE_PROXY_APP_NAME ip is: $CLUSTER_KUBE_PROXY_HOST."$EYELLOW
+                               fi
+                       else
+                               #Host or ip of proxy found
+                               echo -e $YELLOW" The test environment host/ip is: $CLUSTER_KUBE_PROXY_HOST."$EYELLOW
+                       fi
+               fi
+               if [ -z "$CLUSTER_KUBE_PROXY_HOST" ]; then
+                       #Host/ip of proxy not found, try to use the cluster and the nodeports of the proxy
+                       CLUSTER_KUBE_PROXY_HOST=$(kubectl config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}')
+                       echo -e $YELLOW" The test environment cluster ip is: $CLUSTER_KUBE_PROXY_HOST."$EYELLOW
+                       CLUSTER_KUBE_PROXY_PORT=$(__kube_get_service_nodeport $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "http")  # port for proxy access
+                       KUBE_PROXY_WEB_NODEPORT=$(__kube_get_service_nodeport $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "web")  # web port, only for alive test
+                       echo " Cluster ip/host, cluster http nodeport, cluster web nodeport: $CLUSTER_KUBE_PROXY_HOST $CLUSTER_KUBE_PROXY_PORT $KUBE_PROXY_WEB_NODEPORT"
+               else
+                       #Find the service ports of the proxy
+                       CLUSTER_KUBE_PROXY_PORT=$(__kube_get_service_port $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "http")  # port for proxy access
+                       KUBE_PROXY_WEB_NODEPORT=$(__kube_get_service_port $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "web")  # web port, only for alive test
+                       echo " Proxy ip/host, proxy http port, proxy web port: $CLUSTER_KUBE_PROXY_HOST $CLUSTER_KUBE_PROXY_PORT $KUBE_PROXY_WEB_NODEPORT"
                fi
-               CLUSTER_KUBE_PROXY_NODEPORT=$(__kube_get_service_nodeport $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "http")  # port for proxy access
-               KUBE_PROXY_WEB_NODEPORT=$(__kube_get_service_nodeport $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "web")  # web port, only for alive test
 
                KUBE_PROXY_WEB_PATH=$CLUSTER_KUBE_PROXY"://"$CLUSTER_KUBE_PROXY_HOST":"$KUBE_PROXY_WEB_NODEPORT
 
-               echo " Cluster ip/host, cluster http nodeport cluster web nodeport: $CLUSTER_KUBE_PROXY_HOST $CLUSTER_KUBE_PROXY_NODEPORT $KUBE_PROXY_WEB_NODEPORT"
-
                export KUBE_PROXY_PATH=  # Make sure proxy is empty when checking the proxy itself
                __check_service_start $KUBE_PROXY_APP_NAME $KUBE_PROXY_WEB_PATH$KUBE_PROXY_ALIVE_URL
 
                # Set proxy for all subsequent calls for all services etc
-               export KUBE_PROXY_PATH=$CLUSTER_KUBE_PROXY"://"$CLUSTER_KUBE_PROXY_HOST":"$CLUSTER_KUBE_PROXY_NODEPORT
+               export KUBE_PROXY_PATH=$CLUSTER_KUBE_PROXY"://"$CLUSTER_KUBE_PROXY_HOST":"$CLUSTER_KUBE_PROXY_PORT
 
        else
                echo $YELLOW" Kube http proxy not needed in docker test. App not started"
        fi
        echo ""
+
 }
 
index 45d0931..3569f6c 100755 (executable)
@@ -44,7 +44,7 @@ __DMAAPMR_imagesetup() {
 # <pull-policy-original> Shall be used for images that does not allow overriding
 # Both var may contain: 'remote', 'remote-remove' or 'local'
 __MR_imagepull() {
-       echo -e $RED"Image for app CR shall never be pulled from remove repo"$ERED
+       echo -e $RED"Image for app MR shall never be pulled from remote repo"$ERED
 }
 
 # Pull image from remote repo or use locally built image
index ae3f193..744e357 100644 (file)
@@ -35,7 +35,7 @@ __PRODSTUB_imagesetup() {
 # <pull-policy-original> Shall be used for images that does not allow overriding
 # Both var may contain: 'remote', 'remote-remove' or 'local'
 __PRODSTUB_imagepull() {
-       echo -e $RED"Image for app PRODSTUB shall never be pulled from remove repo"$ERED
+       echo -e $RED"Image for app PRODSTUB shall never be pulled from remote repo"$ERED
 }
 
 # Build image (only for simulator or interfaces stubs owned by the test environment)
index bf30310..785ff9a 100644 (file)
@@ -314,7 +314,7 @@ __find_sim_port() {
 __find_sim_host() {
        if [ $RUNMODE == "KUBE" ]; then
                ricname=$(echo "$1" | tr '_' '-')
-               for timeout in {1..60}; do
+               for timeout in {1..500}; do   # long waiting time needed in case of starting large number of sims
                        host=$(kubectl get pod $ricname  -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.status.podIP}' 2> /dev/null)
                        if [ ! -z "$host" ]; then
                                echo $RIC_SIM_HTTPX"://"$host":"$RIC_SIM_PORT
index 0895947..59a1a0c 100755 (executable)
@@ -181,6 +181,7 @@ POLICY_AGENT_CONFIG_MOUNT_PATH="/opt/app/policy-agent/config" # Path in containe
 POLICY_AGENT_DATA_MOUNT_PATH="/opt/app/policy-agent/data" # Path in container for data file
 POLICY_AGENT_CONFIG_FILE="application.yaml"              # Container config file name
 POLICY_AGENT_DATA_FILE="application_configuration.json"  # Container data file name
+POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
 
 MR_DMAAP_APP_NAME="dmaap-mr"                             # Name for the Dmaap MR
 MR_STUB_APP_NAME="mr-stub"                               # Name of the MR stub
index b738755..337d3e1 100755 (executable)
@@ -204,6 +204,7 @@ POLICY_AGENT_CONFIG_MOUNT_PATH="/opt/app/policy-agent/config" # Path in containe
 POLICY_AGENT_DATA_MOUNT_PATH="/opt/app/policy-agent/data" # Path in container for data file
 POLICY_AGENT_CONFIG_FILE="application.yaml"              # Container config file name
 POLICY_AGENT_DATA_FILE="application_configuration.json"  # Container data file name
+POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
 
 ECS_APP_NAME="enrichmentservice"                         # Name for ECS container
 ECS_DISPLAY_NAME="Enrichment Coordinator Service"        # Display name for ECS container
diff --git a/test/common/test_env-onap-istanbul.sh b/test/common/test_env-onap-istanbul.sh
new file mode 100644 (file)
index 0000000..0c06813
--- /dev/null
@@ -0,0 +1,372 @@
+#!/bin/bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2020 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+#Profile for ONAP istanbul release
+TEST_ENV_PROFILE="ONAP-ISTANBUL"
+FLAVOUR="ONAP"
+
+########################################
+## Nexus repo settings
+########################################
+
+# Nexus repos for developed images
+NEXUS_PROXY_REPO="nexus3.onap.org:10001/"
+NEXUS_RELEASE_REPO="nexus3.onap.org:10002/"
+NEXUS_SNAPSHOT_REPO="nexus3.onap.org:10003/"
+NEXUS_STAGING_REPO=$NEXUS_SNAPSHOT_REPO  #staging repo not used in ONAP, using snapshot
+
+# Nexus repos for images used by test (not developed by the project)
+NEXUS_RELEASE_REPO_ORAN="nexus3.o-ran-sc.org:10002/" # Only for released ORAN images
+NEXUS_RELEASE_REPO_ONAP=$NEXUS_RELEASE_REPO
+
+########################################
+# Set up of image and tags for the test.
+########################################
+
+# NOTE: One environment variable containing the image name and tag is created by the test script
+# for each image from the env variables below.
+# The variable is created by removing the suffix "_BASE" from the base image variable name.
+# Example: POLICY_AGENT_IMAGE_BASE -> POLICY_AGENT_IMAGE
+# This var will point to the local or remote image depending on cmd line arguments.
+# In addition, the repo and the image tag version are selected from the list of image tags based on the cmd line argument.
+# For images built by the script, only tag #1 shall be specified
+# For project images, only tag #1, #2, #3 and #4 shall be specified
+# For ORAN images (non project), only tag #5 shall be specified
+# For ONAP images (non project), only tag #6 shall be specified
+# For all other images, only tag #7 shall be specified
+# 1 XXX_LOCAL: local images: <image-name>:<local-tag>
+# 2 XXX_REMOTE_SNAPSHOT: snapshot images: <snapshot-nexus-repo><image-name>:<snapshot-tag>
+# 3 XXX_REMOTE: staging images: <staging-nexus-repo><image-name>:<staging-tag>
+# 4 XXX_REMOTE_RELEASE: release images: <release-nexus-repo><image-name>:<release-tag>
+# 5 XXX_REMOTE_RELEASE_ORAN: ORAN release images: <oran-release-nexus-repo><image-name>:<release-tag>
+# 6 XXX_REMOTE_RELEASE_ONAP: ONAP release images: <onap-release-nexus-repo><image-name>:<release-tag>
+# 7 XXX_PROXY: other images, not produced by the project: <proxy-nexus-repo><mage-name>:<proxy-tag>
+
+#############################################################################
+# Note:
+# The image tags for pms and sdnc are updated AFTER the release.
+# This means that the latest staging/snapshot images for these two components have
+# version one step (0.0.1 - bug-level) higher than the
+# latest release image version.
+
+# This is only applicable for ONAP images
+#############################################################################
+
+# Policy Agent image and tags
+POLICY_AGENT_IMAGE_BASE="onap/ccsdk-oran-a1policymanagementservice"
+POLICY_AGENT_IMAGE_TAG_LOCAL="1.2.0-SNAPSHOT"
+POLICY_AGENT_IMAGE_TAG_REMOTE_SNAPSHOT="1.2.0-SNAPSHOT"
+POLICY_AGENT_IMAGE_TAG_REMOTE="1.2.0-STAGING-latest" #Will use snapshot repo
+POLICY_AGENT_IMAGE_TAG_REMOTE_RELEASE="1.2.0"
+
+# SDNC A1 Controller remote image and tag
+SDNC_A1_CONTROLLER_IMAGE_BASE="onap/sdnc-image"
+SDNC_A1_CONTROLLER_IMAGE_TAG_LOCAL="2.2.0-SNAPSHOT" ###CHECK THIS
+SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE_SNAPSHOT="2.2.0-STAGING-latest"
+SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE="2.2.0-STAGING-latest"  #Will use snapshot repo
+SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE_RELEASE="2.2.0"
+
+#SDNC DB remote image and tag
+#The DB is part of SDNC so handled in the same way as SDNC
+SDNC_DB_IMAGE_BASE="mariadb"
+SDNC_DB_IMAGE_TAG_REMOTE_PROXY="10.5"
+
+# ECS image and tag - uses cherry release
+ECS_IMAGE_BASE="o-ran-sc/nonrtric-enrichment-coordinator-service"
+ECS_IMAGE_TAG_REMOTE_RELEASE_ORAN="1.0.1"
+
+
+# Control Panel image and tag - uses cherry release
+CONTROL_PANEL_IMAGE_BASE="o-ran-sc/nonrtric-controlpanel"
+CONTROL_PANEL_IMAGE_TAG_REMOTE_RELEASE_ORAN="2.1.1"
+
+
+# RAPP Catalogue image and tags - uses cherry release
+RAPP_CAT_IMAGE_BASE="o-ran-sc/nonrtric-r-app-catalogue"
+RAPP_CAT_IMAGE_TAG_REMOTE_RELEASE_ORAN="1.0.1"
+
+
+# Near RT RIC Simulator image and tags - uses cherry release
+RIC_SIM_IMAGE_BASE="o-ran-sc/a1-simulator"
+RIC_SIM_IMAGE_TAG_REMOTE_RELEASE_ORAN="2.1.0"
+
+
+#Consul remote image and tag
+CONSUL_IMAGE_BASE="consul"
+CONSUL_IMAGE_TAG_REMOTE_PROXY="1.7.2"
+#No local image for Consul, remote image always used
+
+
+#CBS remote image and tag
+CBS_IMAGE_BASE="onap/org.onap.dcaegen2.platform.configbinding.app-app"
+CBS_IMAGE_TAG_REMOTE_RELEASE_ONAP="2.3.0"
+#No local image for CBS, remote image always used
+
+
+#MR stub image and tag
+MRSTUB_IMAGE_BASE="mrstub"
+MRSTUB_IMAGE_TAG_LOCAL="latest"
+#No remote image for MR stub, local image always used
+
+
+#Callback receiver image and tag
+CR_IMAGE_BASE="callback-receiver"
+CR_IMAGE_TAG_LOCAL="latest"
+#No remote image for CR, local image always used
+
+
+#Producer stub image and tag
+PROD_STUB_IMAGE_BASE="producer-stub"
+PROD_STUB_IMAGE_TAG_LOCAL="latest"
+#No remote image for producer stub, local image always used
+
+
+#Http proxy remote image and tag
+HTTP_PROXY_IMAGE_BASE="mitmproxy/mitmproxy"
+HTTP_PROXY_IMAGE_TAG_REMOTE_PROXY="6.0.2"
+#No local image for http proxy, remote image always used
+
+#ONAP Zookeeper remote image and tag
+ONAP_ZOOKEEPER_IMAGE_BASE="onap/dmaap/zookeeper"
+ONAP_ZOOKEEPER_IMAGE_TAG_REMOTE_RELEASE_ONAP="6.0.3"
+#No local image for ONAP Zookeeper, remote image always used
+
+#ONAP Kafka remote image and tag
+ONAP_KAFKA_IMAGE_BASE="onap/dmaap/kafka111"
+ONAP_KAFKA_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.0.4"
+#No local image for ONAP Kafka, remote image always used
+
+#ONAP DMAAP-MR remote image and tag
+ONAP_DMAAPMR_IMAGE_BASE="onap/dmaap/dmaap-mr"
+ONAP_DMAAPMR_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.1.18"
+#No local image for ONAP DMAAP-MR, remote image always used
+
+#Kube proxy remote image and tag
+KUBE_PROXY_IMAGE_BASE="mitmproxy/mitmproxy"
+KUBE_PROXY_IMAGE_TAG_REMOTE_PROXY="6.0.2"
+#No local image for http proxy, remote image always used
+
+# List of app short names produced by the project
+PROJECT_IMAGES_APP_NAMES="PA SDNC"
+
+# List of app short names which images pulled from ORAN
+ORAN_IMAGES_APP_NAMES="CP ECS RICSIM RC"
+
+# List of app short names which images pulled from ONAP
+ONAP_IMAGES_APP_NAMES=""   # Not used
+
+
+########################################
+# Detailed settings per app
+########################################
+
+
+DOCKER_SIM_NWNAME="nonrtric-docker-net"                  # Name of docker private network
+
+KUBE_NONRTRIC_NAMESPACE="nonrtric"                       # Namespace for all nonrtric components
+KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
+KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
+
+POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
+POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
+POLICY_AGENT_EXTERNAL_SECURE_PORT=8433                   # Policy Agent container external secure port (host -> container)
+POLICY_AGENT_INTERNAL_SECURE_PORT=8433                   # Policy Agent container internal secure port (container -> container)
+POLICY_AGENT_APIS="V1 V2"                                # Supported northbound api versions
+PMS_VERSION="V2"                                         # Tested version of northbound API
+PMS_API_PREFIX="/a1-policy"                               # api url prefix, only for V2. Shall contain leading "/"
+
+POLICY_AGENT_APP_NAME="policymanagementservice"          # Name for Policy Agent container
+POLICY_AGENT_DISPLAY_NAME="Policy Management Service"
+POLICY_AGENT_HOST_MNT_DIR="./mnt"                        # Mounted dir, relative to compose file, on the host
+POLICY_AGENT_LOGPATH="/var/log/policy-agent/application.log" # Path the application log in the Policy Agent container
+POLICY_AGENT_APP_NAME_ALIAS="policy-agent-container"     # Alias name, name used by the control panel
+POLICY_AGENT_CONFIG_KEY="policy-agent"                   # Key for consul config
+POLICY_AGENT_PKG_NAME="org.onap.ccsdk.oran.a1policymanagementservice"  # Java base package name
+POLICY_AGENT_ACTUATOR="/actuator/loggers/$POLICY_AGENT_PKG_NAME" # Url for trace/debug
+POLICY_AGENT_ALIVE_URL="$PMS_API_PREFIX/v2/status"       # Base path for alive check
+POLICY_AGENT_COMPOSE_DIR="policy_agent"                  # Dir in simulator_group for docker-compose
+POLICY_AGENT_CONFIG_MOUNT_PATH="/opt/app/policy-agent/config" # Path in container for config file
+POLICY_AGENT_DATA_MOUNT_PATH="/opt/app/policy-agent/data" # Path in container for data file
+POLICY_AGENT_CONFIG_FILE="application.yaml"              # Container config file name
+POLICY_AGENT_DATA_FILE="application_configuration.json"  # Container data file name
+POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
+
+ECS_APP_NAME="enrichmentservice"                         # Name for ECS container
+ECS_DISPLAY_NAME="Enrichment Coordinator Service"        # Display name for ECS container
+ECS_EXTERNAL_PORT=8083                                   # ECS container external port (host -> container)
+ECS_INTERNAL_PORT=8083                                   # ECS container internal port (container -> container)
+ECS_EXTERNAL_SECURE_PORT=8434                            # ECS container external secure port (host -> container)
+ECS_INTERNAL_SECURE_PORT=8434                            # ECS container internal secure port (container -> container)
+
+ECS_LOGPATH="/var/log/enrichment-coordinator-service/application.log" # Path the application log in the ECS container
+ECS_APP_NAME_ALIAS="enrichment-service-container"        # Alias name, name used by the control panel
+ECS_HOST_MNT_DIR="./mnt"                                 # Mounted dir, relative to compose file, on the host
+ECS_CONTAINER_MNT_DIR="/var/enrichment-coordinator-service" # Mounted dir in the container
+ECS_ACTUATOR="/actuator/loggers/org.oransc.enrichment"   # Url for trace/debug
+ECS_CERT_MOUNT_DIR="./cert"
+ECS_ALIVE_URL="/status"                                  # Base path for alive check
+ECS_COMPOSE_DIR="ecs"                                    # Dir in simulator_group for docker-compose
+ECS_CONFIG_MOUNT_PATH=/opt/app/enrichment-coordinator-service/config # Internal container path for configuration
+ECS_CONFIG_FILE=application.yaml                         # Config file name
+ECS_VERSION="V1-2"                                       # Version where the types are added in the producer registration
+
+MR_DMAAP_APP_NAME="dmaap-mr"                             # Name for the Dmaap MR
+MR_STUB_APP_NAME="mr-stub"                               # Name of the MR stub
+MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
+MR_STUB_DISPLAY_NAME="Message Router stub"
+MR_STUB_CERT_MOUNT_DIR="./cert"
+MR_EXTERNAL_PORT=3904                                    # MR dmaap/stub container external port
+MR_INTERNAL_PORT=3904                                    # MR dmaap/stub container internal port
+MR_EXTERNAL_SECURE_PORT=3905                             # MR dmaap/stub container external secure port
+MR_INTERNAL_SECURE_PORT=3905                             # MR dmaap/stub container internal secure port
+MR_DMAAP_LOCALHOST_PORT=3904                             # MR stub container external port (host -> container)
+MR_STUB_LOCALHOST_PORT=3908                              # MR stub container external port (host -> container)
+MR_DMAAP_LOCALHOST_SECURE_PORT=3905                      # MR stub container internal port (container -> container)
+MR_STUB_LOCALHOST_SECURE_PORT=3909                       # MR stub container external secure port (host -> container)
+MR_READ_TOPIC="A1-POLICY-AGENT-READ"                     # Read topic
+MR_WRITE_TOPIC="A1-POLICY-AGENT-WRITE"                   # Write topic
+MR_READ_URL="/events/$MR_READ_TOPIC/users/policy-agent?timeout=15000&limit=100" # Path to read messages from MR
+MR_WRITE_URL="/events/$MR_WRITE_TOPIC"                   # Path to write messages to MR
+MR_STUB_ALIVE_URL="/"                                    # Base path for mr stub alive check
+MR_DMAAP_ALIVE_URL="/topics"                             # Base path for dmaap-mr alive check
+MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_group for dmaap mr for - docker-compose
+MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
+MR_KAFKA_APP_NAME="kafka"                                # Kafka app name
+MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
+
+CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
+CR_DISPLAY_NAME="Callback Reciever"
+CR_EXTERNAL_PORT=8090                                    # Callback receiver container external port (host -> container)
+CR_INTERNAL_PORT=8090                                    # Callback receiver container internal port (container -> container)
+CR_EXTERNAL_SECURE_PORT=8091                             # Callback receiver container external secure port (host -> container)
+CR_INTERNAL_SECURE_PORT=8091                             # Callback receiver container internal secure port (container -> container)
+CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
+CR_APP_CALLBACK="/callbacks"                             # Url for callbacks
+CR_ALIVE_URL="/"                                         # Base path for alive check
+CR_COMPOSE_DIR="cr"                                      # Dir in simulator_group for docker-compose
+
+PROD_STUB_APP_NAME="producer-stub"                       # Name for the Producer stub
+PROD_STUB_DISPLAY_NAME="Producer Stub"
+PROD_STUB_EXTERNAL_PORT=8092                             # Producer stub container external port (host -> container)
+PROD_STUB_INTERNAL_PORT=8092                             # Producer stub container internal port (container -> container)
+PROD_STUB_EXTERNAL_SECURE_PORT=8093                      # Producer stub container external secure port (host -> container)
+PROD_STUB_INTERNAL_SECURE_PORT=8093                      # Producer stub container internal secure port (container -> container)
+PROD_STUB_JOB_CALLBACK="/callbacks/job"                  # Callback path for job create/update/delete
+PROD_STUB_SUPERVISION_CALLBACK="/callbacks/supervision"  # Callback path for producer supervision
+PROD_STUB_ALIVE_URL="/"                                  # Base path for alive check
+PROD_STUB_COMPOSE_DIR="prodstub"                         # Dir in simulator_group for docker-compose
+
+CONSUL_HOST="consul-server"                              # Host name of consul
+CONSUL_DISPLAY_NAME="Consul"
+CONSUL_EXTERNAL_PORT=8500                                # Consul container external port (host -> container)
+CONSUL_INTERNAL_PORT=8500                                # Consul container internal port (container -> container)
+CONSUL_APP_NAME="polman-consul"                          # Name for consul container
+CONSUL_ALIVE_URL="/ui/dc1/kv"                            # Base path for alive check
+CONSUL_CBS_COMPOSE_DIR="consul_cbs"                      # Dir in simulator group for docker compose
+
+CBS_APP_NAME="polman-cbs"                                # Name for CBS container
+CBS_DISPLAY_NAME="Config Binding Service"
+CBS_EXTERNAL_PORT=10000                                  # CBS container external port (host -> container)
+CBS_INTERNAL_PORT=10000                                  # CBS container internal port (container -> container)
+CONFIG_BINDING_SERVICE="config-binding-service"          # Host name of CBS
+CBS_ALIVE_URL="/healthcheck"                             # Base path for alive check
+
+RIC_SIM_DISPLAY_NAME="Near-RT RIC A1 Simulator"
+RIC_SIM_BASE="g"                                         # Base name of the RIC Simulator container, shall be the group code
+                                                         # Note, a prefix is added to each container name by the .env file in the 'ric' dir
+RIC_SIM_PREFIX="ricsim"                                  # Prefix added to ric container name, added in the .env file in the 'ric' dir
+                                                         # This prefix can be changed from the command line
+RIC_SIM_INTERNAL_PORT=8085                               # RIC Simulator container internal port (container -> container).
+                                                         # (external ports allocated by docker)
+RIC_SIM_INTERNAL_SECURE_PORT=8185                        # RIC Simulator container internal secure port (container -> container).
+                                                         # (external ports allocated by docker)
+RIC_SIM_CERT_MOUNT_DIR="./cert"
+RIC_SIM_COMPOSE_DIR="ric"                                # Dir in simulator group for docker compose
+RIC_SIM_ALIVE_URL="/"
+
+SDNC_APP_NAME="a1controller"                             # Name of the SNDC A1 Controller container
+SDNC_DISPLAY_NAME="SDNC A1 Controller"
+SDNC_EXTERNAL_PORT=8282                                  # SNDC A1 Controller container external port (host -> container)
+SDNC_INTERNAL_PORT=8181                                  # SNDC A1 Controller container internal port (container -> container)
+SDNC_EXTERNAL_SECURE_PORT=8443                           # SNDC A1 Controller container external secure port (host -> container)
+SDNC_INTERNAL_SECURE_PORT=8443                           # SNDC A1 Controller container internal secure port (container -> container)
+SDNC_DB_APP_NAME="sdncdb"                                # Name of the SDNC DB container
+SDNC_A1_TRUSTSTORE_PASSWORD="a1adapter"                  # SDNC truststore password
+SDNC_USER="admin"                                        # SDNC username
+SDNC_PWD="admin"                                         # SNDC PWD
+SDNC_PWD="Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U"   # SNDC PWD
+#SDNC_API_URL="/rests/operations/A1-ADAPTER-API:"         # Base url path for SNDC API (for upgraded sdnc)
+SDNC_API_URL="/restconf/operations/A1-ADAPTER-API:"      # Base url path for SNDC API
+SDNC_ALIVE_URL="/apidoc/explorer/"                       # Base url path for SNDC API docs (for alive check)
+SDNC_COMPOSE_DIR="sdnc"
+SDNC_COMPOSE_FILE="docker-compose-2.yml"
+SDNC_KUBE_APP_FILE="app2.yaml"
+SDNC_KARAF_LOG="/opt/opendaylight/data/log/karaf.log"    # Path to karaf log
+#SDNC_RESPONSE_JSON_KEY="A1-ADAPTER-API:output"           # Key name for output json in replies from sdnc (for upgraded sdnc)
+SDNC_RESPONSE_JSON_KEY="output"                          # Key name for output json in replies from sdnc
+
+RAPP_CAT_APP_NAME="rappcatalogueservice"                 # Name for the RAPP Catalogue
+RAPP_CAT_DISPLAY_NAME="RAPP Catalogue Service"
+RAPP_CAT_EXTERNAL_PORT=8680                              # RAPP Catalogue container external port (host -> container)
+RAPP_CAT_INTERNAL_PORT=8680                              # RAPP Catalogue container internal port (container -> container)
+RAPP_CAT_EXTERNAL_SECURE_PORT=8633                       # RAPP Catalogue container external secure port (host -> container)
+RAPP_CAT_INTERNAL_SECURE_PORT=8633                       # RAPP Catalogue container internal secure port (container -> container)
+RAPP_CAT_ALIVE_URL="/services"                           # Base path for alive check
+RAPP_CAT_COMPOSE_DIR="rapp_catalogue"                    # Dir in simulator_group for docker-compose
+
+CONTROL_PANEL_APP_NAME="controlpanel"                    # Name of the Control Panel container
+CONTROL_PANEL_DISPLAY_NAME="Non-RT RIC Control Panel"
+CONTROL_PANEL_EXTERNAL_PORT=8080                         # Control Panel container external port (host -> container)
+CONTROL_PANEL_INTERNAL_PORT=8080                         # Control Panel container internal port (container -> container)
+CONTROL_PANEL_EXTERNAL_SECURE_PORT=8880                  # Control Panel container external secure port (host -> container)
+CONTROL_PANEL_INTERNAL_SECURE_PORT=8082                  # Control Panel container internal secure port (container -> container)
+CONTROL_PANEL_LOGPATH="/logs/nonrtric-controlpanel.log"  # Path the application log in the Control Panel container
+CONTROL_PANEL_ALIVE_URL="/"                              # Base path for alive check
+CONTROL_PANEL_COMPOSE_DIR="control_panel"                # Dir in simulator_group for docker-compose
+CONTROL_PANEL_CONFIG_MOUNT_PATH=/maven                   # Container internal path for config
+CONTROL_PANEL_CONFIG_FILE=application.properties         # Config file name
+CONTROL_PANEL_HOST_MNT_DIR="./mnt"                       # Mounted dir, relative to compose file, on the host
+
+HTTP_PROXY_APP_NAME="httpproxy"                          # Name of the Http Proxy container
+HTTP_PROXY_DISPLAY_NAME="Http Proxy"
+HTTP_PROXY_EXTERNAL_PORT=8780                            # Http Proxy container external port (host -> container)
+HTTP_PROXY_INTERNAL_PORT=8080                            # Http Proxy container internal port (container -> container)
+HTTP_PROXY_WEB_EXTERNAL_PORT=8781                        # Http Proxy container external port (host -> container)
+HTTP_PROXY_WEB_INTERNAL_PORT=8081                        # Http Proxy container internal port (container -> container)
+HTTP_PROXY_CONFIG_PORT=0                                 # Port number for proxy config, will be set if proxy is started
+HTTP_PROXY_CONFIG_HOST_NAME=""                           # Proxy host, will be set if proxy is started
+HTTP_PROXY_ALIVE_URL="/"                                 # Base path for alive check
+HTTP_PROXY_COMPOSE_DIR="httpproxy"                       # Dir in simulator_group for docker-compose
+
+
+KUBE_PROXY_APP_NAME="kubeproxy"                          # Name of the Kube Http Proxy container
+KUBE_PROXY_DISPLAY_NAME="Kube Http Proxy"
+KUBE_PROXY_EXTERNAL_PORT=8730                            # Kube Http Proxy container external port (host -> container)
+KUBE_PROXY_INTERNAL_PORT=8080                            # Kube Http Proxy container internal port (container -> container)
+KUBE_PROXY_WEB_EXTERNAL_PORT=8731                        # Kube Http Proxy container external port (host -> container)
+KUBE_PROXY_WEB_INTERNAL_PORT=8081                        # Kube Http Proxy container internal port (container -> container)
+KUBE_PROXY_PATH=""                                       # Proxy url path, will be set if proxy is started
+KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
+KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
+
+########################################
+# Setting for common curl-base function
+########################################
+
+UUID=""                                                  # UUID used as prefix to the policy id to simulate a real UUID
+                                                         # Test scripts need to set the UUID; otherwise this empty prefix is used
index 9039b8d..ca994aa 100755 (executable)
@@ -207,6 +207,7 @@ POLICY_AGENT_CONFIG_MOUNT_PATH="/opt/app/policy-agent/config" # Path in containe
 POLICY_AGENT_DATA_MOUNT_PATH="/opt/app/policy-agent/data" # Path in container for data file
 POLICY_AGENT_CONFIG_FILE="application.yaml"              # Container config file name
 POLICY_AGENT_DATA_FILE="application_configuration.json"  # Container data file name
+POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
 
 ECS_APP_NAME="enrichmentservice"                         # Name for ECS container
 ECS_DISPLAY_NAME="Enrichment Coordinator Service"        # Display name for ECS container
index e9b82f6..211e068 100755 (executable)
@@ -226,6 +226,8 @@ POLICY_AGENT_CONFIG_MOUNT_PATH="/opt/app/policy-agent/config" # Path in containe
 POLICY_AGENT_DATA_MOUNT_PATH="/opt/app/policy-agent/data" # Path in container for data file
 POLICY_AGENT_CONFIG_FILE="application.yaml"              # Container config file name
 POLICY_AGENT_DATA_FILE="application_configuration.json"  # Container data file name
+POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
+POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
 
 ECS_APP_NAME="enrichmentservice"                         # Name for ECS container
 ECS_DISPLAY_NAME="Enrichment Coordinator Service"        # Display name for ECS container
index b232577..1f6d135 100755 (executable)
@@ -27,6 +27,7 @@ __print_args() {
        echo "Args: remote|remote-remove docker|kube --env-file <environment-filename> [release] [auto-clean] [--stop-at-error] "
        echo "      [--ricsim-prefix <prefix> ] [--use-local-image <app-nam>+]  [--use-snapshot-image <app-nam>+]"
        echo "      [--use-staging-image <app-nam>+] [--use-release-image <app-nam>+] [--image-repo <repo-address]"
+       echo "      [--cluster-timeout <timeout-in-seconds>]"
 }
 
 if [ $# -eq 1 ] && [ "$1" == "help" ]; then
@@ -51,7 +52,8 @@ if [ $# -eq 1 ] && [ "$1" == "help" ]; then
        echo "--use-snapshot-image  -  The script will use images from the nexus snapshot repo for the supplied apps, space separated list of app short names"
        echo "--use-staging-image   -  The script will use images from the nexus staging repo for the supplied apps, space separated list of app short names"
        echo "--use-release-image   -  The script will use images from the nexus release repo for the supplied apps, space separated list of app short names"
-       echo "--image-repo          -  Url to image repo. Only required in when running in multi-node kube cluster, otherwise optional. All used images will be re-tagged and pushed to this repo"
+       echo "--image-repo          -  Url to optional image repo. Only locally built images will be re-tagged and pushed to this repo"
+       echo "--cluster-timeout     -  Optional timeout for cluster where it takes time to obtain external ip/host-name. Timeout in seconds. "
        echo ""
        echo "List of app short names supported: "$APP_SHORT_NAMES
        exit 0
@@ -303,7 +305,7 @@ echo -e "Activity \t Duration" > $TIMER_MEASUREMENTS
 
 # If this is set, all used images will be re-tagged and pushed to this repo before any
 IMAGE_REPO_ADR=""
-
+CLUSTER_TIME_OUT=0
 
 echo "-------------------------------------------------------------------------------------------------"
 echo "-----------------------------------      Test case: "$ATC
@@ -523,6 +525,32 @@ while [ $paramerror -eq 0 ] && [ $foundparm -eq 0 ]; do
                        fi
                fi
        fi
+       if [ $paramerror -eq 0 ]; then
+               if [ "$1" == "--cluster-timeout" ]; then
+                       shift;
+                       CLUSTER_TIME_OUT=$1
+                       if [ -z "$1" ]; then
+                               paramerror=1
+                               if [ -z "$paramerror_str" ]; then
+                                       paramerror_str="No timeout value found for : '--cluster-timeout'"
+                               fi
+                       else
+                               #Check if positive int
+                               case ${CLUSTER_TIME_OUT#[+]} in
+                                       *[!0-9]* | '')
+                                               paramerror=1
+                                               if [ -z "$paramerror_str" ]; then
+                                                       paramerror_str="Value for '--cluster-timeout' not an int : "$CLUSTER_TIME_OUT
+                                               fi
+                                               ;;
+                                       * ) ;; # Ok
+                               esac
+                               echo "Option set - Cluster timeout: "$1
+                               shift;
+                               foundparm=0
+                       fi
+               fi
+       fi
 done
 echo ""
 
@@ -707,7 +735,7 @@ __check_and_create_image_var() {
        echo -e "$tmp" >> $image_list_file
        #Export the env var
        export "${2}"=$image":"$tag  #Note, this var may be set to the value of the target value below in __check_and_pull_image
-       if [ ! -z "$IMAGE_REPO_ADR" ]; then
+       if [ ! -z "$IMAGE_REPO_ADR" ] && [ $5 == "LOCAL" ]; then    # Only push local images if repo is given
                export "${2}_SOURCE"=$image":"$tag  #Var to keep the actual source image
                export "${2}_TARGET"=$IMAGE_REPO_ADR"/"$optional_image_repo_target":"$tag  #Create image + tag for optional image repo - pushed later if needed
        else
@@ -1047,40 +1075,44 @@ setup_testenvironment() {
 
        # The following sequence pull the configured images
 
-       echo -e $BOLD"Pulling configured images, if needed"$EBOLD
 
-       for imagename in $APP_SHORT_NAMES; do
-               __check_included_image $imagename
-               incl=$?
-               __check_project_image $imagename
-               proj=$?
-               if [ $incl -eq 0 ]; then
-                       if [ $proj -eq 0 ]; then
-                               START_ARG_MOD=$START_ARG
-                               __check_image_local_override $imagename
-                               if [ $? -eq 1 ]; then
-                                       START_ARG_MOD="local"
+       echo -e $BOLD"Pulling configured images, if needed"$EBOLD
+       if [ ! -z "$IMAGE_REPO_ADR" ]; then
+               echo -e $YELLOW" Excluding all remote image check/pull when running with image repo: $IMAGE_REPO_ADR"$EYELLOW
+       else
+               for imagename in $APP_SHORT_NAMES; do
+                       __check_included_image $imagename
+                       incl=$?
+                       __check_project_image $imagename
+                       proj=$?
+                       if [ $incl -eq 0 ]; then
+                               if [ $proj -eq 0 ]; then
+                                       START_ARG_MOD=$START_ARG
+                                       __check_image_local_override $imagename
+                                       if [ $? -eq 1 ]; then
+                                               START_ARG_MOD="local"
+                                       fi
+                               else
+                                       START_ARG_MOD=$START_ARG
+                               fi
+                               __check_image_local_build $imagename
+                               #No pull of images built locally
+                               if [ $? -ne 0 ]; then
+                                       # A function name is created from the app short name
+                                       # for example app short name 'HTTPPROXY' -> produce the function
+                                       # name __HTTPPROXY_imagepull
+                                       # This function is called and is expected to exist in the imported
+                                       # file for the httpproxy test functions
+                                       # The resulting function impl will call '__check_and_pull_image' function
+                                       # with appropriate parameters
+                                       function_pointer="__"$imagename"_imagepull"
+                                       $function_pointer $START_ARG_MOD $START_ARG
                                fi
                        else
-                               START_ARG_MOD=$START_ARG
-                       fi
-                       __check_image_local_build $imagename
-                       #No pull of images built locally
-                       if [ $? -ne 0 ]; then
-                               # A function name is created from the app short name
-                               # for example app short name 'HTTPPROXY' -> produce the function
-                               # name __HTTPPROXY_imagesetup
-                               # This function is called and is expected to exist in the imported
-                               # file for the httpproxy test functions
-                               # The resulting function impl will call '__check_and_pull_image' function
-                               # with appropriate parameters
-                               function_pointer="__"$imagename"_imagepull"
-                               $function_pointer $START_ARG_MOD $START_ARG
+                               echo -e $YELLOW" Excluding $imagename image from image check/pull"$EYELLOW
                        fi
-               else
-                       echo -e $YELLOW" Excluding $imagename image from image check/pull"$EYELLOW
-               fi
-       done
+               done
+       fi
 
        #Errors in image setting - exit
        if [ $IMAGE_ERR -ne 0 ]; then
@@ -1126,8 +1158,8 @@ setup_testenvironment() {
 
        echo ""
 
-       # Create a table of the images used in the script
-       echo -e $BOLD"Local docker registry images used in the this test script"$EBOLD
+       # Create a table of the images used in the script - from local repo
+       echo -e $BOLD"Local docker registry images used in this test script"$EBOLD
 
        docker_tmp_file=./tmp/.docker-images-table
        format_string="{{.Repository}}\\t{{.Tag}}\\t{{.CreatedSince}}\\t{{.Size}}\\t{{.CreatedAt}}"
@@ -1136,40 +1168,85 @@ setup_testenvironment() {
        for imagename in $APP_SHORT_NAMES; do
                __check_included_image $imagename
                if [ $? -eq 0 ]; then
-                       # A function name is created from the app short name
-                       # for example app short name 'MR' -> produce the function
-                       # name __MR_imagebuild
-                       # This function is called and is expected to exist in the imported
-                       # file for the mr test functions
-                       # The resulting function impl shall build the imagee
-                       function_pointer="__"$imagename"_image_data"
-                       $function_pointer "$format_string" $docker_tmp_file
+                       # Only print image data if image repo is null, or if image repo is set and image is local
+                       print_image_data=0
+                       if [ -z "$IMAGE_REPO_ADR" ]; then
+                               print_image_data=1
+                       else
+                               __check_image_local_build $imagename
+                               if [ $? -eq 0 ]; then
+                                       print_image_data=1
+                               fi
+                       fi
+                       if [ $print_image_data -eq 1 ]; then
+                               # A function name is created from the app short name
+                               # for example app short name 'MR' -> produce the function
+                               # name __MR_image_data
+                               # This function is called and is expected to exist in the imported
+                               # file for the mr test functions
+                               # The resulting function impl shall print the image data
+                               function_pointer="__"$imagename"_image_data"
+                               $function_pointer "$format_string" $docker_tmp_file
+                       fi
                fi
        done
 
-
        column -t -s $'\t' $docker_tmp_file | indent1
 
        echo ""
+
+       if [ ! -z "$IMAGE_REPO_ADR" ]; then
+
+               # Create a table of the images used in the script - from remote repo
+               echo -e $BOLD"Remote repo images used in this test script"$EBOLD
+               echo -e $YELLOW"-- Note: These images will be pulled when the containers start. Images are not managed by the test engine --"$EYELLOW
+
+               docker_tmp_file=./tmp/.docker-images-table
+               format_string="{{.Repository}}\\t{{.Tag}}"
+               echo -e "Application\tRepository\tTag" > $docker_tmp_file
+
+               for imagename in $APP_SHORT_NAMES; do
+                       __check_included_image $imagename
+                       if [ $? -eq 0 ]; then
+                               # Only print image data for images not built locally - these are pulled from the remote repo
+                               __check_image_local_build $imagename
+                               if [ $? -ne 0 ]; then
+                                       # A function name is created from the app short name
+                                       # for example app short name 'MR' -> produce the function
+                                       # name __MR_image_data
+                                       # This function is called and is expected to exist in the imported
+                                       # file for the mr test functions
+                                       # The resulting function impl shall print the image data
+                                       function_pointer="__"$imagename"_image_data"
+                                       $function_pointer "$format_string" $docker_tmp_file
+                               fi
+                       fi
+               done
+
+               column -t -s $'\t' $docker_tmp_file | indent1
+
+               echo ""
+       fi
+
        if [ $RUNMODE == "KUBE" ]; then
 
                echo "================================================================================="
                echo "================================================================================="
 
-               CLUSTER_IP=$(kubectl config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}')
-               if [[ $CLUSTER_IP != *"kubernetes"* ]]; then
-                       echo -e $YELLOW" The cluster ip is: $CLUSTER_IP. This kubernetes is likely a multi-node cluster."$EYELLOW
-                       echo -e $YELLOW" The image pull policy is set to 'Never'."$EYELLOW
+               if [ -z "$IMAGE_REPO_ADR" ]; then
+                       echo -e $YELLOW" The image pull policy is set to 'Never' - assuming a local image repo is available for all images"$EYELLOW
+                       echo -e " This setting only works on single node clusters on the local machine"
+                       echo -e " It does not work with multi-node clusters or remote clusters. "
                        export KUBE_IMAGE_PULL_POLICY="Never"
-                       if [ -z "$IMAGE_REPO_ADR" ]; then
-                               echo -e $RED" The flag --image-repo need to be provided to the cmd with the path to a custom image repo'."$ERED
-                               exit 1
-                       fi
                else
-                       echo -e $YELLOW" The cluster ip is: $CLUSTER_IP. This kubernetes is likely a single-node cluster on a local machine."$EYELLOW
-                       echo -e $YELLOW" The image pull policy is set to 'Never'."$EYELLOW
-                       export KUBE_IMAGE_PULL_POLICY="Never"
+                       echo -e $YELLOW" The image pull policy is set to 'Always'"$EYELLOW
+                       echo -e " This setting works on local clusters, multi-node clusters and remote clusters. "
+                       echo -e " Only locally built images are managed. Remote images are always pulled from remote repos"
+                       echo -e " Pulling remote snapshot or staging images may in some cases result in pulling newer image versions outside the control of the test engine"
+                       export KUBE_IMAGE_PULL_POLICY="Always"
                fi
+               CLUSTER_IP=$(kubectl config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}')
+               echo -e $YELLOW" The cluster hostname/ip is: $CLUSTER_IP"$EYELLOW
 
                echo "================================================================================="
                echo "================================================================================="
@@ -1292,7 +1369,7 @@ print_result() {
 start_timer() {
        echo -e $BOLD"INFO(${BASH_LINENO[0]}): "${FUNCNAME[0]}"," $@ $EBOLD
        TC_TIMER=$SECONDS
-       echo " Timer started"
+       echo " Timer started: $(date)"
 }
 
 # Print the value of the time (in seconds)
@@ -1815,6 +1892,26 @@ __kube_create_configmap() {
        return 0
 }
 
+# This function runs a kubectl cmd where a single output value is expected, for example get ip with jsonpath filter.
+# The function retries up to the timeout given in the cmd flag '--cluster-timeout'
+# args: <full kubectl cmd with parameters>
+# (Not for test scripts)
+__kube_cmd_with_timeout() {
+       TS_TMP=$(($SECONDS+$CLUSTER_TIME_OUT))
+
+       while true; do
+               kube_cmd_result=$($@)
+               if [ $? -ne 0 ]; then
+                       kube_cmd_result=""
+               fi
+               if [ $SECONDS -ge $TS_TMP ] || [ ! -z "$kube_cmd_result" ] ; then
+                       echo $kube_cmd_result
+                       return 0
+               fi
+               sleep 1
+       done
+}
+
 # This function scales or deletes all resources for app selected by the testcase.
 # args: -
 # (Not for test scripts)
@@ -2049,7 +2146,7 @@ __check_service_start() {
        TSTART=$SECONDS
        loop_ctr=0
        while (( $TSTART+600 > $SECONDS )); do
-               result="$(__do_curl $url)"
+               result="$(__do_curl -m 10 $url)"
                if [ $? -eq 0 ]; then
                        if [ ${#result} -gt 15 ]; then
                                #If response is too long, truncate
@@ -2197,7 +2294,13 @@ __do_curl() {
        curlString="curl -skw %{http_code} $proxyflag $@"
        echo " CMD: $curlString" >> $HTTPLOG
        res=$($curlString)
+       retcode=$?
        echo " RESP: $res" >> $HTTPLOG
+       echo " RETCODE: $retcode" >> $HTTPLOG
+       if [ $retcode -ne 0 ]; then
+               echo "<no-response-from-server>"
+               return 1
+       fi
        http_code="${res:${#res}-3}"
        if [ ${#res} -eq 3 ]; then
                if [ $http_code -lt 200 ] || [ $http_code -gt 299 ]; then
index bc1bad9..f1090ce 100644 (file)
@@ -38,6 +38,8 @@ spec:
           name: $ECS_CONFIG_CONFIGMAP_NAME
         name: ecs-conf-name
       - persistentVolumeClaim:
-          claimName: $ECS_DATA_CONFIGMAP_NAME
+          claimName: $ECS_DATA_PVC_NAME
         name: ecs-data-name
+# Selector will be set when pod is started first time
+      nodeSelector:
 
index dca9f1c..7f07893 100644 (file)
@@ -1,7 +1,7 @@
 apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
-  name: $ECS_DATA_CONFIGMAP_NAME
+  name: $ECS_DATA_PVC_NAME
   namespace: $KUBE_NONRTRIC_NAMESPACE
   labels:
     run: $ECS_APP_NAME
index ea04483..51cf745 100644 (file)
@@ -7,7 +7,7 @@ metadata:
     run: $KUBE_PROXY_APP_NAME
     autotest: KUBEPROXY
 spec:
-  type: NodePort
+  type: LoadBalancer
   ports:
   - port: $KUBE_PROXY_EXTERNAL_PORT
     targetPort: $KUBE_PROXY_INTERNAL_PORT
index bbe8fae..269d9ca 100644 (file)
@@ -17,6 +17,9 @@ spec:
         run: $POLICY_AGENT_APP_NAME
         autotest: PA
     spec:
+      securityContext:
+        runAsUser: 0
+# Need to run as root to be able to store files in dir mounted as a hostPath volume
       containers:
       - name: $POLICY_AGENT_APP_NAME
         image: $POLICY_AGENT_IMAGE
@@ -29,6 +32,10 @@ spec:
         volumeMounts:
         - mountPath: $POLICY_AGENT_CONFIG_MOUNT_PATH
           name: pa-conf-name
+#        volumeMounts:
+        - mountPath: $POLICY_AGENT_CONTAINER_MNT_DIR
+          name: pa-pv-data-name
+#        volumeMounts:
         - mountPath: $POLICY_AGENT_DATA_MOUNT_PATH
           name: pa-data-name
       volumes:
@@ -39,4 +46,10 @@ spec:
       - configMap:
           defaultMode: 420
           name: $POLICY_AGENT_DATA_CONFIGMAP_NAME
-        name: pa-data-name
\ No newline at end of file
+        name: pa-data-name
+      - persistentVolumeClaim:
+          claimName: $POLICY_AGENT_DATA_PVC_NAME
+        name: pa-pv-data-name
+# Selector will be set when pod is started first time
+      nodeSelector:
+
index 49111d8..5c5b08d 100644 (file)
@@ -53,6 +53,8 @@ app:
   # Location of the component configuration file. The file will only be used if the Consul database is not used;
   # configuration from the Consul will override the file.
   filepath: /opt/app/policy-agent/data/application_configuration.json
+  # path where the service can store data
+  vardata-directory: /var/policy-management-service
   webclient:
     # Configuration of the trust store used for the HTTP client (outgoing requests)
     # The file location and the password for the truststore is only relevant if trust-store-used == true
index e89fc86..2261151 100644 (file)
@@ -37,6 +37,7 @@ services:
       - HOSTNAME=${POLICY_AGENT_CONFIG_KEY}
     volumes:
     - ${POLICY_AGENT_HOST_MNT_DIR}/$POLICY_AGENT_CONFIG_FILE:${POLICY_AGENT_CONFIG_MOUNT_PATH}/$POLICY_AGENT_CONFIG_FILE
+    - ${POLICY_AGENT_HOST_MNT_DIR}/db:${POLICY_AGENT_CONTAINER_MNT_DIR}
     labels:
       - "nrttest_app=PA"
       - "nrttest_dp=${POLICY_AGENT_DISPLAY_NAME}"
diff --git a/test/simulator-group/policy_agent/pv.yaml b/test/simulator-group/policy_agent/pv.yaml
new file mode 100644 (file)
index 0000000..332b341
--- /dev/null
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: $POLICY_AGENT_DATA_PV_NAME
+  annotations:
+    pv.beta.kubernetes.io/gid: "999"
+  labels:
+    run: $POLICY_AGENT_APP_NAME
+    autotest: PA
+spec:
+  storageClassName: pa-standard
+  capacity:
+    storage: 1Mi
+  accessModes:
+    - ReadWriteOnce
+  persistentVolumeReclaimPolicy: Delete
+  hostPath:
+    path: "/tmp/$POLICY_AGENT_PV_PATH"
diff --git a/test/simulator-group/policy_agent/pvc.yaml b/test/simulator-group/policy_agent/pvc.yaml
new file mode 100644 (file)
index 0000000..a62e130
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: $POLICY_AGENT_DATA_PVC_NAME
+  namespace: $KUBE_NONRTRIC_NAMESPACE
+  labels:
+    run: $POLICY_AGENT_APP_NAME
+    autotest: PA
+spec:
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Mi
+  storageClassName: pa-standard
+  volumeMode: Filesystem