Support for test of PMS persistency 32/5932/2
author BjornMagnussonXA <bjorn.magnusson@est.tech>
Thu, 22 Apr 2021 21:46:10 +0000 (23:46 +0200)
committer BjornMagnussonXA <bjorn.magnusson@est.tech>
Mon, 26 Apr 2021 23:24:14 +0000 (01:24 +0200)
Minor updates to support for external image repo

Issue-ID: NONRTRIC-486
Signed-off-by: BjornMagnussonXA <bjorn.magnusson@est.tech>
Change-Id: I95ad81726a7bec776f388c6443792fffbe554db7

22 files changed:
test/auto-test/FTC1800.sh
test/auto-test/FTC805.sh [new file with mode: 0755]
test/auto-test/FTC850.sh
test/common/README.md
test/common/agent_api_functions.sh
test/common/cr_api_functions.sh
test/common/delete_policies_process.py
test/common/ecs_api_functions.sh
test/common/kube_proxy_api_functions.sh
test/common/mr_api_functions.sh
test/common/prodstub_api_functions.sh
test/common/ricsimulator_api_functions.sh
test/common/test_env-oran-dawn.sh
test/common/testcase_common.sh
test/simulator-group/ecs/app.yaml
test/simulator-group/ecs/pvc.yaml
test/simulator-group/kubeproxy/svc.yaml
test/simulator-group/policy_agent/app.yaml
test/simulator-group/policy_agent/application.yaml
test/simulator-group/policy_agent/docker-compose.yml
test/simulator-group/policy_agent/pv.yaml [new file with mode: 0644]
test/simulator-group/policy_agent/pvc.yaml [new file with mode: 0644]

index bbee77b..7ffd095 100755 (executable)
@@ -215,7 +215,9 @@ else
     ecs_equal json:A1-EI/v1/eijobs?eiTypeId=type1 $(($NUM_JOBS/5))
 fi
 
-restart_ecs
+stop_ecs
+
+start_stopped_ecs
 
 set_ecs_trace
 
diff --git a/test/auto-test/FTC805.sh b/test/auto-test/FTC805.sh
new file mode 100755 (executable)
index 0000000..13f534e
--- /dev/null
@@ -0,0 +1,282 @@
+#!/bin/bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2020 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+TC_ONELINE_DESCR="PMS Create 10000 policies and restart, test policies persistency"
+
+#App names to include in the test when running docker, space separated list
+DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR PA RICSIM SDNC NGW"
+
+#App names to include in the test when running kubernetes, space separated list
+KUBE_INCLUDED_IMAGES="CP CR PA RICSIM SDNC KUBEPROXY NGW"
+#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
+KUBE_PRESTARTED_IMAGES=""
+
+#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
+#the image is not configured in the supplied env_file
+#Used for images not applicable to all supported profiles
+CONDITIONALLY_IGNORED_IMAGES="NGW"
+
+#Supported test environment profiles
+SUPPORTED_PROFILES="ONAP-ISTANBUL ORAN-DAWN"
+#Supported run modes
+SUPPORTED_RUNMODES="DOCKER KUBE"
+
+. ../common/testcase_common.sh  $@
+. ../common/agent_api_functions.sh
+. ../common/ricsimulator_api_functions.sh
+. ../common/control_panel_api_functions.sh
+. ../common/controller_api_functions.sh
+. ../common/consul_cbs_functions.sh
+. ../common/cr_api_functions.sh
+. ../common/kube_proxy_api_functions.sh
+. ../common/gateway_api_functions.sh
+
+setup_testenvironment
+
+#### TEST BEGIN ####
+
+#Local vars in test script
+##########################
+
+# Tested variants of REST/DMAAP/SDNC config
+TESTED_VARIANTS="REST"
+
+#Test agent and simulator protocol versions (others are http only)
+TESTED_PROTOCOLS="HTTP"
+
+NUM_RICS=5
+NUM_POLICIES_PER_RIC=2000
+
+
+generate_policy_uuid
+
+if [ "$PMS_VERSION" == "V2" ]; then
+    notificationurl=$CR_SERVICE_PATH"/test"
+else
+    notificationurl=""
+fi
+
+for __httpx in $TESTED_PROTOCOLS ; do
+    for interface in $TESTED_VARIANTS ; do
+
+        echo "#####################################################################"
+        echo "#####################################################################"
+        echo "### Testing agent: "$interface" and "$__httpx
+        echo "#####################################################################"
+        echo "#####################################################################"
+
+        if [ $__httpx == "HTTPS" ]; then
+            use_cr_https
+            use_simulator_https
+            if [[ $interface = *"SDNC"* ]]; then
+                use_sdnc_https
+            fi
+            use_agent_rest_https
+        else
+            use_cr_http
+            use_simulator_http
+            if [[ $interface = *"SDNC"* ]]; then
+                use_sdnc_http
+            fi
+            use_agent_rest_http
+        fi
+
+        # Clean container and start all needed containers #
+        clean_environment
+
+        if [ $RUNMODE == "KUBE" ]; then
+            start_kube_proxy
+        fi
+
+        start_ric_simulators ricsim_g1 $NUM_RICS STD_2.0.0
+
+        start_control_panel $SIM_GROUP/$CONTROL_PANEL_COMPOSE_DIR/$CONTROL_PANEL_CONFIG_FILE
+
+        if [ ! -z "$NRT_GATEWAY_APP_NAME" ]; then
+            start_gateway $SIM_GROUP/$NRT_GATEWAY_COMPOSE_DIR/$NRT_GATEWAY_CONFIG_FILE
+        fi
+
+        start_policy_agent NORPOXY $SIM_GROUP/$POLICY_AGENT_COMPOSE_DIR/$POLICY_AGENT_CONFIG_FILE
+
+        set_agent_debug
+
+        if [ $RUNMODE == "DOCKER" ]; then
+            start_consul_cbs
+        fi
+
+        if [[ $interface = *"SDNC"* ]]; then
+            start_sdnc
+            prepare_consul_config      SDNC  ".consul_config.json"
+        else
+            prepare_consul_config      NOSDNC  ".consul_config.json"
+        fi
+
+        if [ $RUNMODE == "KUBE" ]; then
+            agent_load_config                       ".consul_config.json"
+        else
+            consul_config_app                      ".consul_config.json"
+        fi
+
+        start_cr
+
+        api_get_status 200
+
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            sim_print ricsim_g1_$i interface
+        done
+
+        echo "Load policy type in group 1 simulators"
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            sim_put_policy_type 201 ricsim_g1_$i STD_QOS_0_2_0 testdata/STD2/sim_qos.json
+        done
+
+        if [ "$PMS_VERSION" == "V2" ]; then
+            api_equal json:policy-types 1 300  #Wait for the agent to refresh types from the simulator
+        else
+            api_equal json:policy_types 1 300  #Wait for the agent to refresh types from the simulator
+        fi
+
+        api_put_service 201 "serv1" 600 "$CR_SERVICE_PATH/1"
+
+        echo "Check the number of types in the agent for each ric is 1"
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            if [ "$PMS_VERSION" == "V2" ]; then
+                api_equal json:policy-types?ric_id=ricsim_g1_$i 1 120
+            else
+                api_equal json:policy_types?ric=ricsim_g1_$i 1 120
+            fi
+        done
+
+        START_ID=2000
+
+        start_timer "Create $((NUM_POLICIES_PER_RIC*$NUM_RICS)) policies over $interface using "$__httpx
+
+        api_put_policy_parallel 201 "serv1" ricsim_g1_ $NUM_RICS STD_QOS_0_2_0 $START_ID NOTRANSIENT $notificationurl testdata/STD/pi1_template.json $NUM_POLICIES_PER_RIC 7
+
+        print_timer "Create $((NUM_POLICIES_PER_RIC*$NUM_RICS)) policies over $interface using "$__httpx
+
+        INSTANCES=$(($NUM_RICS*$NUM_POLICIES_PER_RIC))
+        api_equal json:policies $INSTANCES
+
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            sim_equal ricsim_g1_$i num_instances $NUM_POLICIES_PER_RIC
+        done
+
+        stop_policy_agent
+
+        start_stopped_policy_agent
+
+        set_agent_debug
+
+        api_equal json:policies $INSTANCES 500
+
+        stop_policy_agent
+
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            sim_post_delete_instances 200 ricsim_g1_$i
+        done
+
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            sim_equal ricsim_g1_$i num_instances 0
+        done
+
+        start_stopped_policy_agent
+
+        set_agent_debug
+
+        start_timer "Restore $((NUM_POLICIES_PER_RIC*$NUM_RICS)) policies after restart over $interface using "$__httpx
+
+
+        api_equal json:policies $INSTANCES 500
+
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            sim_equal ricsim_g1_$i num_instances $NUM_POLICIES_PER_RIC 500
+        done
+
+        print_timer "Restore $((NUM_POLICIES_PER_RIC*$NUM_RICS)) policies after restart over $interface using "$__httpx
+
+        start_timer "Delete $((NUM_POLICIES_PER_RIC*$NUM_RICS)) policies over $interface using "$__httpx
+
+        api_delete_policy_parallel 204 $NUM_RICS $START_ID $NUM_POLICIES_PER_RIC 7
+
+        print_timer "Delete $((NUM_POLICIES_PER_RIC*$NUM_RICS)) policies over $interface using "$__httpx
+
+        api_equal json:policies 0
+
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            sim_equal ricsim_g1_$i num_instances 0
+        done
+
+        stop_policy_agent
+
+        start_stopped_policy_agent
+
+        set_agent_debug
+
+        api_equal json:policies 0
+
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            sim_equal ricsim_g1_$i num_instances 0
+        done
+
+        sleep_wait 200
+
+        api_equal json:policies 0
+
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            sim_equal ricsim_g1_$i num_instances 0
+        done
+
+
+        for ((i=1; i<=$NUM_RICS; i++))
+        do
+            if [ $interface == "REST+SDNC" ]; then
+                sim_contains_str ricsim_g1_$i remote_hosts $SDNC_APP_NAME
+            else
+                sim_contains_str ricsim_g1_$i remote_hosts $POLICY_AGENT_APP_NAME
+            fi
+        done
+
+        check_policy_agent_logs
+        if [[ $interface = *"SDNC"* ]]; then
+            check_sdnc_logs
+        fi
+
+        store_logs          "${__httpx}__${interface}"
+
+    done
+
+done
+
+
+#### TEST COMPLETE ####
+
+print_result
+
+auto_clean_environment
\ No newline at end of file
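
As a usage sketch (exact flags depend on the local setup): per the script header, FTC805.sh supports the DOCKER and KUBE run modes with the ONAP-ISTANBUL/ORAN-DAWN profiles, so it could be launched along these lines:

    ./FTC805.sh remote docker --env-file ../common/test_env-oran-dawn.sh
    ./FTC805.sh remote kube --env-file ../common/test_env-oran-dawn.sh --cluster-timeout 120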
index 20ece58..7398e3a 100755 (executable)
@@ -64,10 +64,6 @@ TESTED_PROTOCOLS="HTTP HTTPS"
 NUM_RICS=20
 NUM_POLICIES_PER_RIC=500
 
-
-NUM_RICS=8
-NUM_POLICIES_PER_RIC=11
-
 generate_policy_uuid
 
 if [ "$PMS_VERSION" == "V2" ]; then
index 99ee015..8553519 100644 (file)
@@ -146,8 +146,8 @@ The script can be started with these arguments
 | `--use-snapshot-image` | The script will use images from the nexus snapshot repo for the supplied apps, space separated list of app short names |
 | `--use-staging-image` | The script will use images from the nexus staging repo for the supplied apps, space separated list of app short names |
 | `--use-release-image` | The script will use images from the nexus release repo for the supplied apps, space separated list of app short names |
-| `--image-repo` |  Url to image repo. Only required in when running in multi-node kube cluster, otherwise optional. All used images will be re-tagged and pushed to this repo
-
+| `--image-repo` |  Url to an optional image repo. Only locally built images will be re-tagged and pushed to this repo |
+| `--cluster-timeout` |  Optional timeout, in seconds, for clusters where obtaining an external ip/host-name takes time |
 | `help` | Print this info along with the test script description and the list of app short names supported |
 
 ## Function: setup_testenvironment
@@ -1004,8 +1004,14 @@ Start the ECS container in docker or kube depending on running mode.
 |--|
 | None |
 
-## Function: restart_ecs ##
-Restart the ECS container.
+## Function: stop_ecs ##
+Stop the ECS container.
+| arg list |
+|--|
+| None |
+
+## Function: start_stopped_ecs ##
+Start a previously stopped ECS container.
 | arg list |
 |--|
 | None |
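
A minimal sketch of how a test script might exercise the stop/start pair documented above, mirroring the FTC1800.sh change in this commit (the job count check is illustrative):

    stop_ecs
    start_stopped_ecs
    set_ecs_trace
    ecs_equal json:A1-EI/v1/eijobs?eiTypeId=type1 $(($NUM_JOBS/5))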
index 0c2e48a..cb48d78 100644 (file)
@@ -96,6 +96,9 @@ PA_ADAPTER=$PA_PATH
 # Make curl retries towards the agent for http response codes set in this env var, space separated list of codes
 AGENT_RETRY_CODES=""
 
+#Save the first worker node the pod is started on
+__PA_WORKER_NODE=""
+
 ###########################
 ### Policy Agents functions
 ###########################
@@ -203,6 +206,13 @@ start_policy_agent() {
                        export POLICY_AGENT_CONFIG_CONFIGMAP_NAME=$POLICY_AGENT_APP_NAME"-config"
                        export POLICY_AGENT_DATA_CONFIGMAP_NAME=$POLICY_AGENT_APP_NAME"-data"
                        export POLICY_AGENT_PKG_NAME
+
+                       export POLICY_AGENT_DATA_PV_NAME=$POLICY_AGENT_APP_NAME"-pv"
+                       export POLICY_AGENT_DATA_PVC_NAME=$POLICY_AGENT_APP_NAME"-pvc"
+                       ## Create a unique path for the pv each time to prevent a previous volume from being reused
+                       export POLICY_AGENT_PV_PATH="padata-"$(date +%s)
+                       export POLICY_AGENT_CONTAINER_MNT_DIR
+
                        if [ $1 == "PROXY" ]; then
                                AGENT_HTTP_PROXY_CONFIG_PORT=$HTTP_PROXY_CONFIG_PORT  #Set if proxy is started
                                AGENT_HTTP_PROXY_CONFIG_HOST_NAME=$HTTP_PROXY_CONFIG_HOST_NAME #Set if proxy is started
@@ -237,6 +247,16 @@ start_policy_agent() {
                        output_yaml=$PWD/tmp/pa_cfd.yaml
                        __kube_create_configmap $POLICY_AGENT_DATA_CONFIGMAP_NAME $KUBE_NONRTRIC_NAMESPACE autotest PA $data_json $output_yaml
 
+                       ## Create pv
+                       input_yaml=$SIM_GROUP"/"$POLICY_AGENT_COMPOSE_DIR"/"pv.yaml
+                       output_yaml=$PWD/tmp/pa_pv.yaml
+                       __kube_create_instance pv $POLICY_AGENT_APP_NAME $input_yaml $output_yaml
+
+                       ## Create pvc
+                       input_yaml=$SIM_GROUP"/"$POLICY_AGENT_COMPOSE_DIR"/"pvc.yaml
+                       output_yaml=$PWD/tmp/pa_pvc.yaml
+                       __kube_create_instance pvc $POLICY_AGENT_APP_NAME $input_yaml $output_yaml
+
                        # Create service
                        input_yaml=$SIM_GROUP"/"$POLICY_AGENT_COMPOSE_DIR"/"svc.yaml
                        output_yaml=$PWD/tmp/pa_svc.yaml
@@ -249,6 +269,12 @@ start_policy_agent() {
 
                fi
 
+               # Keep the initial worker node in case the pod needs to be "restarted" - the pod must be scheduled to the same node due to a volume mounted on the host
+               __PA_WORKER_NODE=$(kubectl get pod -l "autotest=PA" -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.items[*].spec.nodeName}')
+               if [ -z "$__PA_WORKER_NODE" ]; then
+                       echo -e $YELLOW" Cannot find the worker node for the $POLICY_AGENT_APP_NAME pod, persistency may not work"$EYELLOW
+               fi
+
                echo " Retrieving host and ports for service..."
                PA_HOST_NAME=$(__kube_get_service_host $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE)
                POLICY_AGENT_EXTERNAL_PORT=$(__kube_get_service_port $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE "http")
@@ -274,6 +300,25 @@ start_policy_agent() {
                        exit
                fi
 
+               curdir=$PWD
+               cd $SIM_GROUP
+               cd policy_agent
+               cd $POLICY_AGENT_HOST_MNT_DIR
+               if [ -d db ]; then
+                       if [ "$(ls -A db)" ]; then
+                               echo -e $BOLD" Cleaning files in mounted dir: $PWD/db"$EBOLD
+                               rm -rf db/*  &> /dev/null
+                               if [ $? -ne 0 ]; then
+                                       echo -e $RED" Cannot remove database files in: $PWD/db"$ERED
+                                       exit 1
+                               fi
+                       fi
+               else
+                       echo " No files in mounted dir or dir does not exist"
+               fi
+               cd $curdir
+
                #Export all vars needed for docker-compose
                export POLICY_AGENT_APP_NAME
                export POLICY_AGENT_APP_NAME_ALIAS
@@ -291,6 +336,7 @@ start_policy_agent() {
                export POLICY_AGENT_CONFIG_FILE
                export POLICY_AGENT_PKG_NAME
                export POLICY_AGENT_DISPLAY_NAME
+               export POLICY_AGENT_CONTAINER_MNT_DIR
 
                if [ $1 == "PROXY" ]; then
                        AGENT_HTTP_PROXY_CONFIG_PORT=$HTTP_PROXY_CONFIG_PORT  #Set if proxy is started
@@ -320,6 +366,79 @@ start_policy_agent() {
        return 0
 }
 
+# Stop the policy agent
+# args: -
+# (Function for test scripts)
+stop_policy_agent() {
+       echo -e $BOLD"Stopping $POLICY_AGENT_DISPLAY_NAME"$EBOLD
+
+       if [ $RUNMODE == "KUBE" ]; then
+               __kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest PA
+               echo "  Deleting the replica set - a new one will be started when the app is started"
+               tmp=$(kubectl delete rs -n $KUBE_NONRTRIC_NAMESPACE -l "autotest=PA")
+               if [ $? -ne 0 ]; then
+                       echo -e $RED" Could not delete the replica set "$ERED
+                       ((RES_CONF_FAIL++))
+                       return 1
+               fi
+       else
+               docker stop $POLICY_AGENT_APP_NAME &> ./tmp/.dockererr
+               if [ $? -ne 0 ]; then
+                       __print_err "Could not stop $POLICY_AGENT_APP_NAME" $@
+                       cat ./tmp/.dockererr
+                       ((RES_CONF_FAIL++))
+                       return 1
+               fi
+       fi
+       echo -e $BOLD$GREEN"Stopped"$EGREEN$EBOLD
+       echo ""
+       return 0
+}
+
+# Start a previously stopped policy agent
+# args: -
+# (Function for test scripts)
+start_stopped_policy_agent() {
+       echo -e $BOLD"Starting (the previously stopped) $POLICY_AGENT_DISPLAY_NAME"$EBOLD
+
+       if [ $RUNMODE == "KUBE" ]; then
+
+               # Tie the PMS to the same worker node it was initially started on
+               # A PVC of type hostPath is mounted to PMS, for persistent storage, so the PMS must always be on the node which mounted the volume
+               if [ -z "$__PA_WORKER_NODE" ]; then
+                       echo -e $RED" No initial worker node found for pod "$ERED
+                       ((RES_CONF_FAIL++))
+                       return 1
+               else
+                       echo -e $BOLD" Setting nodeSelector kubernetes.io/hostname=$__PA_WORKER_NODE to deployment for $POLICY_AGENT_APP_NAME. Pod will always run on this worker node: $__PA_WORKER_NODE"$EBOLD
+                       echo -e $BOLD" The mounted volume is mounted as hostPath and only available on that worker node."$EBOLD
+                       tmp=$(kubectl patch deployment $POLICY_AGENT_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE --patch '{"spec": {"template": {"spec": {"nodeSelector": {"kubernetes.io/hostname": "'$__PA_WORKER_NODE'"}}}}}')
+                       if [ $? -ne 0 ]; then
+                               echo -e $YELLOW" Cannot set nodeSelector to deployment for $POLICY_AGENT_APP_NAME, persistency may not work"$EYELLOW
+                       fi
+                       __kube_scale deployment $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
+               fi
+
+       else
+               docker start $POLICY_AGENT_APP_NAME &> ./tmp/.dockererr
+               if [ $? -ne 0 ]; then
+                       __print_err "Could not start (the stopped) $POLICY_AGENT_APP_NAME" $@
+                       cat ./tmp/.dockererr
+                       ((RES_CONF_FAIL++))
+                       return 1
+               fi
+       fi
+       __check_service_start $POLICY_AGENT_APP_NAME $PA_PATH$POLICY_AGENT_ALIVE_URL
+       if [ $? -ne 0 ]; then
+               return 1
+       fi
+       echo ""
+       return 0
+}
+
 # Load the application config for the agent into a config map
 agent_load_config() {
        echo -e $BOLD"Agent - load config from "$EBOLD$1
index 437b207..134f50c 100644 (file)
@@ -35,7 +35,7 @@ __CR_imagesetup() {
 # <pull-policy-original> Shall be used for images that do not allow overriding
 # Both vars may contain: 'remote', 'remote-remove' or 'local'
 __CR_imagepull() {
-       echo -e $RED" Image for app CR shall never be pulled from remove repo"$ERED
+       echo -e $RED" Image for app CR shall never be pulled from remote repo"$ERED
 }
 
 # Build image (only for simulator or interfaces stubs owned by the test environment)
index 4ce8bc4..ec69e13 100644 (file)
@@ -84,7 +84,7 @@ try:
                         retry_cnt -= 1
                         total_retry_count += 1
                     else:
-                        print("1Delete failed for id:"+uuid+str(i)+ ", expected response code: "+str(responsecode)+", got: "+str(resp.status_code))
+                        print("1Delete failed for id:"+uuid+str(i)+ ", expected response code: "+str(responsecode)+", got: "+str(resp.status_code)+str(resp.raw))
                         sys.exit()
                 else:
                     retry_cnt=-1
index 525ac8b..ba6af92 100644 (file)
@@ -96,6 +96,9 @@ ECS_ADAPTER=$ECS_PATH
 # Make curl retries towards ECS for http response codes set in this env var, space separated list of codes
 ECS_RETRY_CODES=""
 
+#Save the first worker node the pod is started on
+__ECS_WORKER_NODE=""
+
 ###########################
 ### ECS functions
 ###########################
@@ -205,6 +208,7 @@ start_ecs() {
                        export ECS_CONTAINER_MNT_DIR
 
                        export ECS_DATA_PV_NAME=$ECS_APP_NAME"-pv"
+                       export ECS_DATA_PVC_NAME=$ECS_APP_NAME"-pvc"
                        #Create a unique path for the pv each time to prevent a previous volume from being reused
                        export ECS_PV_PATH="ecsdata-"$(date +%s)
 
@@ -251,6 +255,15 @@ start_ecs() {
                        __kube_create_instance app $ECS_APP_NAME $input_yaml $output_yaml
                fi
 
+               # Tie the ECS to a worker node so that ECS will always be scheduled to the same worker node if the ECS pod is restarted
+               # A PVC of type hostPath is mounted to ECS, for persistent storage, so the ECS must always be on the node which mounted the volume
+
+               # Keep the initial worker node in case the pod needs to be "restarted" - the pod must be scheduled to the same node due to a volume mounted on the host
+               __ECS_WORKER_NODE=$(kubectl get pod -l "autotest=ECS" -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.items[*].spec.nodeName}')
+               if [ -z "$__ECS_WORKER_NODE" ]; then
+                       echo -e $YELLOW" Cannot find worker node for pod for $ECS_APP_NAME, persistency may not work"$EYELLOW
+               fi
+
                echo " Retrieving host and ports for service..."
                ECS_HOST_NAME=$(__kube_get_service_host $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE)
                ECS_EXTERNAL_PORT=$(__kube_get_service_port $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE "http")
@@ -337,20 +350,73 @@ start_ecs() {
        return 0
 }
 
-# Restart ECS
+# Stop the ECS
 # args: -
 # (Function for test scripts)
-restart_ecs() {
-       echo -e $BOLD"Re-starting ECS"$EBOLD
-       docker restart $ECS_APP_NAME &> ./tmp/.dockererr
-       if [ $? -ne 0 ]; then
-               __print_err "Could not restart $ECS_APP_NAME" $@
-               cat ./tmp/.dockererr
-               ((RES_CONF_FAIL++))
-               return 1
+stop_ecs() {
+       echo -e $BOLD"Stopping $ECS_DISPLAY_NAME"$EBOLD
+
+       if [ $RUNMODE == "KUBE" ]; then
+               __kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest ECS
+               echo "  Deleting the replica set - a new one will be started when the app is started"
+               tmp=$(kubectl delete rs -n $KUBE_NONRTRIC_NAMESPACE -l "autotest=ECS")
+               if [ $? -ne 0 ]; then
+                       echo -e $RED" Could not delete the replica set "$ERED
+                       ((RES_CONF_FAIL++))
+                       return 1
+               fi
+       else
+               docker stop $ECS_APP_NAME &> ./tmp/.dockererr
+               if [ $? -ne 0 ]; then
+                       __print_err "Could not stop $ECS_APP_NAME" $@
+                       cat ./tmp/.dockererr
+                       ((RES_CONF_FAIL++))
+                       return 1
+               fi
        fi
+       echo -e $BOLD$GREEN"Stopped"$EGREEN$EBOLD
+       echo ""
+       return 0
+}
 
+# Start a previously stopped ECS
+# args: -
+# (Function for test scripts)
+start_stopped_ecs() {
+       echo -e $BOLD"Starting (the previously stopped) $ECS_DISPLAY_NAME"$EBOLD
+
+       if [ $RUNMODE == "KUBE" ]; then
+
+               # Tie the ECS to the same worker node it was initially started on
+               # A PVC of type hostPath is mounted to ECS, for persistent storage, so the ECS must always be on the node which mounted the volume
+               if [ -z "$__ECS_WORKER_NODE" ]; then
+                       echo -e $RED" No initial worker node found for pod "$ERED
+                       ((RES_CONF_FAIL++))
+                       return 1
+               else
+                       echo -e $BOLD" Setting nodeSelector kubernetes.io/hostname=$__ECS_WORKER_NODE to deployment for $ECS_APP_NAME. Pod will always run on this worker node: $__ECS_WORKER_NODE"$EBOLD
+                       echo -e $BOLD" The mounted volume is mounted as hostPath and only available on that worker node."$EBOLD
+                       tmp=$(kubectl patch deployment $ECS_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE --patch '{"spec": {"template": {"spec": {"nodeSelector": {"kubernetes.io/hostname": "'$__ECS_WORKER_NODE'"}}}}}')
+                       if [ $? -ne 0 ]; then
+                               echo -e $YELLOW" Cannot set nodeSelector to deployment for $ECS_APP_NAME, persistency may not work"$EYELLOW
+                       fi
+                       __kube_scale deployment $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
+               fi
+
+       else
+               docker start $ECS_APP_NAME &> ./tmp/.dockererr
+               if [ $? -ne 0 ]; then
+                       __print_err "Could not start (the stopped) $ECS_APP_NAME" $@
+                       cat ./tmp/.dockererr
+                       ((RES_CONF_FAIL++))
+                       return 1
+               fi
+       fi
        __check_service_start $ECS_APP_NAME $ECS_PATH$ECS_ALIVE_URL
+       if [ $? -ne 0 ]; then
+               return 1
+       fi
        echo ""
        return 0
 }
index 374f3ec..5cac74c 100644 (file)
@@ -83,14 +83,6 @@ __KUBEPROXY_store_docker_logs() {
 
 #######################################################
 
-
-## Access to Kube Http Proxy
-# Host name may be changed if app started by kube
-# Direct access from script
-#BMXX  KUBE_PROXY_HTTPX="http"
-#BMXX KUBE_PROXY_HOST_NAME=$LOCALHOST_NAME
-#BMXX KUBE_PROXY_PATH=$KUBE_PROXY_HTTPX"://"$KUBE_PROXY_HOST_NAME":"$KUBE_PROXY_WEB_EXTERNAL_PORT
-
 #########################
 ### Http Proxy functions
 #########################
@@ -157,27 +149,56 @@ start_kube_proxy() {
                echo " Retrieving host and ports for service..."
 
                CLUSTER_KUBE_PROXY="http"
-               CLUSTER_KUBE_PROXY_HOST=$(kubectl config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}')
-               if [[ $CLUSTER_KUBE_PROXY_HOST == *"kubernetes"* ]]; then
-                       echo -e $YELLOW" The cluster host is: $CLUSTER_KUBE_PROXY_HOST. The proxy (mitmproxy) used by test script requires an ip so the ip is assumed and set to 127.0.0.1"
+
+               #Finding host of the proxy
+               echo "  Trying to find svc hostname..."
+               CLUSTER_KUBE_PROXY_HOST=$(__kube_cmd_with_timeout "kubectl get svc $KUBE_PROXY_APP_NAME -n $KUBE_SIM_NAMESPACE  -o jsonpath={.status.loadBalancer.ingress[0].hostname}")
+
+
+               if [ "$CLUSTER_KUBE_PROXY_HOST" == "localhost" ]; then
+                       #Local host found
+                       echo -e $YELLOW" The test environment svc $KUBE_PROXY_APP_NAME host is: $CLUSTER_KUBE_PROXY_HOST. The proxy (mitmproxy) used by the test script requires an ip so the ip is assumed and set to 127.0.0.1"$EYELLOW
                        CLUSTER_KUBE_PROXY_HOST="127.0.0.1"
+               else
+                       if [ -z "$CLUSTER_KUBE_PROXY_HOST" ]; then
+                               #Host of proxy not found, trying to find the ip....
+                               echo "  Trying to find svc ip..."
+                               CLUSTER_KUBE_PROXY_HOST=$(__kube_cmd_with_timeout "kubectl get svc $KUBE_PROXY_APP_NAME -n $KUBE_SIM_NAMESPACE  -o jsonpath={.status.loadBalancer.ingress[0].ip}")
+                               if [ ! -z "$CLUSTER_KUBE_PROXY_HOST" ]; then
+                                       #Host ip found
+                                       echo -e $YELLOW" The test environment svc $KUBE_PROXY_APP_NAME ip is: $CLUSTER_KUBE_PROXY_HOST."$EYELLOW
+                               fi
+                       else
+                               #Host or ip of proxy found
+                               echo -e $YELLOW" The test environment host/ip is: $CLUSTER_KUBE_PROXY_HOST."$EYELLOW
+                       fi
+               fi
+               if [ -z "$CLUSTER_KUBE_PROXY_HOST" ]; then
+                       #Host/ip of proxy not found, try to use the cluster and the nodeports of the proxy
+                       CLUSTER_KUBE_PROXY_HOST=$(kubectl config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}')
+                       echo -e $YELLOW" The test environment cluster ip is: $CLUSTER_KUBE_PROXY_HOST."$EYELLOW
+                       CLUSTER_KUBE_PROXY_PORT=$(__kube_get_service_nodeport $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "http")  # port for proxy access
+                       KUBE_PROXY_WEB_NODEPORT=$(__kube_get_service_nodeport $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "web")  # web port, only for alive test
+                       echo " Cluster ip/host, cluster http nodeport, cluster web nodeport: $CLUSTER_KUBE_PROXY_HOST $CLUSTER_KUBE_PROXY_PORT $KUBE_PROXY_WEB_NODEPORT"
+               else
+                       #Find the service ports of the proxy
+                       CLUSTER_KUBE_PROXY_PORT=$(__kube_get_service_port $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "http")  # port for proxy access
+                       KUBE_PROXY_WEB_NODEPORT=$(__kube_get_service_port $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "web")  # web port, only for alive test
+                       echo " Proxy ip/host, proxy http port, proxy web port: $CLUSTER_KUBE_PROXY_HOST $CLUSTER_KUBE_PROXY_PORT $KUBE_PROXY_WEB_NODEPORT"
                fi
-               CLUSTER_KUBE_PROXY_NODEPORT=$(__kube_get_service_nodeport $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "http")  # port for proxy access
-               KUBE_PROXY_WEB_NODEPORT=$(__kube_get_service_nodeport $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "web")  # web port, only for alive test
 
                KUBE_PROXY_WEB_PATH=$CLUSTER_KUBE_PROXY"://"$CLUSTER_KUBE_PROXY_HOST":"$KUBE_PROXY_WEB_NODEPORT
 
-               echo " Cluster ip/host, cluster http nodeport cluster web nodeport: $CLUSTER_KUBE_PROXY_HOST $CLUSTER_KUBE_PROXY_NODEPORT $KUBE_PROXY_WEB_NODEPORT"
-
                export KUBE_PROXY_PATH=  # Make sure proxy is empty when checking the proxy itself
                __check_service_start $KUBE_PROXY_APP_NAME $KUBE_PROXY_WEB_PATH$KUBE_PROXY_ALIVE_URL
 
                # Set proxy for all subsequent calls for all services etc
-               export KUBE_PROXY_PATH=$CLUSTER_KUBE_PROXY"://"$CLUSTER_KUBE_PROXY_HOST":"$CLUSTER_KUBE_PROXY_NODEPORT
+               export KUBE_PROXY_PATH=$CLUSTER_KUBE_PROXY"://"$CLUSTER_KUBE_PROXY_HOST":"$CLUSTER_KUBE_PROXY_PORT
 
        else
                echo $YELLOW" Kube http proxy not needed in docker test. App not started"
        fi
        echo ""
+
 }
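
The discovery order implemented above can be summarized as a sketch (service name and namespace are illustrative); each lookup is retried via __kube_cmd_with_timeout until the --cluster-timeout limit expires:

    host=$(kubectl get svc kubeproxy -n nonrtric-ft -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
    if [ -z "$host" ]; then
        host=$(kubectl get svc kubeproxy -n nonrtric-ft -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
    fi
    if [ -z "$host" ]; then
        # Fall back to the cluster address and the service nodeports
        host=$(kubectl config view -o jsonpath='{.clusters[0].cluster.server}' | awk -F[/:] '{print $4}')
    fi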
 
index 1c2f155..25b5172 100644 (file)
@@ -44,7 +44,7 @@ __DMAAPMR_imagesetup() {
 # <pull-policy-original> Shall be used for images that do not allow overriding
 # Both vars may contain: 'remote', 'remote-remove' or 'local'
 __MR_imagepull() {
-       echo -e $RED"Image for app CR shall never be pulled from remove repo"$ERED
+       echo -e $RED"Image for app MR shall never be pulled from remote repo"$ERED
 }
 
 # Pull image from remote repo or use locally built image
index ae3f193..744e357 100644 (file)
@@ -35,7 +35,7 @@ __PRODSTUB_imagesetup() {
 # <pull-policy-original> Shall be used for images that do not allow overriding
 # Both vars may contain: 'remote', 'remote-remove' or 'local'
 __PRODSTUB_imagepull() {
-       echo -e $RED"Image for app PRODSTUB shall never be pulled from remove repo"$ERED
+       echo -e $RED"Image for app PRODSTUB shall never be pulled from remote repo"$ERED
 }
 
 # Build image (only for simulator or interfaces stubs owned by the test environment)
index bf30310..785ff9a 100644 (file)
@@ -314,7 +314,7 @@ __find_sim_port() {
 __find_sim_host() {
        if [ $RUNMODE == "KUBE" ]; then
                ricname=$(echo "$1" | tr '_' '-')
-               for timeout in {1..60}; do
+               for timeout in {1..500}; do   # long waiting time needed in case of starting a large number of sims
                        host=$(kubectl get pod $ricname  -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.status.podIP}' 2> /dev/null)
                        if [ ! -z "$host" ]; then
                                echo $RIC_SIM_HTTPX"://"$host":"$RIC_SIM_PORT
index 1890b71..7ecd434 100755 (executable)
@@ -226,6 +226,7 @@ POLICY_AGENT_CONFIG_MOUNT_PATH="/opt/app/policy-agent/config" # Path in containe
 POLICY_AGENT_DATA_MOUNT_PATH="/opt/app/policy-agent/data" # Path in container for data file
 POLICY_AGENT_CONFIG_FILE="application.yaml"              # Container config file name
 POLICY_AGENT_DATA_FILE="application_configuration.json"  # Container data file name
+POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
 
 ECS_APP_NAME="enrichmentservice"                         # Name for ECS container
 ECS_DISPLAY_NAME="Enrichment Coordinator Service"        # Display name for ECS container
index b232577..1f6d135 100755 (executable)
@@ -27,6 +27,7 @@ __print_args() {
        echo "Args: remote|remote-remove docker|kube --env-file <environment-filename> [release] [auto-clean] [--stop-at-error] "
        echo "      [--ricsim-prefix <prefix> ] [--use-local-image <app-nam>+]  [--use-snapshot-image <app-nam>+]"
        echo "      [--use-staging-image <app-nam>+] [--use-release-image <app-nam>+] [--image-repo <repo-address]"
+       echo "      [--cluster-timeout <timeout-in-seconds>]"
 }
 
 if [ $# -eq 1 ] && [ "$1" == "help" ]; then
@@ -51,7 +52,8 @@ if [ $# -eq 1 ] && [ "$1" == "help" ]; then
        echo "--use-snapshot-image  -  The script will use images from the nexus snapshot repo for the supplied apps, space separated list of app short names"
        echo "--use-staging-image   -  The script will use images from the nexus staging repo for the supplied apps, space separated list of app short names"
        echo "--use-release-image   -  The script will use images from the nexus release repo for the supplied apps, space separated list of app short names"
-       echo "--image-repo          -  Url to image repo. Only required in when running in multi-node kube cluster, otherwise optional. All used images will be re-tagged and pushed to this repo"
+       echo "--image-repo          -  Url to an optional image repo. Only locally built images will be re-tagged and pushed to this repo"
+       echo "--cluster-timeout     -  Optional timeout, in seconds, for clusters where obtaining an external ip/host-name takes time"
        echo ""
        echo "List of app short names supported: "$APP_SHORT_NAMES
        exit 0
@@ -303,7 +305,7 @@ echo -e "Activity \t Duration" > $TIMER_MEASUREMENTS
 
 # If this is set, all used images will be re-tagged and pushed to this repo before any
 IMAGE_REPO_ADR=""
-
+CLUSTER_TIME_OUT=0
 
 echo "-------------------------------------------------------------------------------------------------"
 echo "-----------------------------------      Test case: "$ATC
@@ -523,6 +525,32 @@ while [ $paramerror -eq 0 ] && [ $foundparm -eq 0 ]; do
                        fi
                fi
        fi
+       if [ $paramerror -eq 0 ]; then
+               if [ "$1" == "--cluster-timeout" ]; then
+                       shift;
+                       CLUSTER_TIME_OUT=$1
+                       if [ -z "$1" ]; then
+                               paramerror=1
+                               if [ -z "$paramerror_str" ]; then
+                                       paramerror_str="No timeout value found for : '--cluster-timeout'"
+                               fi
+                       else
+                               #Check if positive int
+                               case ${CLUSTER_TIME_OUT#[+]} in
+                                       *[!0-9]* | '')
+                                               paramerror=1
+                                               if [ -z "$paramerror_str" ]; then
+                                                       paramerror_str="Value for '--cluster-timeout' not an int : "$CLUSTER_TIME_OUT
+                                               fi
+                                               ;;
+                                       * ) ;; # Ok
+                               esac
+                               echo "Option set - Cluster timeout: "$1
+                               shift;
+                               foundparm=0
+                       fi
+               fi
+       fi
 done
 echo ""
 
@@ -707,7 +735,7 @@ __check_and_create_image_var() {
        echo -e "$tmp" >> $image_list_file
        #Export the env var
        export "${2}"=$image":"$tag  #Note, this var may be set to the value of the target value below in __check_and_pull_image
-       if [ ! -z "$IMAGE_REPO_ADR" ]; then
+       if [ ! -z "$IMAGE_REPO_ADR" ] && [ "$5" == "LOCAL" ]; then    # Only push local images if repo is given
                export "${2}_SOURCE"=$image":"$tag  #Var to keep the actual source image
                export "${2}_TARGET"=$IMAGE_REPO_ADR"/"$optional_image_repo_target":"$tag  #Create image + tag for optional image repo - pushed later if needed
        else
@@ -1047,40 +1075,44 @@ setup_testenvironment() {
 
        # The following sequence pull the configured images
 
-       echo -e $BOLD"Pulling configured images, if needed"$EBOLD
 
-       for imagename in $APP_SHORT_NAMES; do
-               __check_included_image $imagename
-               incl=$?
-               __check_project_image $imagename
-               proj=$?
-               if [ $incl -eq 0 ]; then
-                       if [ $proj -eq 0 ]; then
-                               START_ARG_MOD=$START_ARG
-                               __check_image_local_override $imagename
-                               if [ $? -eq 1 ]; then
-                                       START_ARG_MOD="local"
+       echo -e $BOLD"Pulling configured images, if needed"$EBOLD
+       if [ ! -z "$IMAGE_REPO_ADR" ]; then
+               echo -e $YELLOW" Excluding all remote image check/pull when running with image repo: $IMAGE_REPO_ADR"$EYELLOW
+       else
+               for imagename in $APP_SHORT_NAMES; do
+                       __check_included_image $imagename
+                       incl=$?
+                       __check_project_image $imagename
+                       proj=$?
+                       if [ $incl -eq 0 ]; then
+                               if [ $proj -eq 0 ]; then
+                                       START_ARG_MOD=$START_ARG
+                                       __check_image_local_override $imagename
+                                       if [ $? -eq 1 ]; then
+                                               START_ARG_MOD="local"
+                                       fi
+                               else
+                                       START_ARG_MOD=$START_ARG
+                               fi
+                               __check_image_local_build $imagename
+                               #No pull of images built locally
+                               if [ $? -ne 0 ]; then
+                                       # A function name is created from the app short name
+                                       # for example app short name 'HTTPPROXY' -> produce the function
+                               # name __HTTPPROXY_imagepull
+                                       # This function is called and is expected to exist in the imported
+                                       # file for the httpproxy test functions
+                                       # The resulting function impl will call '__check_and_pull_image' function
+                                       # with appropriate parameters
+                                       function_pointer="__"$imagename"_imagepull"
+                                       $function_pointer $START_ARG_MOD $START_ARG
                                fi
                        else
-                               START_ARG_MOD=$START_ARG
-                       fi
-                       __check_image_local_build $imagename
-                       #No pull of images built locally
-                       if [ $? -ne 0 ]; then
-                               # A function name is created from the app short name
-                               # for example app short name 'HTTPPROXY' -> produce the function
-                               # name __HTTPPROXY_imagesetup
-                               # This function is called and is expected to exist in the imported
-                               # file for the httpproxy test functions
-                               # The resulting function impl will call '__check_and_pull_image' function
-                               # with appropriate parameters
-                               function_pointer="__"$imagename"_imagepull"
-                               $function_pointer $START_ARG_MOD $START_ARG
+                               echo -e $YELLOW" Excluding $imagename image from image check/pull"$EYELLOW
                        fi
-               else
-                       echo -e $YELLOW" Excluding $imagename image from image check/pull"$EYELLOW
-               fi
-       done
+               done
+       fi
 
        #Errors in image setting - exit
        if [ $IMAGE_ERR -ne 0 ]; then
@@ -1126,8 +1158,8 @@ setup_testenvironment() {
 
        echo ""
 
-       # Create a table of the images used in the script
-       echo -e $BOLD"Local docker registry images used in the this test script"$EBOLD
+       # Create a table of the images used in the script - from local repo
+       echo -e $BOLD"Local docker registry images used in this test script"$EBOLD
 
        docker_tmp_file=./tmp/.docker-images-table
        format_string="{{.Repository}}\\t{{.Tag}}\\t{{.CreatedSince}}\\t{{.Size}}\\t{{.CreatedAt}}"
@@ -1136,40 +1168,85 @@ setup_testenvironment() {
        for imagename in $APP_SHORT_NAMES; do
                __check_included_image $imagename
                if [ $? -eq 0 ]; then
-                       # A function name is created from the app short name
-                       # for example app short name 'MR' -> produce the function
-                       # name __MR_imagebuild
-                       # This function is called and is expected to exist in the imported
-                       # file for the mr test functions
-                       # The resulting function impl shall build the imagee
-                       function_pointer="__"$imagename"_image_data"
-                       $function_pointer "$format_string" $docker_tmp_file
+                       # Only print image data if image repo is null, or if image repo is set and image is local
+                       print_image_data=0
+                       if [ -z "$IMAGE_REPO_ADR" ]; then
+                               print_image_data=1
+                       else
+                               __check_image_local_build $imagename
+                               if [ $? -eq 0 ]; then
+                                       print_image_data=1
+                               fi
+                       fi
+                       if [ $print_image_data -eq 1 ]; then
+                               # A function name is created from the app short name
+                               # for example app short name 'MR' -> produce the function
+                               # name __MR_image_data
+                               # This function is called and is expected to exist in the imported
+                               # file for the mr test functions
+                               # The resulting function impl shall print the image data
+                               function_pointer="__"$imagename"_image_data"
+                               $function_pointer "$format_string" $docker_tmp_file
+                       fi
                fi
        done
 
-
        column -t -s $'\t' $docker_tmp_file | indent1
 
        echo ""
+
+       if [ ! -z "$IMAGE_REPO_ADR" ]; then
+
+               # Create a table of the images used in the script - from remote repo
+               echo -e $BOLD"Remote repo images used in this test script"$EBOLD
+               echo -e $YELLOW"-- Note: These images will be pulled when the containers start. These images are not managed by the test engine --"$EYELLOW
+
+               docker_tmp_file=./tmp/.docker-images-table
+               format_string="{{.Repository}}\\t{{.Tag}}"
+               echo -e "Application\tRepository\tTag" > $docker_tmp_file
+
+               for imagename in $APP_SHORT_NAMES; do
+                       __check_included_image $imagename
+                       if [ $? -eq 0 ]; then
+                               # Only print image data if image repo is null, or if image repo is set and image is local
+                               __check_image_local_build $imagename
+                               if [ $? -ne 0 ]; then
+                                       # A function name is created from the app short name
+                                       # for example app short name 'MR' -> produce the function
+                                       # name __MR_image_data
+                                       # This function is called and is expected to exist in the imported
+                                       # file for the mr test functions
+                                       # The resulting function impl shall print the image data
+                                       function_pointer="__"$imagename"_image_data"
+                                       $function_pointer "$format_string" $docker_tmp_file
+                               fi
+                       fi
+               done
+
+               column -t -s $'\t' $docker_tmp_file | indent1
+
+               echo ""
+       fi
+
        if [ $RUNMODE == "KUBE" ]; then
 
                echo "================================================================================="
                echo "================================================================================="
 
-               CLUSTER_IP=$(kubectl config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}')
-               if [[ $CLUSTER_IP != *"kubernetes"* ]]; then
-                       echo -e $YELLOW" The cluster ip is: $CLUSTER_IP. This kubernetes is likely a multi-node cluster."$EYELLOW
-                       echo -e $YELLOW" The image pull policy is set to 'Never'."$EYELLOW
+               if [ -z "$IMAGE_REPO_ADR" ]; then
+                       echo -e $YELLOW" The image pull policy is set to 'Never' - assuming a local image repo is available for all images"$EYELLOW
+                       echo -e " This setting only works on single-node clusters on the local machine"
+                       echo -e " It does not work with multi-node clusters or remote clusters. "
                        export KUBE_IMAGE_PULL_POLICY="Never"
-                       if [ -z "$IMAGE_REPO_ADR" ]; then
-                               echo -e $RED" The flag --image-repo need to be provided to the cmd with the path to a custom image repo'."$ERED
-                               exit 1
-                       fi
                else
-                       echo -e $YELLOW" The cluster ip is: $CLUSTER_IP. This kubernetes is likely a single-node cluster on a local machine."$EYELLOW
-                       echo -e $YELLOW" The image pull policy is set to 'Never'."$EYELLOW
-                       export KUBE_IMAGE_PULL_POLICY="Never"
+                       echo -e $YELLOW" The image pull policy is set to 'Always'"$EYELLOW
+                       echo -e " This setting works on local clusters, multi-node clusters and remote clusters. "
+                       echo -e " Only locally built images are managed. Remote images are always pulled from remote repos"
+                       echo -e " Pulling remote snapshot or staging images may in some cases result in pulling newer image versions outside the control of the test engine"
+                       export KUBE_IMAGE_PULL_POLICY="Always"
                fi
+               CLUSTER_IP=$(kubectl config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}')
+               echo -e $YELLOW" The cluster hostname/ip is: $CLUSTER_IP"$EYELLOW
 
                echo "================================================================================="
                echo "================================================================================="
@@ -1292,7 +1369,7 @@ print_result() {
 start_timer() {
        echo -e $BOLD"INFO(${BASH_LINENO[0]}): "${FUNCNAME[0]}"," $@ $EBOLD
        TC_TIMER=$SECONDS
-       echo " Timer started"
+       echo " Timer started: $(date)"
 }
 
 # Print the value of the time (in seconds)
@@ -1815,6 +1892,26 @@ __kube_create_configmap() {
        return 0
 }
 
+# This function runs a kubectl cmd where a single output value is expected, for example get ip with jsonpath filter.
+# The function retries up to the timeout given in the cmd flag '--cluster-timeout'
+# args: <full kubectl cmd with parameters>
+# (Not for test scripts)
+__kube_cmd_with_timeout() {
+       TS_TMP=$(($SECONDS+$CLUSTER_TIME_OUT))
+
+       while true; do
+               kube_cmd_result=$($@)
+               if [ $? -ne 0 ]; then
+                       kube_cmd_result=""
+               fi
+               if [ $SECONDS -ge $TS_TMP ] || [ ! -z "$kube_cmd_result" ] ; then
+                       echo $kube_cmd_result
+                       return 0
+               fi
+               sleep 1
+       done
+}
+
 # This function scales or deletes all resources for app selected by the testcase.
 # args: -
 # (Not for test scripts)
@@ -2049,7 +2146,7 @@ __check_service_start() {
        TSTART=$SECONDS
        loop_ctr=0
        while (( $TSTART+600 > $SECONDS )); do
-               result="$(__do_curl $url)"
+               result="$(__do_curl -m 10 $url)"
                if [ $? -eq 0 ]; then
                        if [ ${#result} -gt 15 ]; then
                                #If response is too long, truncate
@@ -2197,7 +2294,13 @@ __do_curl() {
        curlString="curl -skw %{http_code} $proxyflag $@"
        echo " CMD: $curlString" >> $HTTPLOG
        res=$($curlString)
+       retcode=$?
        echo " RESP: $res" >> $HTTPLOG
+       echo " RETCODE: $retcode" >> $HTTPLOG
+       if [ $retcode -ne 0 ]; then
+               echo "<no-response-from-server>"
+               return 1
+       fi
        http_code="${res:${#res}-3}"
        if [ ${#res} -eq 3 ]; then
                if [ $http_code -lt 200 ] || [ $http_code -gt 299 ]; then
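
A usage sketch of the new __kube_cmd_with_timeout helper (service and namespace names are illustrative): the caller passes a complete kubectl command as a string and receives either the first non-empty result or an empty string once CLUSTER_TIME_OUT seconds have passed.

    CLUSTER_TIME_OUT=60
    ip=$(__kube_cmd_with_timeout "kubectl get svc myservice -n mynamespace -o jsonpath={.status.loadBalancer.ingress[0].ip}")
    if [ -z "$ip" ]; then
        echo "No external ip obtained within $CLUSTER_TIME_OUT seconds"
    fi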
index bc1bad9..f1090ce 100644 (file)
@@ -38,6 +38,8 @@ spec:
           name: $ECS_CONFIG_CONFIGMAP_NAME
         name: ecs-conf-name
       - persistentVolumeClaim:
-          claimName: $ECS_DATA_CONFIGMAP_NAME
+          claimName: $ECS_DATA_PVC_NAME
         name: ecs-data-name
+# Selector will be set the first time the pod is started
+      nodeSelector:
 
index dca9f1c..7f07893 100644 (file)
@@ -1,7 +1,7 @@
 apiVersion: v1
 kind: PersistentVolumeClaim
 metadata:
-  name: $ECS_DATA_CONFIGMAP_NAME
+  name: $ECS_DATA_PVC_NAME
   namespace: $KUBE_NONRTRIC_NAMESPACE
   labels:
     run: $ECS_APP_NAME
index ea04483..51cf745 100644 (file)
@@ -7,7 +7,7 @@ metadata:
     run: $KUBE_PROXY_APP_NAME
     autotest: KUBEPROXY
 spec:
-  type: NodePort
+  type: LoadBalancer
   ports:
   - port: $KUBE_PROXY_EXTERNAL_PORT
     targetPort: $KUBE_PROXY_INTERNAL_PORT
index bbe8fae..269d9ca 100644 (file)
@@ -17,6 +17,9 @@ spec:
         run: $POLICY_AGENT_APP_NAME
         autotest: PA
     spec:
+      securityContext:
+        runAsUser: 0
+# Need to run as root to be able to store files in dir mounted as a hostPath volume
       containers:
       - name: $POLICY_AGENT_APP_NAME
         image: $POLICY_AGENT_IMAGE
@@ -29,6 +32,10 @@ spec:
         volumeMounts:
         - mountPath: $POLICY_AGENT_CONFIG_MOUNT_PATH
           name: pa-conf-name
+        - mountPath: $POLICY_AGENT_CONTAINER_MNT_DIR
+          name: pa-pv-data-name
         - mountPath: $POLICY_AGENT_DATA_MOUNT_PATH
           name: pa-data-name
       volumes:
@@ -39,4 +46,10 @@ spec:
       - configMap:
           defaultMode: 420
           name: $POLICY_AGENT_DATA_CONFIGMAP_NAME
-        name: pa-data-name
\ No newline at end of file
+        name: pa-data-name
+      - persistentVolumeClaim:
+          claimName: $POLICY_AGENT_DATA_PVC_NAME
+        name: pa-pv-data-name
+# Selector will be set the first time the pod is started
+      nodeSelector:
+
index 49111d8..5c5b08d 100644 (file)
@@ -53,6 +53,8 @@ app:
   # Location of the component configuration file. The file will only be used if the Consul database is not used;
   # configuration from the Consul will override the file.
   filepath: /opt/app/policy-agent/data/application_configuration.json
+  # path where the service can store data
+  vardata-directory: /var/policy-management-service
   webclient:
     # Configuration of the trust store used for the HTTP client (outgoing requests)
     # The file location and the password for the truststore is only relevant if trust-store-used == true
index e89fc86..2261151 100644 (file)
@@ -37,6 +37,7 @@ services:
       - HOSTNAME=${POLICY_AGENT_CONFIG_KEY}
     volumes:
     - ${POLICY_AGENT_HOST_MNT_DIR}/$POLICY_AGENT_CONFIG_FILE:${POLICY_AGENT_CONFIG_MOUNT_PATH}/$POLICY_AGENT_CONFIG_FILE
+    - ${POLICY_AGENT_HOST_MNT_DIR}/db:${POLICY_AGENT_CONTAINER_MNT_DIR}
     labels:
       - "nrttest_app=PA"
       - "nrttest_dp=${POLICY_AGENT_DISPLAY_NAME}"
diff --git a/test/simulator-group/policy_agent/pv.yaml b/test/simulator-group/policy_agent/pv.yaml
new file mode 100644 (file)
index 0000000..332b341
--- /dev/null
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: $POLICY_AGENT_DATA_PV_NAME
+  annotations:
+    pv.beta.kubernetes.io/gid: "999"
+  labels:
+    run: $POLICY_AGENT_APP_NAME
+    autotest: PA
+spec:
+  storageClassName: pa-standard
+  capacity:
+    storage: 1Mi
+  accessModes:
+    - ReadWriteOnce
+  persistentVolumeReclaimPolicy: Delete
+  hostPath:
+    path: "/tmp/$POLICY_AGENT_PV_PATH"
diff --git a/test/simulator-group/policy_agent/pvc.yaml b/test/simulator-group/policy_agent/pvc.yaml
new file mode 100644 (file)
index 0000000..a62e130
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: $POLICY_AGENT_DATA_PVC_NAME
+  namespace: $KUBE_NONRTRIC_NAMESPACE
+  labels:
+    run: $POLICY_AGENT_APP_NAME
+    autotest: PA
+spec:
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Mi
+  storageClassName: pa-standard
+  volumeMode: Filesystem
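
Once the agent is started in kube mode, the binding between the generated PV and this PVC could be verified with standard kubectl queries (namespace is illustrative):

    kubectl get pvc -l autotest=PA -n nonrtric
    kubectl get pv -l autotest=PA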