Updated tests PMS persistency 96/5996/1
author    BjornMagnussonXA <bjorn.magnusson@est.tech>
          Tue, 4 May 2021 07:21:24 +0000 (09:21 +0200)
committer BjornMagnussonXA <bjorn.magnusson@est.tech>
          Tue, 4 May 2021 07:22:01 +0000 (09:22 +0200)
Replaced kubeproxy (mitmproxy) with a more efficient, locally built http/https proxy

Issue-ID: NONRTRIC-486
Signed-off-by: BjornMagnussonXA <bjorn.magnusson@est.tech>
Change-Id: Ib976cc2b2ef4856ba4c2665c90cf6b6525972f14

28 files changed:
test/auto-test/FTC805.sh
test/auto-test/FTC_HELM_RECIPE_CHERRY.sh [moved from test/auto-test/FTC_HELM-RECIPE.sh with 99% similarity]
test/auto-test/FTC_HELM_RECIPE_DAWN.sh [new file with mode: 0755]
test/common/agent_api_functions.sh
test/common/create_policies_process.py
test/common/delete_policies_process.py
test/common/ecs_api_functions.sh
test/common/kube_proxy_api_functions.sh
test/common/ricsimulator_api_functions.sh
test/common/test_env-onap-guilin.sh
test/common/test_env-onap-honolulu.sh
test/common/test_env-onap-istanbul.sh
test/common/test_env-oran-cherry.sh
test/common/test_env-oran-dawn.sh
test/common/testcase_common.sh
test/common/testengine_config.sh
test/http-https-proxy/.gitignore [new file with mode: 0644]
test/http-https-proxy/Dockerfile [new file with mode: 0644]
test/http-https-proxy/README.md [new file with mode: 0644]
test/http-https-proxy/basic_test.sh [new file with mode: 0755]
test/http-https-proxy/cert/cert.crt [new file with mode: 0644]
test/http-https-proxy/cert/generate_cert_and_key.sh [new file with mode: 0755]
test/http-https-proxy/cert/key.crt [new file with mode: 0644]
test/http-https-proxy/cert/pass [new file with mode: 0644]
test/http-https-proxy/http_proxy.js [new file with mode: 0644]
test/http-https-proxy/proxy-build-start.sh [new file with mode: 0755]
test/simulator-group/kubeproxy/app.yaml
test/simulator-group/pvc-cleaner/pvc-cleaner.yaml [new file with mode: 0644]

index 13f534e..c563bb0 100755 (executable)
@@ -208,7 +208,6 @@ for __httpx in $TESTED_PROTOCOLS ; do
 
         start_timer "Restore $((NUM_POLICIES_PER_RIC*$NUM_RICS)) policies after restart over $interface using "$__httpx
 
-
         api_equal json:policies $INSTANCES 500
 
         for ((i=1; i<=$NUM_RICS; i++))
@@ -253,7 +252,6 @@ for __httpx in $TESTED_PROTOCOLS ; do
             sim_equal ricsim_g1_$i num_instances 0
         done
 
-
         for ((i=1; i<=$NUM_RICS; i++))
         do
             if [ $interface == "REST+SDNC" ]; then
similarity index 99%
rename from test/auto-test/FTC_HELM-RECIPE.sh
rename to test/auto-test/FTC_HELM_RECIPE_CHERRY.sh
index cbebf60..7465b40 100755 (executable)
@@ -17,7 +17,7 @@
 #  ============LICENSE_END=================================================
 #
 
-TC_ONELINE_DESCR="Sanity test of Non-RT RIC Helm recepie - all components"
+TC_ONELINE_DESCR="Sanity test of Non-RT RIC Helm recipe - all components - CHERRY release"
 
 #App names to include in the test when running docker, space separated list
 DOCKER_INCLUDED_IMAGES="" # Not used -  KUBE only test script
@@ -28,7 +28,7 @@ KUBE_INCLUDED_IMAGES=" MR CR  PRODSTUB KUBEPROXY"
 KUBE_PRESTARTED_IMAGES=" PA RICSIM CP ECS RC SDNC"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ORAN-CHERRY ORAN-DAWN"
+SUPPORTED_PROFILES="ORAN-CHERRY"
 #Supported run modes
 SUPPORTED_RUNMODES="KUBE"
 
diff --git a/test/auto-test/FTC_HELM_RECIPE_DAWN.sh b/test/auto-test/FTC_HELM_RECIPE_DAWN.sh
new file mode 100755 (executable)
index 0000000..7d8f079
--- /dev/null
@@ -0,0 +1,401 @@
+#!/usr/bin/env bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2020 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+TC_ONELINE_DESCR="Sanity test of Non-RT RIC Helm recipe - all components - DAWN release"
+
+#App names to include in the test when running docker, space separated list
+DOCKER_INCLUDED_IMAGES="" # Not used -  KUBE only test script
+
+#App names to include in the test when running kubernetes, space separated list
+KUBE_INCLUDED_IMAGES=" MR CR  PRODSTUB KUBEPROXY"
+#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
+KUBE_PRESTARTED_IMAGES=" PA RICSIM CP ECS RC SDNC"
+
+#Supported test environment profiles
+SUPPORTED_PROFILES="ORAN-DAWN"
+#Supported run modes
+SUPPORTED_RUNMODES="KUBE"
+
+. ../common/testcase_common.sh $@
+. ../common/agent_api_functions.sh
+. ../common/ricsimulator_api_functions.sh
+. ../common/ecs_api_functions.sh
+. ../common/prodstub_api_functions.sh
+. ../common/cr_api_functions.sh
+. ../common/rapp_catalogue_api_functions.sh
+. ../common/mr_api_functions.sh
+. ../common/control_panel_api_functions.sh
+. ../common/controller_api_functions.sh
+. ../common/kube_proxy_api_functions.sh
+
+setup_testenvironment
+
+#### TEST BEGIN ####
+
+use_mr_http       #MR only supports http?
+use_cr_https
+use_agent_rest_https
+use_sdnc_https
+use_simulator_https
+use_ecs_rest_https
+use_prod_stub_https
+if [ $ECS_VERSION == "V1-1" ]; then
+    use_rapp_catalogue_http # https not yet supported
+else
+    ########################################use_rapp_catalogue_https
+    use_rapp_catalogue_http
+fi
+
+echo -e "$RED CHECK WHY RC HTTPS DOES NOT WORK $ERED"
+
+###############################use_control_panel_https
+use_control_panel_http
+
+if [ "$PMS_VERSION" == "V1" ]; then
+   echo "PMS VERSION 2 (V2) is required"
+   exit 1
+fi
+
+clean_environment
+
+pms_kube_pvc_reset
+
+ecs_kube_pvc_reset
+
+start_kube_proxy
+
+STD_NUM_RICS=2
+OSC_NUM_RICS=2
+
+start_ric_simulators a1-sim-osc $STD_NUM_RICS OSC_2.1.0
+echo " RIC MAPPING a1-sim-osc-0 : ric1"
+echo " RIC MAPPING a1-sim-osc-1 : ric2"
+
+start_ric_simulators a1-sim-std $STD_NUM_RICS STD_1.1.3
+echo " RIC MAPPING a1-sim-std-0 : ric3"
+echo " RIC MAPPING a1-sim-std-1 : ric4"
+
+start_ric_simulators a1-sim-std2 $STD_NUM_RICS STD_2.0.0
+echo " RIC MAPPING a1-sim-std2-0 : ric5"
+echo " RIC MAPPING a1-sim-std2-1 : ric6"
+
+start_mr
+
+start_control_panel
+
+start_sdnc
+
+start_policy_agent
+
+start_cr
+
+start_prod_stub
+
+start_ecs NOPROXY
+
+set_ecs_trace
+
+start_rapp_catalogue
+
+set_agent_trace
+
+#### Test RAPP Catalogue ####
+
+rapp_cat_api_get_services 200 EMPTY
+
+rapp_cat_api_put_service 201 "Emergency-response-app" v1 "Emergency-response-app" "Emergency-response-app"
+
+rapp_cat_api_get_services 200 "Emergency-response-app" v1 "Emergency-response-app" "Emergency-response-app"
+
+#Check the number of services
+rc_equal json:services 1
+
+api_get_status 200
+
+#### Test Policy Management Service ####
+
+# Print the A1 version for STD 1.1.X
+for ((i=0; i<$STD_NUM_RICS; i++))
+do
+    sim_print "a1-sim-std-"$i interface
+done
+
+# Print the A1 version for STD 2.0.X
+for ((i=0; i<$STD_NUM_RICS; i++))
+do
+   sim_print "a1-sim-std2-"$i interface
+done
+
+# Print the A1 version for OSC 2.1.X
+for ((i=0; i<$OSC_NUM_RICS; i++))
+do
+    sim_print "a1-sim-osc-"$i interface
+done
+
+# Load the policy types in STD 2
+for ((i=0; i<$STD_NUM_RICS; i++))
+do
+   sim_put_policy_type 201 "a1-sim-std2-"$i STD_QOS_0_2_0 testdata/STD2/sim_qos.json
+   sim_put_policy_type 201 "a1-sim-std2-"$i STD_QOS2_0.1.0 testdata/STD2/sim_qos2.json
+done
+
+# Load the policy types in OSC
+for ((i=0; i<$OSC_NUM_RICS; i++))
+do
+    sim_put_policy_type 201 "a1-sim-osc-"$i 1 testdata/OSC/sim_1.json
+    sim_put_policy_type 201 "a1-sim-osc-"$i 2 testdata/OSC/sim_2.json
+done
+
+# Check that all rics are synced in
+api_equal json:rics 6 300
+
+#Check the number of schemas and the individual schemas
+api_equal json:policy-types 5 300
+
+for ((i=0; i<$STD_NUM_RICS; i++))
+do
+    ricid=$((3+$i))
+    api_equal json:policy-types?ric_id=ric$ricid 1 120
+done
+
+for ((i=0; i<$STD_NUM_RICS; i++))
+do
+   ricid=$((5+$i))
+   api_equal json:policy-types?ric_id=ric$ricid 2 120
+done
+
+for ((i=0; i<$OSC_NUM_RICS; i++))
+do
+    ricid=$((1+$i))
+    api_equal json:policy-types?ric_id=ric$ricid 2 120
+done
+
+#Check the schemas in STD 2
+for ((i=0; i<$OSC_NUM_RICS; i++))
+do
+   ricid=$((5+$i))
+   api_get_policy_type 200 STD_QOS_0_2_0 testdata/STD2/qos-agent-modified.json
+   api_get_policy_type 200 STD_QOS2_0.1.0 testdata/STD2/qos2-agent-modified.json
+done
+
+# Check the schemas in OSC
+for ((i=0; i<$OSC_NUM_RICS; i++))
+do
+    api_get_policy_type 200 1 testdata/OSC/1-agent-modified.json
+    api_get_policy_type 200 2 testdata/OSC/2-agent-modified.json
+done
+
+if [ "$PMS_VERSION" == "V2" ]; then
+
+    api_equal json:policy-types 5 120
+
+    api_equal json:policies 0
+
+    api_equal json:policy-instances 0
+else
+
+    api_equal json:policy_schemas 5 120
+
+    api_equal json:policy_types 5
+
+    api_equal json:policies 0
+
+    api_equal json:policy_ids 0
+fi
+
+api_put_service 201 "Emergency-response-app" 0 "$CR_SERVICE_PATH/ER-app"
+
+# Create policies in STD
+for ((i=0; i<$STD_NUM_RICS; i++))
+do
+    ricid=$((3+$i))
+    generate_policy_uuid
+    api_put_policy 201 "Emergency-response-app" ric$ricid NOTYPE $((1100+$i)) NOTRANSIENT $CR_SERVICE_PATH/"std2" testdata/STD/pi1_template.json 1
+    generate_policy_uuid
+    api_put_policy 201 "Emergency-response-app" ric$ricid NOTYPE $((1200+$i)) NOTRANSIENT $CR_SERVICE_PATH/"std2" testdata/STD/pi1_template.json 1
+done
+
+#Create policies in STD 2
+for ((i=0; i<$STD_NUM_RICS; i++))
+do
+   ricid=$((5+$i))
+   generate_policy_uuid
+   api_put_policy 201 "Emergency-response-app" ric$ricid STD_QOS_0_2_0 $((2100+$i)) NOTRANSIENT $CR_SERVICE_PATH/"std2" testdata/STD2/pi_qos_template.json 1
+   generate_policy_uuid
+   api_put_policy 201 "Emergency-response-app" ric$ricid STD_QOS2_0.1.0 $((2200+$i)) NOTRANSIENT $CR_SERVICE_PATH/"std2" testdata/STD2/pi_qos2_template.json 1
+done
+
+# Create policies in OSC
+for ((i=0; i<$OSC_NUM_RICS; i++))
+do
+    ricid=$((1+$i))
+    generate_policy_uuid
+    api_put_policy 201 "Emergency-response-app" ric$ricid 1 $((3100+$i)) NOTRANSIENT $CR_SERVICE_PATH/"osc" testdata/OSC/pi1_template.json 1
+    generate_policy_uuid
+    api_put_policy 201 "Emergency-response-app" ric$ricid 2 $((3200+$i)) NOTRANSIENT $CR_SERVICE_PATH/"osc" testdata/OSC/pi2_template.json 1
+done
+
+
+# Check the number of policies in STD and STD2
+for ((i=0; i<$STD_NUM_RICS; i++))
+do
+    sim_equal "a1-sim-std-"$i num_instances 2
+    sim_equal "a1-sim-std2-"$i num_instances 2
+done
+
+# Check the number of policies in OSC
+for ((i=0; i<$STD_NUM_RICS; i++))
+do
+    sim_equal "a1-sim-osc-"$i num_instances 2
+done
+
+stop_policy_agent
+
+start_stopped_policy_agent
+
+# Check PMS state after restart
+
+sleep_wait 200
+
+if [ "$PMS_VERSION" == "V2" ]; then
+
+    api_equal json:policy-types 5 120
+
+    api_equal json:policies 12
+
+    api_equal json:policy-instances 12
+else
+
+    api_equal json:policy_schemas 5 120
+
+    api_equal json:policy_types 5
+
+    api_equal json:policies 12
+
+    api_equal json:policy_ids 12
+fi
+
+# Check the number of policies in STD and STD2
+for ((i=0; i<$STD_NUM_RICS; i++))
+do
+    sim_equal "a1-sim-std-"$i num_instances 2
+    sim_equal "a1-sim-std2-"$i num_instances 2
+done
+
+# Check the number of policies in OSC
+for ((i=0; i<$STD_NUM_RICS; i++))
+do
+    sim_equal "a1-sim-osc-"$i num_instances 2
+done
+
+
+echo "ADD EVENT/STATUS CHECK"
+echo "ADD MR CHECK"
+
+FLAT_A1_EI="1"
+
+ecs_api_admin_reset
+
+CB_JOB="$PROD_STUB_SERVICE_PATH$PROD_STUB_JOB_CALLBACK"
+CB_SV="$PROD_STUB_SERVICE_PATH$PROD_STUB_SUPERVISION_CALLBACK"
+TARGET1="$RIC_SIM_HTTPX://a1-sim-std2-0.a1-sim:$RIC_SIM_PORT/datadelivery"
+TARGET2="$RIC_SIM_HTTPX://a1-sim-std2-1.a1-sim:$RIC_SIM_PORT/datadelivery"
+
+STATUS1="$CR_SERVICE_PATH/job1-status"
+STATUS2="$CR_SERVICE_PATH/job2-status"
+
+prodstub_arm_producer 200 prod-a
+prodstub_arm_type 200 prod-a type1
+prodstub_arm_job_create 200 prod-a job1
+prodstub_arm_job_create 200 prod-a job2
+
+
+### ecs status
+ecs_api_service_status 200
+
+## Setup prod-a
+if [ $ECS_VERSION == "V1-1" ]; then
+    ecs_api_edp_put_producer 201 prod-a $CB_JOB/prod-a $CB_SV/prod-a type1 testdata/ecs/ei-type-1.json
+
+    ecs_api_edp_get_producer 200 prod-a $CB_JOB/prod-a $CB_SV/prod-a type1 testdata/ecs/ei-type-1.json
+else
+    ecs_api_edp_put_type_2 201 type1 testdata/ecs/ei-type-1.json
+    ecs_api_edp_get_type_2 200 type1
+    ecs_api_edp_get_type_ids 200 type1
+
+    ecs_api_edp_put_producer_2 201 prod-a $CB_JOB/prod-a $CB_SV/prod-a type1
+    ecs_api_edp_put_producer_2 200 prod-a $CB_JOB/prod-a $CB_SV/prod-a type1
+fi
+
+ecs_api_edp_get_producer_status 200 prod-a ENABLED
+
+
+## Create a job for prod-a
+## job1 - prod-a
+if [  -z "$FLAT_A1_EI" ]; then
+    ecs_api_a1_put_job 201 type1 job1 $TARGET1 ricsim_g3_1 testdata/ecs/job-template.json
+else
+    ecs_api_a1_put_job 201 job1 type1 $TARGET1 ricsim_g3_1 $STATUS1 testdata/ecs/job-template.json
+fi
+
+# Check the job data in the producer
+if [ $ECS_VERSION == "V1-1" ]; then
+    prodstub_check_jobdata 200 prod-a job1 type1 $TARGET1 ricsim_g3_1 testdata/ecs/job-template.json
+else
+    prodstub_check_jobdata_2 200 prod-a job1 type1 $TARGET1 ricsim_g3_1 testdata/ecs/job-template.json
+fi
+
+## Create a second job for prod-a
+## job2 - prod-a
+if [  -z "$FLAT_A1_EI" ]; then
+    ecs_api_a1_put_job 201 type1 job2 $TARGET2 ricsim_g3_2 testdata/ecs/job-template.json
+else
+    ecs_api_a1_put_job 201 job2 type1 $TARGET2 ricsim_g3_2 $STATUS2 testdata/ecs/job-template.json
+fi
+
+# Check the job data in the producer
+if [ $ECS_VERSION == "V1-1" ]; then
+    prodstub_check_jobdata 200 prod-a job2 type1 $TARGET2 ricsim_g3_2 testdata/ecs/job-template.json
+else
+    prodstub_check_jobdata_2 200 prod-a job2 type1 $TARGET2 ricsim_g3_2 testdata/ecs/job-template.json
+fi
+
+stop_ecs
+
+start_stopped_ecs
+
+# Check ECS status after restart
+
+if [  -z "$FLAT_A1_EI" ]; then
+    ecs_api_a1_get_job_status 200 type1 job1 DISABLED
+    ecs_api_a1_get_job_status 200 type1 job2 DISABLED
+else
+    ecs_api_a1_get_job_status 200 job1 DISABLED
+    ecs_api_a1_get_job_status 200 job2 DISABLED
+fi
+
+check_policy_agent_logs
+check_ecs_logs
+check_sdnc_logs
+
+#### TEST COMPLETE ####
+
+store_logs          END
+
+print_result
index cb48d78..82dde44 100644 (file)
@@ -270,9 +270,13 @@ start_policy_agent() {
                fi
 
                # Keep the initial worker node in case the pod needs to be "restarted" - must be made to the same node due to a volume mounted on the host
-               __PA_WORKER_NODE=$(kubectl get pod -l "autotest=PA" -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.items[*].spec.nodeName}')
-               if [ -z "$__PA_WORKER_NODE" ]; then
-                       echo -e $YELLOW" Cannot find worker node for pod for $POLICY_AGENT_APP_NAME, persistency may not work"$EYELLOW
+               if [ $retcode_i -eq 0 ]; then
+                       __PA_WORKER_NODE=$(kubectl get pod -l "autotest=PA" -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.items[*].spec.nodeName}')
+                       if [ -z "$__PA_WORKER_NODE" ]; then
+                               echo -e $YELLOW" Cannot find worker node for pod for $POLICY_AGENT_APP_NAME, persistency may not work"$EYELLOW
+                       fi
+               else
+                       echo -e $YELLOW" Persistency may not work for app $POLICY_AGENT_APP_NAME in multi-worker node config when running it as a prestarted app"$EYELLOW
                fi
 
                echo " Retrieving host and ports for service..."
@@ -374,6 +378,13 @@ stop_policy_agent() {
        echo -e $BOLD"Stopping $POLICY_AGENT_DISPLAY_NAME"$EBOLD
 
        if [ $RUNMODE == "KUBE" ]; then
+
+               __check_prestarted_image "PA"
+               if [ $? -eq 0 ]; then
+                       echo -e $YELLOW" Persistency may not work for app $POLICY_AGENT_APP_NAME in multi-worker node config when running it as a prestarted app"$EYELLOW
+                       __kube_scale deployment $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE 0
+                       return 0
+               fi
                __kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest PA
                echo "  Deleting the replica set - a new will be started when the app is started"
                tmp=$(kubectl delete rs -n $KUBE_NONRTRIC_NAMESPACE -l "autotest=PA")
@@ -404,6 +415,14 @@ start_stopped_policy_agent() {
 
        if [ $RUNMODE == "KUBE" ]; then
 
+               __check_prestarted_image "PA"
+               if [ $? -eq 0 ]; then
+                       echo -e $YELLOW" Persistency may not work for app $POLICY_AGENT_APP_NAME in multi-worker node config when running it as a prestarted app"$EYELLOW
+                       __kube_scale deployment $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
+                       __check_service_start $POLICY_AGENT_APP_NAME $PA_PATH$POLICY_AGENT_ALIVE_URL
+                       return 0
+               fi
+
                # Tie the PMS to the same worker node it was initially started on
                # A PVC of type hostPath is mounted to PMS, for persistent storage, so the PMS must always be on the node which mounted the volume
                if [ -z "$__PA_WORKER_NODE" ]; then
@@ -419,7 +438,6 @@ start_stopped_policy_agent() {
                        fi
                        __kube_scale deployment $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
                fi
-
        else
                docker start $POLICY_AGENT_APP_NAME &> ./tmp/.dockererr
                if [ $? -ne 0 ]; then
@@ -2170,6 +2188,25 @@ api_get_configuration() {
                fi
        fi
 
+       __log_test_pass
+       return 0
+}
+
+##########################################
+####     Reset types and instances    ####
+##########################################
+
+# Admin reset to remove all policies and services
+# All types and instances etc are removed - types and instances in a1 sims need to be removed separately
+# NOTE - only works in kubernetes and the pod should not be running
+# args: -
+# (Function for test scripts)
+
+pms_kube_pvc_reset() {
+       __log_test_start $@
+
+       __kube_clean_pvc $POLICY_AGENT_APP_NAME nonrtric policymanagementservice-vardata-pvc /var/policy-management-service/database
+
        __log_test_pass
        return 0
 }
\ No newline at end of file
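
For reference, a minimal sketch (assuming RUNMODE=KUBE and the common scripts sourced as in the recipe scripts above; the policy count is illustrative) of how a test script exercises PMS persistency with the new pvc reset helper:

```bash
clean_environment             # remove leftovers from previous runs
pms_kube_pvc_reset            # wipe the PMS PVC while the PMS pod is not running
start_kube_proxy
start_policy_agent
# ... create policies ...
stop_policy_agent             # scales the PMS resources to zero
start_stopped_policy_agent    # restarts the PMS on the same worker node (hostPath PVC)
api_equal json:policies 12    # verify the policies survived the restart
```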
index b97904b..480d5cb 100644 (file)
@@ -89,7 +89,8 @@ try:
         start=start
         stop=count*num_rics+start
 
-        total_retry_count=0
+        http_retry_count=0
+        connect_retry_count=0
 
         for i in range(start,stop):
             if (i%pids == (pid_id-1)):
@@ -97,6 +98,7 @@ try:
                 ric_id=(i%num_rics)+1
                 ric=ric_base+str(ric_id)
 
+                connect_ok=False
                 retry_cnt=5
                 while(retry_cnt>0):
                     try:
@@ -128,31 +130,35 @@ try:
                             resp=requests.put(url, data_out, headers=headers, verify=False, timeout=90)
                         else:
                             resp=requests.put(url, data_out, headers=headers, verify=False, timeout=90, proxies=proxydict)
+                        connect_ok=True
                     except Exception as e1:
-                        print("1Put failed for id:"+uuid+str(i)+ ", "+str(e1) + " "+traceback.format_exc())
-                        sys.exit()
-
-                    if (resp.status_code == None):
-                        print("1Put failed for id:"+uuid+str(i)+ ", expected response code: "+str(responsecode)+", got: None")
-                        sys.exit()
-
-                    if (resp.status_code != responsecode):
-                        if (resp.status_code == 503 ) and (retry_cnt > 1):
+                        if (retry_cnt > 1):
                             sleep(0.1)
                             retry_cnt -= 1
-                            total_retry_count += 1
+                            connect_retry_count += 1
                         else:
-                            print("1Put failed for id:"+uuid+str(i)+ ", expected response code: "+str(responsecode)+", got: "+str(resp.status_code))
-                            print(url_out)
-                            print(str(data_out))
+                            print("1Put failed for id:"+uuid+str(i)+ ", "+str(e1) + " "+traceback.format_exc())
                             sys.exit()
-                    else:
-                        retry_cnt=-1
 
-    if (total_retry_count > 0):
-        print("0 retries:"+str(total_retry_count))
-    else:
-        print("0")
+                    if (connect_ok == True):
+                        if (resp.status_code == None):
+                            print("1Put failed for id:"+uuid+str(i)+ ", expected response code: "+str(responsecode)+", got: None")
+                            sys.exit()
+
+                        if (resp.status_code != responsecode):
+                            if (resp.status_code >= 500) and (http_retry_count < 600 ) and (retry_cnt > 1):
+                                sleep(0.1)
+                                retry_cnt -= 1
+                                http_retry_count += 1
+                            else:
+                                print("1Put failed for id:"+uuid+str(i)+ ", expected response code: "+str(responsecode)+", got: "+str(resp.status_code))
+                                print(url_out)
+                                print(str(data_out))
+                                sys.exit()
+                        else:
+                            retry_cnt=-1
+
+    print("0 http retries:"+str(http_retry_count) + ", connect retries: "+str(connect_retry_count))
     sys.exit()
 
 except Exception as e:
index ec69e13..530e877 100644 (file)
@@ -56,11 +56,13 @@ try:
     if uuid == "NOUUID":
         uuid=""
 
-    total_retry_count=0
+    http_retry_count=0
+    connect_retry_count=0
 
     stop=count*num_rics+start
     for i in range(start,stop):
         if (i%pids == (pid_id-1)):
+            connect_ok=False
             retry_cnt=5
             while(retry_cnt>0):
                 if ("/v2/policies/" in baseurl):
@@ -72,27 +74,32 @@ try:
                         resp=requests.delete(url, verify=False, timeout=90)
                     else:
                         resp=requests.delete(url, verify=False, timeout=90, proxies=proxydict)
+                    connect_ok=True
                 except Exception as e1:
-                    print("1Delete failed for id:"+uuid+str(i)+ ", "+str(e1) + " "+traceback.format_exc())
-                    sys.exit()
-                if (resp.status_code == None):
-                    print("1Delete failed for id:"+uuid+str(i)+ ", expected response code: "+str(responsecode)+", got: None")
-                    sys.exit()
-                if (resp.status_code != responsecode):
-                    if (resp.status_code == 503 ) and (retry_cnt > 1):
+                    if (retry_cnt > 1):
                         sleep(0.1)
                         retry_cnt -= 1
-                        total_retry_count += 1
+                        connect_retry_count += 1
                     else:
-                        print("1Delete failed for id:"+uuid+str(i)+ ", expected response code: "+str(responsecode)+", got: "+str(resp.status_code)+str(resp.raw))
+                        print("1Delete failed for id:"+uuid+str(i)+ ", "+str(e1) + " "+traceback.format_exc())
                         sys.exit()
-                else:
-                    retry_cnt=-1
 
-    if (total_retry_count > 0):
-        print("0 retries:"+str(total_retry_count))
-    else:
-        print("0")
+                if (connect_ok == True):
+                    if (resp.status_code == None):
+                        print("1Delete failed for id:"+uuid+str(i)+ ", expected response code: "+str(responsecode)+", got: None")
+                        sys.exit()
+                    if (resp.status_code != responsecode):
+                        if (resp.status_code >= 500) and (http_retry_count < 600 ) and (retry_cnt > 1):
+                            sleep(0.1)
+                            retry_cnt -= 1
+                            http_retry_count += 1
+                        else:
+                            print("1Delete failed for id:"+uuid+str(i)+ ", expected response code: "+str(responsecode)+", got: "+str(resp.status_code)+str(resp.raw))
+                            sys.exit()
+                    else:
+                        retry_cnt=-1
+
+    print("0 http retries:"+str(http_retry_count) + ", connect retries: "+str(connect_retry_count))
     sys.exit()
 
 except Exception as e:
index ba6af92..cf5f580 100644 (file)
@@ -259,9 +259,13 @@ start_ecs() {
                # A PVC of type hostPath is mounted to ECS, for persistent storage, so the ECS must always be on the node which mounted the volume
 
                # Keep the initial worker node in case the pod needs to be "restarted" - must be made to the same node due to a volume mounted on the host
-               __ECS_WORKER_NODE=$(kubectl get pod -l "autotest=ECS" -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.items[*].spec.nodeName}')
-               if [ -z "$__ECS_WORKER_NODE" ]; then
-                       echo -e $YELLOW" Cannot find worker node for pod for $ECS_APP_NAME, persistency may not work"$EYELLOW
+               if [ $retcode_i -eq 0 ]; then
+                       __ECS_WORKER_NODE=$(kubectl get pod -l "autotest=ECS" -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.items[*].spec.nodeName}')
+                       if [ -z "$__ECS_WORKER_NODE" ]; then
+                               echo -e $YELLOW" Cannot find worker node for pod for $ECS_APP_NAME, persistency may not work"$EYELLOW
+                       fi
+               else
+                       echo -e $YELLOW" Persistency may not work for app $ECS_APP_NAME in multi-worker node config when running it as a prestarted app"$EYELLOW
                fi
 
                echo " Retrieving host and ports for service..."
@@ -358,6 +362,14 @@ stop_ecs() {
        echo -e $BOLD"Stopping $ECS_DISPLAY_NAME"$EBOLD
 
        if [ $RUNMODE == "KUBE" ]; then
+
+               __check_prestarted_image "ECS"
+               if [ $? -eq 0 ]; then
+                       echo -e $YELLOW" Persistency may not work for app $ECS_APP_NAME in multi-worker node config when running it as a prestarted app"$EYELLOW
+                       __kube_scale deployment $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE 0
+                       return 0
+               fi
+
                __kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest ECS
                echo "  Deleting the replica set - a new will be started when the app is started"
                tmp=$(kubectl delete rs -n $KUBE_NONRTRIC_NAMESPACE -l "autotest=ECS")
@@ -388,6 +400,14 @@ start_stopped_ecs() {
 
        if [ $RUNMODE == "KUBE" ]; then
 
+               __check_prestarted_image "ECS"
+               if [ $? -eq 0 ]; then
+                       echo -e $YELLOW" Persistency may not work for app $ECS_APP_NAME in multi-worker node config when running it as a prestarted app"$EYELLOW
+                       __kube_scale deployment $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
+                       __check_service_start $ECS_APP_NAME $ECS_PATH$ECS_ALIVE_URL
+                       return 0
+               fi
+
                # Tie the PMS to the same worker node it was initially started on
                # A PVC of type hostPath is mounted to PMS, for persistent storage, so the PMS must always be on the node which mounted the volume
                if [ -z "$__ECS_WORKER_NODE" ]; then
@@ -403,7 +423,6 @@ start_stopped_ecs() {
                        fi
                        __kube_scale deployment $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
                fi
-
        else
                docker start $ECS_APP_NAME &> ./tmp/.dockererr
                if [ $? -ne 0 ]; then
@@ -1752,6 +1771,25 @@ ecs_api_admin_reset() {
                fi
        done
 
+       __log_test_pass
+       return 0
+}
+
+##########################################
+####     Reset jobs and producers     ####
+##########################################
+
+
+# Admin reset to remove all data in ecs; jobs, producers etc
+# NOTE - only works in kubernetes and the pod should not be running
+# args: -
+# (Function for test scripts)
+
+ecs_kube_pvc_reset() {
+       __log_test_start $@
+
+       __kube_clean_pvc $ECS_APP_NAME nonrtric enrichmentservice-pvc /var/enrichment-coordinator-service/database
+
        __log_test_pass
        return 0
 }
\ No newline at end of file
index 5cac74c..c352ff1 100644 (file)
@@ -26,7 +26,7 @@
 # arg: <image-tag-suffix> (selects staging, snapshot, release etc)
 # <image-tag-suffix> is present only for images with staging, snapshot,release tags
 __KUBEPROXY_imagesetup() {
-       __check_and_create_image_var KUBEPROXY "KUBE_PROXY_IMAGE" "KUBE_PROXY_IMAGE_BASE" "KUBE_PROXY_IMAGE_TAG" REMOTE_PROXY "$KUBE_PROXY_DISPLAY_NAME"
+       __check_and_create_image_var KUBEPROXY "KUBE_PROXY_IMAGE" "KUBE_PROXY_IMAGE_BASE" "KUBE_PROXY_IMAGE_TAG" LOCAL "$KUBE_PROXY_DISPLAY_NAME"
 }
 
 # Pull image from remote repo or use locally built image
@@ -35,14 +35,29 @@ __KUBEPROXY_imagesetup() {
 # <pull-policy-original> Shall be used for images that does not allow overriding
 # Both var may contain: 'remote', 'remote-remove' or 'local'
 __KUBEPROXY_imagepull() {
-       __check_and_pull_image $2 "$KUBE_PROXY_DISPLAY_NAME" $KUBE_PROXY_APP_NAME KUBE_PROXY_IMAGE
+       echo -e $RED"Image for app KUBEPROXY shall never be pulled from remote repo"$ERED
 }
 
 # Build image (only for simulator or interfaces stubs owned by the test environment)
 # arg: <image-tag-suffix> (selects staging, snapshot, release etc)
 # <image-tag-suffix> is present only for images with staging, snapshot,release tags
 __KUBEPROXY_imagebuild() {
-       echo -e $RED"Image for app KUBEPROXY shall never be built"$ERED
+       cd ../http-https-proxy
+       echo " Building KUBEPROXY - $KUBE_PROXY_DISPLAY_NAME - image: $KUBE_PROXY_IMAGE"
+       docker build  --build-arg NEXUS_PROXY_REPO=$NEXUS_PROXY_REPO -t $KUBE_PROXY_IMAGE . &> .dockererr
+       if [ $? -eq 0 ]; then
+               echo -e  $GREEN"  Build Ok"$EGREEN
+               __retag_and_push_image KUBE_PROXY_IMAGE
+               if [ $? -ne 0 ]; then
+                       exit 1
+               fi
+       else
+               echo -e $RED"  Build Failed"$ERED
+               ((RES_CONF_FAIL++))
+               cat .dockererr
+               echo -e $RED"Exiting...."$ERED
+               exit 1
+       fi
 }
 
 # Generate a string for each included image using the app display name and a docker images format string
@@ -65,7 +80,7 @@ __KUBEPROXY_kube_scale_zero() {
 # Scale kubernetes resources to zero and wait until this has been accomplished, if relevant. If not relevant to scale, then do no action.
 # This function is called for prestarted apps not managed by the test script.
 __KUBEPROXY_kube_scale_zero_and_wait() {
-       echo -e $RED" Http proxy replicas kept as is"$ERED
+       echo -e $RED" KUBEPROXY app is not scaled in this state"$ERED
 }
 
 # Delete all kube resouces for the app
@@ -157,7 +172,7 @@ start_kube_proxy() {
 
                if [ "$CLUSTER_KUBE_PROXY_HOST" == "localhost" ]; then
                        #Local host found
-                       echo -e $YELLOW" The test environment svc $KUBE_PROXY_APP_NAME host is: $CLUSTER_KUBE_PROXY_HOST. The proxy (mitmproxy) used by test script requires an ip so the ip is assumed and set to 127.0.0.1"$EYELLOW
+                       echo -e $YELLOW" The test environment svc $KUBE_PROXY_APP_NAME host is: $CLUSTER_KUBE_PROXY_HOST"$EYELLOW
                        CLUSTER_KUBE_PROXY_HOST="127.0.0.1"
                else
                        if [ -z "$CLUSTER_KUBE_PROXY_HOST" ]; then
index 785ff9a..a835218 100644 (file)
@@ -475,7 +475,7 @@ sim_post_delete_instances() {
 # (Function for test scripts)
 sim_post_delete_all() {
        __log_conf_start $@
-       if [ $# -ne 3 ]; then
+       if [ $# -ne 2 ]; then
                __print_err "<response-code> <numeric-id>" $@
                return 1
        fi
index 59a1a0c..8dd205d 100755 (executable)
@@ -134,9 +134,9 @@ ONAP_DMAAPMR_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.1.18"
 #No local image for ONAP DMAAP-MR, remote image always used
 
 #Kube proxy remote image and tag
-KUBE_PROXY_IMAGE_BASE="mitmproxy/mitmproxy"
-KUBE_PROXY_IMAGE_TAG_REMOTE_PROXY="6.0.2"
-#No local image for http proxy, remote image always used
+KUBE_PROXY_IMAGE_BASE="nodejs-http-proxy"
+KUBE_PROXY_IMAGE_TAG_LOCAL="latest"
+#No remote image for kube proxy, local image always used
 
 
 # List of app short names produced by the project
index 337d3e1..62dfab8 100755 (executable)
@@ -157,9 +157,9 @@ ONAP_DMAAPMR_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.1.18"
 #No local image for ONAP DMAAP-MR, remote image always used
 
 #Kube proxy remote image and tag
-KUBE_PROXY_IMAGE_BASE="mitmproxy/mitmproxy"
-KUBE_PROXY_IMAGE_TAG_REMOTE_PROXY="6.0.2"
-#No local image for http proxy, remote image always used
+KUBE_PROXY_IMAGE_BASE="nodejs-http-proxy"
+KUBE_PROXY_IMAGE_TAG_LOCAL="latest"
+#No remote image for kube proxy, local image always used
 
 # List of app short names produced by the project
 PROJECT_IMAGES_APP_NAMES="PA SDNC"
index 0c06813..423d3ef 100644 (file)
@@ -157,9 +157,9 @@ ONAP_DMAAPMR_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.1.18"
 #No local image for ONAP DMAAP-MR, remote image always used
 
 #Kube proxy remote image and tag
-KUBE_PROXY_IMAGE_BASE="mitmproxy/mitmproxy"
-KUBE_PROXY_IMAGE_TAG_REMOTE_PROXY="6.0.2"
-#No local image for http proxy, remote image always used
+KUBE_PROXY_IMAGE_BASE="nodejs-http-proxy"
+KUBE_PROXY_IMAGE_TAG_LOCAL="latest"
+#No remote image for kube proxy, local image always used
 
 # List of app short names produced by the project
 PROJECT_IMAGES_APP_NAMES="PA SDNC"
index ca994aa..f75ff1d 100755 (executable)
@@ -161,9 +161,9 @@ ONAP_DMAAPMR_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.1.18"
 #No local image for ONAP DMAAP-MR, remote image always used
 
 #Kube proxy remote image and tag
-KUBE_PROXY_IMAGE_BASE="mitmproxy/mitmproxy"
-KUBE_PROXY_IMAGE_TAG_REMOTE_PROXY="6.0.2"
-#No local image for http proxy, remote image always used
+KUBE_PROXY_IMAGE_BASE="nodejs-http-proxy"
+KUBE_PROXY_IMAGE_TAG_LOCAL="latest"
+#No remote image for kube proxy, local image always used
 
 # List of app short names produced by the project
 PROJECT_IMAGES_APP_NAMES="PA ECS CP SDNC RC RICSIM"
index 211e068..f83b7b0 100755 (executable)
@@ -180,9 +180,9 @@ ONAP_DMAAPMR_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.1.18"
 #No local image for ONAP DMAAP-MR, remote image always used
 
 #Kube proxy remote image and tag
-KUBE_PROXY_IMAGE_BASE="mitmproxy/mitmproxy"
-KUBE_PROXY_IMAGE_TAG_REMOTE_PROXY="6.0.2"
-#No local image for http proxy, remote image always used
+KUBE_PROXY_IMAGE_BASE="nodejs-http-proxy"
+KUBE_PROXY_IMAGE_TAG_LOCAL="latest"
+#No remote image for kube proxy, local image always used
 
 # List of app short names produced by the project
 PROJECT_IMAGES_APP_NAMES="PA ECS CP RC RICSIM NGW"  # Add SDNC here if oran image is used
index 1f6d135..b4ed9b4 100755 (executable)
@@ -1894,7 +1894,7 @@ __kube_create_configmap() {
 
 # This function runs a kubectl cmd where a single output value is expected, for example get ip with jsonpath filter.
 # The function retries up to the timeout given in the cmd flag '--cluster-timeout'
-# args: <full kubectl cmd with parameters
+# args: <full kubectl cmd with parameters>
 # (Not for test scripts)
 __kube_cmd_with_timeout() {
        TS_TMP=$(($SECONDS+$CLUSTER_TIME_OUT))
@@ -1912,6 +1912,39 @@ __kube_cmd_with_timeout() {
        done
 }
 
+# This function starts a pod that cleans the contents of a path mounted as a pvc
+# After this action the pod should terminate
+# This should only be executed when the pod owning the pvc is not running
+# args: <appname> <namespace> <pvc-name> <path-to-remove>
+# (Not for test scripts)
+__kube_clean_pvc() {
+
+       export PVC_CLEANER_NAMESPACE=$2
+       export PVC_CLEANER_CLAIMNAME=$3
+       export PVC_CLEANER_RM_PATH=$4
+       input_yaml=$SIM_GROUP"/pvc-cleaner/"pvc-cleaner.yaml
+       output_yaml=$PWD/tmp/$2-pvc-cleaner.yaml
+
+       envsubst < $input_yaml > $output_yaml
+
+       kubectl delete -f $output_yaml #> /dev/null 2>&1    # Delete the previous terminated pod - if existing
+
+       __kube_create_instance pod pvc-cleaner $input_yaml $output_yaml
+       if [ $? -ne 0 ]; then
+               echo -e $YELLOW" Could not clean pvc for app: $1 - persistent storage not clean - tests may not work"$EYELLOW
+               return 1
+       fi
+
+       term_ts=$(($SECONDS+30))
+       while [ $term_ts -gt $SECONDS ]; do
+               pod_status=$(kubectl get pod pvc-cleaner -n $PVC_CLEANER_NAMESPACE --no-headers -o custom-columns=":status.phase")
+               if [ "$pod_status" == "Succeeded" ]; then
+                       return 0
+               fi
+       done
+       return 1
+}
+
 # This function scales or deletes all resources for app selected by the testcase.
 # args: -
 # (Not for test scripts)
@@ -1920,22 +1953,26 @@ __clean_kube() {
 
        # Scale prestarted or managed apps
        for imagename in $APP_SHORT_NAMES; do
-               __check_included_image $imagename
+               # A function name is created from the app short name
+               # for example app short name 'RICSIM' -> produce the function
+               # name __RICSIM_kube_scale_zero or __RICSIM_kube_scale_zero_and_wait
+               # This function is called and is expected to exist in the imported
+               # file for the ricsim test functions
+               # The resulting function impl shall scale the resources to 0
+               # For prestarted apps, the function waits until the resources are 0
+               # For included (not prestarted) apps, the scaling is just ordered
+               __check_prestarted_image $imagename
                if [ $? -eq 0 ]; then
-                       # A function name is created from the app short name
-                       # for example app short name 'RICMSIM' -> produce the function
-                       # name __RICSIM_kube_scale_zero or __RICSIM_kube_scale_zero_and_wait
-                       # This function is called and is expected to exist in the imported
-                       # file for the ricsim test functions
-                       # The resulting function impl shall scale the resources to 0
-                       __check_prestarted_image $imagename
+                       function_pointer="__"$imagename"_kube_scale_zero_and_wait"
+                       echo -e " Scaling all kube resources for app $BOLD $imagename $EBOLD to 0"
+                       $function_pointer
+               else
+                       __check_included_image $imagename
                        if [ $? -eq 0 ]; then
-                               function_pointer="__"$imagename"_kube_scale_zero_and_wait"
-                       else
                                function_pointer="__"$imagename"_kube_scale_zero"
+                               echo -e " Scaling all kube resources for app $BOLD $imagename $EBOLD to 0"
+                               $function_pointer
                        fi
-                       echo -e " Scaling all kube resources for app $BOLD $imagename $EBOLD to 0"
-                       $function_pointer
                fi
        done
 
index fd23617..311c2f5 100644 (file)
@@ -27,7 +27,7 @@ PROJECT_IMAGES="PA ECS SDNC CP NGW RICSIM RC"
 AVAILABLE_IMAGES_OVERRIDE="PA ECS SDNC CP NGW RICSIM RC"
 
 # List of available apps where the image is built by the test environment
-LOCAL_IMAGE_BUILD="MR CR PRODSTUB"
+LOCAL_IMAGE_BUILD="MR CR PRODSTUB KUBEPROXY"
 
 
 #Integrate a new app into the test environment
diff --git a/test/http-https-proxy/.gitignore b/test/http-https-proxy/.gitignore
new file mode 100644 (file)
index 0000000..3384196
--- /dev/null
@@ -0,0 +1 @@
+.dockererr
\ No newline at end of file
diff --git a/test/http-https-proxy/Dockerfile b/test/http-https-proxy/Dockerfile
new file mode 100644 (file)
index 0000000..e961500
--- /dev/null
@@ -0,0 +1,14 @@
+ARG NEXUS_PROXY_REPO
+
+FROM ${NEXUS_PROXY_REPO}node:16-alpine3.12
+
+ENV NODE_ENV=production
+
+WORKDIR /usr/src/app/cert
+COPY cert/*.crt .
+COPY cert/pass .
+
+WORKDIR /usr/src/app
+COPY http_proxy.js .
+
+CMD [ "node", "http_proxy.js" ]
\ No newline at end of file
diff --git a/test/http-https-proxy/README.md b/test/http-https-proxy/README.md
new file mode 100644 (file)
index 0000000..271db4d
--- /dev/null
@@ -0,0 +1,63 @@
+## http proxy ##
+
+The http proxy is a generic http proxy which is able to proxy both http and https destination calls.
+The call to the proxy itself also supports both http and https (https uses a self-signed cert).
+The main usage for the proxy is as a gateway to all services and pods inside a kubernetes cluster.
+However, it can be used as a basic standard http proxy as well.
+
+### Ports and certificates ###
+
+The proxy opens the http and https ports according to the table below.
+
+| Port     | Proxy protocol | Usage |
+| -------- | ------ |----- |
+| 8080     | http   | Proxy call for http, can proxy both http and https |
+| 8433     | https  | Proxy call for https, can proxy both http and https |
+| 8081     | http   | Http port for alive check, returns json with basic statistics |
+| 8434     | https  | Https port for alive check, returns json with basic statistics |
+
+The cert dir contains a self-signed cert. If the certs need to be re-generated, use the script generate_cert_and_key.sh to generate a new certificate and key before building the container. If another cert is used, all three files (cert.crt, key.crt and pass) in the cert dir should be mounted to the dir '/usr/src/app/cert' in the container.
+
+
+### Proxy usage ###
+
+| Operation | curl example |
+| --------- | ------------ |
+| proxy http call via http | curl --proxy localhost:8080 http://pms:1234 |
+| proxy https call via http | curl --proxy localhost:8080 https://pms:5678 |
+| proxy http call via https | curl --proxy-insecure localhost:8433 http://pms:1234 |
+| proxy https call via https |  curl --proxy-insecure localhost:8433 https://pms:5678 |
+| alive check and get stats | curl localhost:8081 |
+| alive check and get stats via proxy |  curl --proxy localhost:8080 http://localhost:8081 |
+
+### Build and start ###
+
+>Build image<br>
+```docker build --build-arg NEXUS_PROXY_REPO=nexus3.onap.org:10001/ -t nodejs-http-proxy:latest .```
+
+>Start the image on both http and https<br>
+```docker run --rm -it -p 8080:8080 -p 8081:8081 -p 8433:8433 -p 8434:8434 nodejs-http-proxy:latest```
+
+It will listen to http ports 8080/8081 and https ports 8433/8434 (using default certificates) at the same time.
+
+The script ```proxy-build-start.sh``` does the above two steps in one go. This starts the container in stand-alone mode for basic testing.<br>
+
+
+### Basic test ###
+
+Basic testing is done with the script ```basic_test.sh```, which tests the proxy. Use the script ```proxy-build-start.sh``` to start the proxy in a container first.
+
+## License
+
+Copyright (C) 2021 Nordix Foundation. All rights reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/test/http-https-proxy/basic_test.sh b/test/http-https-proxy/basic_test.sh
new file mode 100755 (executable)
index 0000000..085e17f
--- /dev/null
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2021 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+# Automated test script for the http/https proxy container
+
+
+echo "NOTE - No automatic response check - check manually"
+echo "All call shall return a json struct"
+echo ""
+
+CMD="curl --proxy localhost:8080 localhost:8081"
+echo "Running cmd: "$CMD
+$CMD
+res=$?
+echo ""
+if [ $res -eq 0 ]; then
+    echo "CMD OK"
+else
+    echo "CMD FAIL"
+fi
+echo ""
+
+CMD="curl --proxy localhost:8080 -k https://localhost:8434"
+echo "Running cmd: "$CMD
+$CMD
+res=$?
+echo ""
+if [ $res -eq 0 ]; then
+    echo "CMD OK"
+else
+    echo "CMD FAIL"
+fi
+echo ""
+
+CMD="curl --proxy-insecure localhost:8433 localhost:8081"
+echo "Running cmd: "$CMD
+$CMD
+res=$?
+echo ""
+if [ $res -eq 0 ]; then
+    echo "CMD OK"
+else
+    echo "CMD FAIL"
+fi
+echo ""
+
+CMD="curl --proxy-insecure localhost:8433 -k https://localhost:8434"
+echo "Running cmd: "$CMD
+$CMD
+res=$?
+echo ""
+if [ $res -eq 0 ]; then
+    echo "CMD OK"
+else
+    echo "CMD FAIL"
+fi
+echo ""
+
+echo "DONE"
diff --git a/test/http-https-proxy/cert/cert.crt b/test/http-https-proxy/cert/cert.crt
new file mode 100644 (file)
index 0000000..97a2c3b
--- /dev/null
@@ -0,0 +1,18 @@
+-----BEGIN CERTIFICATE-----
+MIIC2jCCAcICCQCoykk37ggbnjANBgkqhkiG9w0BAQsFADAvMQswCQYDVQQGEwJT
+RTESMBAGA1UEBwwJTGlua29waW5nMQwwCgYDVQQKDANFU1QwHhcNMjEwNTAzMTIw
+MjU1WhcNNDgwOTE3MTIwMjU1WjAvMQswCQYDVQQGEwJTRTESMBAGA1UEBwwJTGlu
+a29waW5nMQwwCgYDVQQKDANFU1QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQDKIgyIH3MRpXNGpPpjmlCgt+kmFvOq33AF4wu+zDHvBZL5zPiqHpacDjhl
+Q1gq+8Cxmu5awwMwlz8cWFqrCjkVE1wlUYKIBi/NU7B7hbPMiDSN8fw1bA+sf7Ke
+Fr3BdQ9rEHalUY/BLueNERaOeM79wiHCW+08+T2oixchaeWT/v1KINc/rfpAFVQZ
+9aBSiFecbmZhcL9YKNb3TOaPivrrXgFvrSUkoGCKvIPf9mx/xYMkEbwAHAUSVlAn
+MOWBWf+pQGl/sjuD5mBKS3uL1Gon4R4hXNGNafS/C0HmEbJB6r+OzdnqjumMhmtt
+WGFxYvxS6ChSAa6ni4Ae7tBtlqJPAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAIRk
+bicQn1gUALHz2YTGffBN6BCbIKKIzW9NJ/xIYBYUkMwyxpqq0GMRsqIaBIa9R1dr
+rcG27B8BTwUfeHH8Fjp1YR18axK5uh/RRHgIU5YixaMbdlxwfYDIxV9rf8xuQ/KR
+wQkYqxke5CyTZnqVXR08i3eYsa8XpUq7keqA1ecJBfG5d8g0RJg3/nK81V5PWMXL
+so9xq/dCUS4ulLu7XqXxBUK8pNjnP7fgg6C2+n39eZVv/LHdt05QIjFkEErBUbmz
+zJ9rCN5y70KGgr7HZioAE5buQJZaYWoekbZ/D7r5+j+25IWcWr7vy7Qq9IJA0ZIz
+879adxAkeJTzpdZkfkA=
+-----END CERTIFICATE-----
diff --git a/test/http-https-proxy/cert/generate_cert_and_key.sh b/test/http-https-proxy/cert/generate_cert_and_key.sh
new file mode 100755 (executable)
index 0000000..0f79bbf
--- /dev/null
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2020 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+# This will generate a self-signed certificate with password 'test'
+openssl req -x509 -passout pass:"test" -newkey rsa:2048 -keyout key.crt -out cert.crt -days 9999
diff --git a/test/http-https-proxy/cert/key.crt b/test/http-https-proxy/cert/key.crt
new file mode 100644 (file)
index 0000000..286f5bb
--- /dev/null
@@ -0,0 +1,30 @@
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFHzBJBgkqhkiG9w0BBQ0wPDAbBgkqhkiG9w0BBQwwDgQI6ID1Pm413GQCAggA
+MB0GCWCGSAFlAwQBKgQQxUi4NiTRO3ZtBXwkBMI/zwSCBNCPGAktH49M9OUptPqK
+ie6rIyhvdNMfj7OJL+/SYwTwK0unwsp1CgyZou7SUtfMPVzpSRtY+WuKVhtHSTIR
+4qrY20O5UNrRdIMnlm5daEld+drD5/ql2iTidAHY4idGum0KMpUMdG8G/pdFcD8m
+uJ/b4ogO30gtCra7R4yvrjaSji7yuGMms1p0BWgx8nURivy6zBvFew58WCC63RGe
+aouzavsu+7Xd8t4JXAvcH93zcfXr2uminiZ8d7CvwTfvrtunc0FyTfVl1u7vPB/z
+FzN952KlYK3ugHgSg5ZsqJ2FES2iBNX1I55BHTOIcXkNemcaszRAomrNXqyqk9X2
+36ojasw2fCLt+hluwD3VMH2gKs4XvUbxuYnz7Gfeq9AA9L0iBtZs0+aMG6fgQ7NC
+KvXty0XzupIW615vWmwd5UBubCslXmZKOfF/zyOfd+N59a54VoA/F9q1hfqiNz6L
+QbGbaFhLHO36P9q/i95IZpzZbRTakwF6ICLLdI6i2S9m/AqOUJeP+LKTTZ6pPgvu
+r/fxUIk3ByVzNa9BtqAwwHlqeAUmwj2r2pEF2wV7c4gaEvK/fEx5PsHE0IFbYtmN
+tCNTpj5irFaHfTAmddoaq2On9qo6iZHlxr7fu6cjCjKiF54j4nifF1OcNN1KV/u1
+MNEz8dcMdmiNb69Cr4jOFU9xhyTmZ7sQ6yicUVa8R4pZTThBPRfSLBpsSur/t+n6
+7XCYVC7RyN5Dd8sqK2QVL+dqUCEumiQoV+9ogaxbfo35Z7+LTJDcyHMNieJ8C01S
+Mt5H6TdsCPLtzmbF9Pgw3fYwC2U7pAZYppOMkgiWeg7pH9gyWKah0GfAK0VrCYWz
+9dR2QAkb5u39NAyQKv7nOJ4hQ7TFYiBxM5j1WtnVKuQ5WpDzweP+NcnTMBYnbYDA
+AbMp0VCKTyN5pRbC1JF7RPeFkiEAC1sMLfhxS9kB6UKgWO2jnl70QtUfxMsOkaon
+iDVpsdB0QtfWOayC53GeLTS2uG/wXd3mzq3+QmNd7MVYCtwU7TeZpPboaRkfLX03
+d/sLU2zxcQH1Xp7JWufiN0P1VpsytxGPVGQa4BZsNjX+SOJwU7wRydEqDBhJxSJP
+OrzAojG3K0sB/av7UBK+LsALI36lNso7H9ZzAHx1keYjDGBKRcsMsvtF9kwOW85v
+kYkseYvcedv5CHXOlTIC/GFXOVg7Ot+InCgKI3rj0EojzI7r+M1XlvSrJNzM++iX
+AgD/mBmiKovLPjW4JVkT4zZYDMoTa12QtFc0YPg4yb6rJkqqci7TW8Huy/HFAOST
+LX+j54vY1YflRE1aqcILmkrBZGRT7luQICFjcwUKRRpw1UuP4RRf7Zg6ZwXcshIN
+J1Vmrjo48Gqr4xotCvpgflOXqdZSXNbqyfvIxhnJqShWuVINDQ4CkdLjb+IQP+NF
+8BvfjsaVgEBbu/K5K87ltpuyvqWNSIWojMLJYin5sh1T1nqhCJViwZhCXWFbuPfj
+k12Iww/mFtPQmUlsWnonHNn3CHr0XbEWy7TvCK2VkjysteBmAdTROCv2r8LXTg/i
+63pIL055pqXLZMPxuIx8+rf/gAseXyl2/YfuO45oVVNbZz/d8fbFaxnU/K0zwzvr
+tr/T+QKZMBP0EC8BZ6Tsge6ZZA==
+-----END ENCRYPTED PRIVATE KEY-----
diff --git a/test/http-https-proxy/cert/pass b/test/http-https-proxy/cert/pass
new file mode 100644 (file)
index 0000000..30d74d2
--- /dev/null
@@ -0,0 +1 @@
+test
\ No newline at end of file
diff --git a/test/http-https-proxy/http_proxy.js b/test/http-https-proxy/http_proxy.js
new file mode 100644 (file)
index 0000000..714548a
--- /dev/null
@@ -0,0 +1,214 @@
+//  ============LICENSE_START===============================================
+//  Copyright (C) 2021 Nordix Foundation. All rights reserved.
+//  ========================================================================
+//  Licensed under the Apache License, Version 2.0 (the "License");
+//  you may not use this file except in compliance with the License.
+//  You may obtain a copy of the License at
+//
+//       http://www.apache.org/licenses/LICENSE-2.0
+//
+//  Unless required by applicable law or agreed to in writing, software
+//  distributed under the License is distributed on an "AS IS" BASIS,
+//  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//  See the License for the specific language governing permissions and
+//  limitations under the License.
+//  ============LICENSE_END=================================================
+
+// Basic http/https proxy
+// Call the proxy on 8080/8433 for http/https
+// The destination (proxied) protocol may be http or https
+// Proxy healthcheck on 8081/8434 for http/https - answers with statistics in json
+
+const http = require('http');
+const net = require('net');
+const urlp = require('url');
+const process = require('process')
+const https = require('https');
+const fs = require('fs');
+
+// Proxy server port for http
+const proxyport = 8080;
+// Proxy server port for https
+const proxyporthttps = 8433;
+// Proxy server alive check, port for http
+const aliveport = 8081;
+// Proxy server alive check, port for https
+const aliveporthttps = 8434;
+
+// Default https destination port
+const defaulthttpsport = "443";
+
+// Certs etc for https
+const httpsoptions = {
+  key: fs.readFileSync('cert/key.crt'),
+  cert: fs.readFileSync('cert/cert.crt'),
+  passphrase: fs.readFileSync('cert/pass', 'utf8')
+};
+
+const stats = {
+  'http-requests-initiated': 0,
+  'http-requests-failed': 0,
+  'https-requests-initiated': 0,
+  'https-requests-failed': 0
+};
+
+// handle a http proxy request
+function httpclientrequest(clientrequest, clientresponse) {
+  stats['http-requests-initiated']++;
+
+  if (clientrequest.url == "/" ) {
+    console.log("Catch bad url in http request: "+clientrequest.url)
+    clientresponse.end();
+    return;
+  }
+  // Extract destination information
+  const clientrequesturl = new URL(clientrequest.url);
+
+  var proxyrequestoptions = {
+    'host': clientrequesturl.hostname,
+    'port': clientrequesturl.port,
+    'method': clientrequest.method,
+    'path': clientrequesturl.pathname+clientrequesturl.search,
+    'agent': clientrequest.agent,
+    'auth': clientrequest.auth,
+    'headers': clientrequest.headers
+  };
+
+  // Setup connection to destination
+  var proxyrequest = http.request(
+    proxyrequestoptions,
+    function (proxyresponse) {
+      clientresponse.writeHead(proxyresponse.statusCode, proxyresponse.headers);
+      proxyresponse.on('data', function (chunk) {
+        clientresponse.write(chunk);
+      });
+      proxyresponse.on('end', function () {
+        clientresponse.end();
+      });
+
+    }
+  );
+
+  // Handle the connection and data transfer between source and destination
+  proxyrequest.on('error', function (error) {
+    clientresponse.writeHead(500);
+    stats['http-requests-failed']++;
+    console.log(error);
+    clientresponse.write("<h1>500 Error</h1>\r\n" + "<p>Error was <pre>" + error + "</pre></p>\r\n" + "</body></html>\r\n");
+    clientresponse.end();
+  });
+  clientrequest.addListener('data', function (chunk) {
+    proxyrequest.write(chunk);
+  });
+  clientrequest.addListener('end', function () {
+    proxyrequest.end();
+  });
+}
+
+function main() {
+
+  // -------------------- Alive server ----------------------------------
+  // Respond with '200' and statistics for any path on the alive address
+  const alivelistener = function (req, res) {
+    console.log(stats)
+    res.writeHead(200, { 'Content-Type': 'application/json' });
+    res.write(JSON.stringify(stats))
+    res.end();
+  };
+
+  // The alive server - for healthcheck (http)
+  const aliveserver = http.createServer(alivelistener);
+
+  // The alive server - for healthcheck (https)
+  const aliveserverhttps = https.createServer(httpsoptions, alivelistener);
+
+  //Handle healthcheck requests
+  aliveserver.listen(aliveport, () => {
+    console.log('alive server on: '+aliveport);
+    console.log(' example: curl localhost: '+aliveport)
+  });
+
+  //Handle healthcheck requests
+  aliveserverhttps.listen(aliveporthttps, () => {
+    console.log('alive server on: '+aliveporthttps);
+    console.log(' example: curl -k https://localhost: '+aliveporthttps)
+  });
+
+  // -------------------- Proxy server ---------------------------------
+
+  // The proxy server
+  const proxyserver  = http.createServer(httpclientrequest).listen(proxyport);
+  console.log('http/https proxy for http proxy calls on port ' + proxyport);
+  console.log(' example: curl --proxy localhost:8080 http://pms:1234')
+  console.log(' example: curl -k --proxy localhost:8080 https://pms:5678')
+
+  const proxyserverhttps = https.createServer(httpsoptions, httpclientrequest).listen(proxyporthttps);
+  console.log('http/https proxy for https proxy calls on port ' + proxyporthttps);
+  console.log(' example: curl --proxy-insecure localhost:8433 http://pms:1234')
+  console.log(' example: curl --proxy-insecure localhost:8433 https://pms:5678')
+  console.log(' note: proxy shall not specify https')
+
+  // handle a http proxy request - https listener
+  proxyserver.addListener(
+    'connect',
+    function (request, socketrequest, bodyhead) {
+
+
+      stats['https-requests-initiated']++;
+      // Extract destination information
+      var res = request['url'].split(":")
+      var hostname = res[0]
+      var port = defaulthttpsport;
+      if (res[1] != null) {
+        port = res[1]
+      }
+
+      // Setup connection to destination
+      var httpversion = request['httpVersion'];
+      var proxysocket = new net.Socket();
+
+      proxysocket.connect(
+        parseInt(port), hostname,
+        function () {
+          proxysocket.write(bodyhead);
+          socketrequest.write("HTTP/" + httpversion + " 200 Connection established\r\n\r\n");
+        }
+      );
+
+      // Handle the connection and data transfer between source and destination
+      proxysocket.on('data', function (chunk) {
+        socketrequest.write(chunk);
+      });
+      proxysocket.on('end', function () {
+        socketrequest.end();
+      });
+
+      socketrequest.on('data', function (chunk) {
+        proxysocket.write(chunk);
+      });
+      socketrequest.on('end', function () {
+        proxysocket.end();
+      });
+
+      proxysocket.on('error', function (err) {
+        stats['https-requests-failed']++;
+        console.log(err);
+        socketrequest.write("HTTP/" + httpversion + " 500 Connection error\r\n\r\n");
+        socketrequest.end();
+      });
+      socketrequest.on('error', function (err) {
+        stats['https-requests-failed']++;
+        console.log(err);
+        proxysocket.end();
+      });
+    }
+  );
+}
+
+//Handle ctrl c when running in interactive mode
+process.on('SIGINT', () => {
+  console.info("Interrupted")
+  process.exit(0)
+})
+
+main();
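
As an illustration of the alive/statistics endpoints (the field names are taken from the stats object above; the counter values are made up):

```bash
$ curl localhost:8081
{"http-requests-initiated":4,"http-requests-failed":0,"https-requests-initiated":2,"https-requests-failed":1}
```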
diff --git a/test/http-https-proxy/proxy-build-start.sh b/test/http-https-proxy/proxy-build-start.sh
new file mode 100755 (executable)
index 0000000..553de51
--- /dev/null
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2021 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+#Builds the http/https proxy container and starts it in interactive mode
+
+docker build --build-arg NEXUS_PROXY_REPO=nexus3.onap.org:10001/ -t nodejs-http-proxy:latest .
+
+docker run --rm -it -p 8080:8080 -p 8081:8081 -p 8433:8433 -p 8434:8434 nodejs-http-proxy:latest
index 3e6636b..b9f6115 100644 (file)
@@ -26,5 +26,3 @@ spec:
           containerPort: $KUBE_PROXY_INTERNAL_PORT
         - name: web
           containerPort: $KUBE_PROXY_WEB_INTERNAL_PORT
-        command: ["mitmweb"]
-        args: ["--web-host", "0.0.0.0", "--no-web-open-browser", "-p", "$KUBE_PROXY_INTERNAL_PORT", "--set", "ssl_insecure"]
diff --git a/test/simulator-group/pvc-cleaner/pvc-cleaner.yaml b/test/simulator-group/pvc-cleaner/pvc-cleaner.yaml
new file mode 100644 (file)
index 0000000..790ee87
--- /dev/null
@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pvc-cleaner
+  namespace: $PVC_CLEANER_NAMESPACE
+spec:
+  restartPolicy: Never
+  containers:
+  - name: pvc-cleaner
+    image: "ubuntu:20.10"
+    command: ["/bin/sh","-c"]
+    args: ["rm -rf $PVC_CLEANER_RM_PATH/*"]
+    volumeMounts:
+    - mountPath: $PVC_CLEANER_RM_PATH
+      name: pvc-cleaner-m-vol
+  volumes:
+  - name: pvc-cleaner-m-vol
+    persistentVolumeClaim:
+      claimName: $PVC_CLEANER_CLAIMNAME
\ No newline at end of file