Run test env towards selected kube cluster 59/7759/1
author     BjornMagnussonXA <bjorn.magnusson@est.tech>
           Thu, 17 Feb 2022 14:01:28 +0000 (15:01 +0100)
committer  BjornMagnussonXA <bjorn.magnusson@est.tech>
           Thu, 17 Feb 2022 14:01:43 +0000 (15:01 +0100)
Added option to run test env towards local/remote kubernetes cluster

Issue-ID: NONRTRIC-728
Signed-off-by: BjornMagnussonXA <bjorn.magnusson@est.tech>
Change-Id: I70c22990cfc1f44526933550d46970f600bf21aa
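
Example invocation of a test script against a remote cluster (the test-script
name, env file and kubeconfig path below are illustrative, not part of this
change):

    sudo ./FTC10.sh remote kube --env-file ../common/test_env.sh \
         --kubeconfig /home/user/.kube/remote-cluster.config

When --kubeconfig is omitted, kubectl falls back to its normal config
resolution (the KUBECONFIG env var or ~/.kube/config), so local runs are
unaffected.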

21 files changed:
test/common/chartmus_api_functions.sh
test/common/clean_kube.sh
test/common/clean_kube_ns.sh [new file with mode: 0755]
test/common/cp_api_functions.sh
test/common/cr_api_functions.sh
test/common/dmaapadp_api_functions.sh
test/common/dmaapmed_api_functions.sh
test/common/helmmanager_api_functions.sh
test/common/httpproxy_api_functions.sh
test/common/ics_api_functions.sh
test/common/kafkapc_api_functions.sh
test/common/kubeproxy_api_functions.sh
test/common/mr_api_functions.sh
test/common/ngw_api_functions.sh
test/common/pa_api_functions.sh
test/common/prodstub_api_functions.sh
test/common/pvccleaner_api_functions.sh
test/common/rc_api_functions.sh
test/common/ricsim_api_functions.sh
test/common/sdnc_api_functions.sh
test/common/testcase_common.sh

index a9f09c0..f7e268e 100644 (file)
@@ -79,7 +79,7 @@ __CHARTMUS_kube_delete_all() {
 # args: <log-dir> <file-prefix>
 __CHARTMUS_store_docker_logs() {
        if [ $RUNMODE == "KUBE" ]; then
-               kubectl  logs -l "autotest=CHARTMUS" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_chartmuseum.log 2>&1
+               kubectl $KUBECONF  logs -l "autotest=CHARTMUS" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_chartmuseum.log 2>&1
        else
                docker logs $CHART_MUS_APP_NAME > $1$2_chartmuseum.log 2>&1
        fi
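
The same dual-mode log collection pattern recurs in each *_api_functions.sh
file below; reduced to its core (the app label, namespace and file names are
placeholders):

    if [ $RUNMODE == "KUBE" ]; then
        # kube mode: fetch logs from all pods carrying the app's autotest label
        kubectl $KUBECONF logs -l "autotest=<APP>" -n <namespace> --tail=-1 > <app>.log 2>&1
    else
        # docker mode: fetch logs from the app's container
        docker logs <container-name> > <app>.log 2>&1
    fi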
index 281f8ce..8a0453b 100755 (executable)
@@ -28,20 +28,22 @@ YELLOW="\033[33m\033[1m"
 EYELLOW="\033[0m"
 SAMELINE="\033[0K\r"
 
+KUBECONF=""
+
 __kube_scale_all_resources() {
 
        echo " Scaling down in namespace $1 ..."
        namespace=$1
        resources="deployment replicaset statefulset"
        for restype in $resources; do
-               result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+               result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
                if [ $? -eq 0 ] && [ ! -z "$result" ]; then
                        for resid in $result; do
-                               count=$(kubectl get $restype $resid  -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null)
+                               count=$(kubectl $KUBECONF get $restype $resid  -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null)
                                if [ $? -eq 0 ] && [ ! -z "$count" ]; then
                                        if [ $count -ne 0 ]; then
                                                echo "  Scaling $restype $resid in namespace $namespace with label autotest to 0, current count=$count."
-                                               kubectl scale  $restype $resid -n $namespace --replicas=0 1> /dev/null 2> /dev/null
+                                               kubectl $KUBECONF scale  $restype $resid -n $namespace --replicas=0 1> /dev/null 2> /dev/null
                                        fi
                                fi
                        done
@@ -54,14 +56,14 @@ __kube_wait_for_zero_count() {
        namespace=$1
        resources="deployment replicaset statefulset"
        for restype in $resources; do
-               result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+               result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
                if [ $? -eq 0 ] && [ ! -z "$result" ]; then
                        for resid in $result; do
                                T_START=$SECONDS
                                count=1
                                scaled=0
                                while [ $count -gt 0 ]; do
-                                       count=$(kubectl get $restype $resid  -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null)
+                                       count=$(kubectl $KUBECONF get $restype $resid  -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null)
                                        if [ $? -eq 0 ] && [ ! -z "$count" ]; then
                                                if [ $count -ne 0 ]; then
                                                        echo -ne "  Scaling $restype $resid in namespace $namespace with label autotest to 0, current count=$count....$(($SECONDS-$T_START)) seconds"$SAMELINE
@@ -86,11 +88,11 @@ __kube_delete_all_resources() {
        namespace=$1
        resources="deployments replicaset statefulset services pods configmaps pvc serviceaccounts"
        for restype in $resources; do
-               result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+               result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
                if [ $? -eq 0 ] && [ ! -z "$result" ]; then
                        for resid in $result; do
                                echo  "  Deleting $restype $resid in namespace $namespace with label autotest "
-                               kubectl delete --grace-period=1 $restype $resid -n $namespace 1> /dev/null 2> /dev/null
+                               kubectl $KUBECONF delete --grace-period=1 $restype $resid -n $namespace 1> /dev/null 2> /dev/null
                        done
                fi
        done
@@ -100,11 +102,11 @@ __kube_delete_all_pv() {
        echo " Delete all non-namespaced resources ..."
        resources="pv clusterrolebindings"
        for restype in $resources; do
-               result=$(kubectl get $restype -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+               result=$(kubectl $KUBECONF get $restype -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
                if [ $? -eq 0 ] && [ ! -z "$result" ]; then
                        for resid in $result; do
                                echo  "  Deleting $restype $resid with label autotest "
-                               kubectl delete --grace-period=1 $restype $resid 1> /dev/null 2> /dev/null
+                               kubectl $KUBECONF delete --grace-period=1 $restype $resid 1> /dev/null 2> /dev/null
                        done
                fi
        done
@@ -115,17 +117,17 @@ __kube_wait_for_delete() {
        namespace=$1
        resources="deployments replicaset statefulset services pods configmaps pvc "
        for restype in $resources; do
-               result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+               result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
                if [ $? -eq 0 ] && [ ! -z "$result" ]; then
                        for resid in $result; do
                                echo  "  Deleting $restype $resid in namespace $namespace with label autotest "
-                               kubectl delete --grace-period=1 $restype $resid -n $namespace #1> /dev/null 2> /dev/null
+                               kubectl $KUBECONF delete --grace-period=1 $restype $resid -n $namespace #1> /dev/null 2> /dev/null
                                echo -ne "  Waiting for $restype $resid in namespace $namespace with label autotest to be deleted..."$SAMELINE
                                T_START=$SECONDS
                                result="dummy"
                                while [ ! -z "$result" ]; do
                                        sleep 0.5
-                                       result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+                                       result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
                                        echo -ne "  Waiting for $restype $resid in namespace $namespace with label autotest to be deleted...$(($SECONDS-$T_START)) seconds "$SAMELINE
                                        if [ -z "$result" ]; then
                                                echo -e " Waiting for $restype $resid in namespace $namespace with label autotest to be deleted...$(($SECONDS-$T_START)) seconds $GREEN OK $EGREEN"
@@ -143,17 +145,17 @@ __kube_wait_for_delete_pv() {
        echo " Wait for delete pv ..."
        resources="pv "
        for restype in $resources; do
-               result=$(kubectl get $restype -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+               result=$(kubectl $KUBECONF get $restype -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
                if [ $? -eq 0 ] && [ ! -z "$result" ]; then
                        for resid in $result; do
                                echo  "  Deleting $restype $resid with label autotest "
-                               kubectl delete --grace-period=1 $restype $resid -n $namespace #1> /dev/null 2> /dev/null
+                               kubectl $KUBECONF delete --grace-period=1 $restype $resid -n $namespace #1> /dev/null 2> /dev/null
                                echo -ne "  Waiting for $restype $resid with label autotest to be deleted..."$SAMELINE
                                T_START=$SECONDS
                                result="dummy"
                                while [ ! -z "$result" ]; do
                                        sleep 0.5
-                                       result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+                                       result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
                                        echo -ne "  Waiting for $restype $resid with label autotest to be deleted...$(($SECONDS-$T_START)) seconds "$SAMELINE
                                        if [ -z "$result" ]; then
                                                echo -e " Waiting for $restype $resid with label autotest to be deleted...$(($SECONDS-$T_START)) seconds $GREEN OK $EGREEN"
@@ -170,8 +172,32 @@ __kube_wait_for_delete_pv() {
 
 echo "Will remove all kube resources marked with label 'autotest'"
 
+print_usage() {
+    echo "Usage: clean_kube.sh [--kubeconfig <kube-config-file>]"
+}
+
+if [ $# -eq 2 ]; then
+    if [ "$1" == "--kubeconfig" ]; then
+        if [ ! -f "$2" ]; then
+            echo "File $2 for --kubeconfig is not found"
+            print_usage
+            exit 1
+        fi
+        KUBECONF="--kubeconfig $2"
+    else
+        print_usage
+        exit 1
+    fi
+elif [ $# -ne 0 ]; then
+    print_usage
+    exit 1
+fi
+
 # List all namespace and scale/delete per namespace
-nss=$(kubectl get ns  -o jsonpath='{.items[*].metadata.name}')
+nss=$(kubectl $KUBECONF get ns  -o jsonpath='{.items[*].metadata.name}')
 if [ ! -z "$nss" ]; then
        for ns in $nss; do
                __kube_scale_all_resources $ns
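
Note on the pattern used throughout: KUBECONF is either the empty string or
the two words "--kubeconfig <file>", and it is deliberately expanded unquoted
so that it contributes zero or two arguments to each kubectl call:

    KUBECONF=""                                  # default: kubectl uses KUBECONFIG/~/.kube/config
    kubectl $KUBECONF get ns                     # runs: kubectl get ns
    KUBECONF="--kubeconfig /tmp/remote.config"   # illustrative path
    kubectl $KUBECONF get ns                     # runs: kubectl --kubeconfig /tmp/remote.config get ns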
diff --git a/test/common/clean_kube_ns.sh b/test/common/clean_kube_ns.sh
new file mode 100755 (executable)
index 0000000..0a016d2
--- /dev/null
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2021 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+
+# Script to clean all namespaces in kubernetes having the label 'autotest', i.e. namespaces started by the test environment
+
+BOLD="\033[1m"
+EBOLD="\033[0m"
+RED="\033[31m\033[1m"
+ERED="\033[0m"
+GREEN="\033[32m\033[1m"
+EGREEN="\033[0m"
+YELLOW="\033[33m\033[1m"
+EYELLOW="\033[0m"
+SAMELINE="\033[0K\r"
+
+KUBECONF=""
+
+echo "Will remove all kube namespaces marked with label 'autotest'"
+
+print_usage() {
+    echo "Usage: clean_kube_ns.sh [--kubeconfig <kube-config-file>]"
+}
+
+if [ $# -eq 2 ]; then
+    if [ "$1" == "--kubeconfig" ]; then
+        if [ ! -f "$2" ]; then
+            echo "File $2 for --kubeconfig is not found"
+            print_usage
+            exit 1
+        fi
+        KUBECONF="--kubeconfig $2"
+    else
+        print_usage
+        exit 1
+    fi
+elif [ $# -ne 0 ]; then
+    print_usage
+    exit 1
+fi
+
+indent1() { sed 's/^/ /'; }
+
+nss=$(kubectl $KUBECONF get ns -o 'jsonpath={.items[?(@.metadata.labels.autotest)].metadata.name}')
+if [ ! -z "$nss" ]; then
+       for ns in $nss; do
+               echo "Deleting namespace: "$ns
+               kubectl $KUBECONF delete ns $ns | indent1
+       done
+fi
+echo "Done"
\ No newline at end of file
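
The new cleaner can also be run stand-alone (the config path is illustrative):

    ./clean_kube_ns.sh                                               # default cluster
    ./clean_kube_ns.sh --kubeconfig /home/user/.kube/remote.config   # selected cluster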
index 803184a..f9f689a 100644 (file)
@@ -79,7 +79,7 @@ __CP_kube_delete_all() {
 # args: <log-dir> <file-prefix>
 __CP_store_docker_logs() {
        if [ $RUNMODE == "KUBE" ]; then
-               kubectl  logs -l "autotest=CP" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_control-panel.log 2>&1
+               kubectl $KUBECONF  logs -l "autotest=CP" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_control-panel.log 2>&1
        else
                docker logs $CONTROL_PANEL_APP_NAME > $1$2_control-panel.log 2>&1
        fi
index a12b69e..45ed1fb 100644 (file)
@@ -94,8 +94,8 @@ __CR_kube_delete_all() {
 # args: <log-dir> <file-prefix>
 __CR_store_docker_logs() {
        if [ $RUNMODE == "KUBE" ]; then
-               for podname in $(kubectl get pods -n $KUBE_SIM_NAMESPACE -l "autotest=CR" -o custom-columns=":metadata.name"); do
-                       kubectl logs -n $KUBE_SIM_NAMESPACE $podname --tail=-1 > $1$2_$podname.log 2>&1
+               for podname in $(kubectl $KUBECONF get pods -n $KUBE_SIM_NAMESPACE -l "autotest=CR" -o custom-columns=":metadata.name"); do
+                       kubectl $KUBECONF logs -n $KUBE_SIM_NAMESPACE $podname --tail=-1 > $1$2_$podname.log 2>&1
                done
        else
                crs=$(docker ps --filter "name=$CR_APP_NAME" --filter "network=$DOCKER_SIM_NWNAME" --filter "status=running" --format {{.Names}})
index 9f8dc5f..d7e8c0d 100644 (file)
@@ -79,7 +79,7 @@ __DMAAPADP_kube_delete_all() {
 # args: <log-dir> <file-prefix>
 __DMAAPADP_store_docker_logs() {
        if [ $RUNMODE == "KUBE" ]; then
-               kubectl  logs -l "autotest=DMAAPADP" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_dmaapadapter.log 2>&1
+               kubectl $KUBECONF  logs -l "autotest=DMAAPADP" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_dmaapadapter.log 2>&1
        else
                docker logs $DMAAP_ADP_APP_NAME > $1$2_dmaapadapter.log 2>&1
        fi
index ef99ee1..c9c9c15 100644 (file)
@@ -79,7 +79,7 @@ __DMAAPMED_kube_delete_all() {
 # args: <log-dir> <file-prefix>
 __DMAAPMED_store_docker_logs() {
        if [ $RUNMODE == "KUBE" ]; then
-               kubectl  logs -l "autotest=DMAAPMED" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_dmaapmediator.log 2>&1
+               kubectl $KUBECONF  logs -l "autotest=DMAAPMED" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_dmaapmediator.log 2>&1
        else
                docker logs $DMAAP_MED_APP_NAME > $1$2_dmaapmediator.log 2>&1
        fi
index a5a9a09..42d2bca 100644 (file)
@@ -71,7 +71,7 @@ __HELMMANAGER_kube_delete_all() {
 # args: <log-dir> <file-prefix>
 __HELMMANAGER_store_docker_logs() {
        if [ $RUNMODE == "KUBE" ]; then
-               kubectl  logs -l "autotest=HELMMANAGER" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_helmmanager.log 2>&1
+               kubectl $KUBECONF  logs -l "autotest=HELMMANAGER" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_helmmanager.log 2>&1
        else
                docker logs $HELM_MANAGER_APP_NAME > $1$2_helmmanager.log 2>&1
        fi
@@ -517,7 +517,7 @@ helm_manager_api_exec_add_repo() {
                        return 1
                fi
        else
-               retmsg=$(kubectl exec -it $HELM_MANAGER_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE -- helm repo add $1 $2)
+               retmsg=$(kubectl $KUBECONF exec -it $HELM_MANAGER_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE -- helm repo add $1 $2)
                retcode=$?
                if [ $retcode -ne 0 ]; then
                        __log_conf_fail_general " Cannot add repo to helm, return code: $retcode, msg: $retmsg"
index c417d42..e805264 100644 (file)
@@ -93,7 +93,7 @@ __HTTPPROXY_kube_delete_all() {
 # args: <log-dir> <file-prefix>
 __HTTPPROXY_store_docker_logs() {
        if [ $RUNMODE == "KUBE" ]; then
-               kubectl  logs -l "autotest=HTTPPROXY" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_httpproxy.log 2>&1
+               kubectl $KUBECONF  logs -l "autotest=HTTPPROXY" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_httpproxy.log 2>&1
        else
                docker logs $HTTP_PROXY_APP_NAME > $1$2_httpproxy.log 2>&1
        fi
index 0e87517..05638af 100644 (file)
@@ -78,7 +78,7 @@ __ICS_kube_delete_all() {
 # args: <log-dir> <file-prefix>
 __ICS_store_docker_logs() {
        if [ $RUNMODE == "KUBE" ]; then
-               kubectl  logs -l "autotest=ICS" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_ics.log 2>&1
+               kubectl $KUBECONF  logs -l "autotest=ICS" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_ics.log 2>&1
        else
                docker logs $ICS_APP_NAME > $1$2_ics.log 2>&1
        fi
@@ -357,7 +357,7 @@ stop_ics() {
 
                __kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest ICS
                echo "  Deleting the replica set - a new one will be started when the app is started"
-               tmp=$(kubectl delete rs -n $KUBE_NONRTRIC_NAMESPACE -l "autotest=ICS")
+               tmp=$(kubectl $KUBECONF delete rs -n $KUBE_NONRTRIC_NAMESPACE -l "autotest=ICS")
                if [ $? -ne 0 ]; then
                        echo -e $RED" Could not delete replica set "$ERED
                        ((RES_CONF_FAIL++))
@@ -403,7 +403,7 @@ start_stopped_ics() {
                else
                        echo -e $BOLD" Setting nodeSelector kubernetes.io/hostname=$__ICS_WORKER_NODE to deployment for $ICS_APP_NAME. Pod will always run on this worker node: $__ICS_WORKER_NODE"$EBOLD
                        echo -e $BOLD" The mounted volume is mounted as hostPath and only available on that worker node."$EBOLD
-                       tmp=$(kubectl patch deployment $ICS_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE --patch '{"spec": {"template": {"spec": {"nodeSelector": {"kubernetes.io/hostname": "'$__ICS_WORKER_NODE'"}}}}}')
+                       tmp=$(kubectl $KUBECONF patch deployment $ICS_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE --patch '{"spec": {"template": {"spec": {"nodeSelector": {"kubernetes.io/hostname": "'$__ICS_WORKER_NODE'"}}}}}')
                        if [ $? -ne 0 ]; then
                                echo -e $YELLOW" Cannot set nodeSelector to deployment for $ICS_APP_NAME, persistency may not work"$EYELLOW
                        fi
@@ -2449,7 +2449,7 @@ ics_api_admin_reset() {
 ics_kube_pvc_reset() {
        __log_test_start $@
 
-       pvc_name=$(kubectl get pvc -n $KUBE_NONRTRIC_NAMESPACE  --no-headers -o custom-columns=":metadata.name" | grep information)
+       pvc_name=$(kubectl $KUBECONF get pvc -n $KUBE_NONRTRIC_NAMESPACE  --no-headers -o custom-columns=":metadata.name" | grep information)
        if [ -z "$pvc_name" ]; then
                pvc_name=informationservice-pvc
        fi
index 4b15641..fee52ae 100644 (file)
@@ -94,7 +94,7 @@ __KAFKAPC_kube_delete_all() {
 # args: <log-dir> <file-prefix>
 __KAFKAPC_store_docker_logs() {
        if [ $RUNMODE == "KUBE" ]; then
-               kubectl  logs -l "autotest=KAFKAPC" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_kafkapc.log 2>&1
+               kubectl $KUBECONF  logs -l "autotest=KAFKAPC" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_kafkapc.log 2>&1
        else
                docker logs $KAFKAPC_APP_NAME > $1$2_kafkapc.log 2>&1
        fi
index 38aeb21..8fbccf0 100644 (file)
@@ -94,7 +94,7 @@ __KUBEPROXY_kube_delete_all() {
 # args: <log-dir> <file-prefix>
 __KUBEPROXY_store_docker_logs() {
        if [ $RUNMODE == "KUBE" ]; then
-               kubectl  logs -l "autotest=KUBEPROXY" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_kubeproxy.log 2>&1
+               kubectl $KUBECONF  logs -l "autotest=KUBEPROXY" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_kubeproxy.log 2>&1
        else
                docker logs $KUBE_PROXY_APP_NAME > $1$2_kubeproxy.log 2>&1
        fi
@@ -249,7 +249,7 @@ start_kube_proxy() {
 
                #Finding host of the proxy
                echo "  Trying to find svc hostname..."
-               CLUSTER_KUBE_PROXY_HOST=$(__kube_cmd_with_timeout "kubectl get svc $KUBE_PROXY_APP_NAME -n $KUBE_SIM_NAMESPACE  -o jsonpath={.status.loadBalancer.ingress[0].hostname}")
+               CLUSTER_KUBE_PROXY_HOST=$(__kube_cmd_with_timeout "kubectl $KUBECONF get svc $KUBE_PROXY_APP_NAME -n $KUBE_SIM_NAMESPACE  -o jsonpath={.status.loadBalancer.ingress[0].hostname}")
 
 
                if [ "$CLUSTER_KUBE_PROXY_HOST" == "localhost" ]; then
@@ -260,7 +260,7 @@ start_kube_proxy() {
                        if [ -z "$CLUSTER_KUBE_PROXY_HOST" ]; then
                                #Host of proxy not found, trying to find the ip....
                                echo "  Trying to find svc ip..."
-                               CLUSTER_KUBE_PROXY_HOST=$(__kube_cmd_with_timeout "kubectl get svc $KUBE_PROXY_APP_NAME -n $KUBE_SIM_NAMESPACE  -o jsonpath={.status.loadBalancer.ingress[0].ip}")
+                               CLUSTER_KUBE_PROXY_HOST=$(__kube_cmd_with_timeout "kubectl $KUBECONF get svc $KUBE_PROXY_APP_NAME -n $KUBE_SIM_NAMESPACE  -o jsonpath={.status.loadBalancer.ingress[0].ip}")
                                if [ ! -z "$CLUSTER_KUBE_PROXY_HOST" ]; then
                                        #Host ip found
                                        echo -e $YELLOW" The test environment svc $KUBE_PROXY_APP_NAME ip is: $CLUSTER_KUBE_PROXY_HOST."$EYELLOW
@@ -277,7 +277,7 @@ start_kube_proxy() {
                fi
                if [ -z "$CLUSTER_KUBE_PROXY_HOST" ]; then
                        #Host/ip of proxy not found, try to use the cluster and the nodeports of the proxy
-                       CLUSTER_KUBE_PROXY_HOST=$(kubectl config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}')
+                       CLUSTER_KUBE_PROXY_HOST=$(kubectl $KUBECONF config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}')
                        echo -e $YELLOW" The test environment cluster ip is: $CLUSTER_KUBE_PROXY_HOST."$EYELLOW
                        CLUSTER_KUBE_PROXY_PORT=$(__kube_get_service_nodeport $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "http$PORT_KEY_PREFIX")  # port for proxy access
                        KUBE_PROXY_WEB_NODEPORT=$(__kube_get_service_nodeport $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE "web$PORT_KEY_PREFIX")  # web port, only for alive test
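
The proxy-host discovery above falls back in three steps; condensed (the
service and namespace names are illustrative):

    HOST=$(kubectl $KUBECONF get svc kubeproxy -n sim-ns -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
    if [ -z "$HOST" ]; then   # no loadbalancer hostname - try the loadbalancer ip
        HOST=$(kubectl $KUBECONF get svc kubeproxy -n sim-ns -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
    fi
    if [ -z "$HOST" ]; then   # no loadbalancer at all - use the cluster address plus the svc nodeports
        HOST=$(kubectl $KUBECONF config view -o jsonpath='{.clusters[0].cluster.server}' | awk -F[/:] '{print $4}')
    fi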
index 122b412..458070e 100755 (executable)
@@ -158,7 +158,7 @@ __DMAAPMR_kube_delete_all() {
 # args: <log-dir> <file-prefix>
 __MR_store_docker_logs() {
        if [ $RUNMODE == "KUBE" ]; then
-               kubectl  logs -l "autotest=MR" -n $KUBE_ONAP_NAMESPACE --tail=-1 > $1$2_mr_stub.log 2>&1
+               kubectl $KUBECONF  logs -l "autotest=MR" -n $KUBE_ONAP_NAMESPACE --tail=-1 > $1$2_mr_stub.log 2>&1
        else
                docker logs $MR_STUB_APP_NAME > $1$2_mr_stub.log 2>&1
        fi
@@ -169,8 +169,8 @@ __MR_store_docker_logs() {
 # args: <log-dir> <file-prefix>
 __DMAAPMR_store_docker_logs() {
        if [ $RUNMODE == "KUBE" ]; then
-               for podname in $(kubectl get pods -n $KUBE_ONAP_NAMESPACE -l "autotest=DMAAPMR" -o custom-columns=":metadata.name"); do
-                       kubectl logs -n $KUBE_ONAP_NAMESPACE $podname --tail=-1 > $1$2_$podname.log 2>&1
+               for podname in $(kubectl $KUBECONF get pods -n $KUBE_ONAP_NAMESPACE -l "autotest=DMAAPMR" -o custom-columns=":metadata.name"); do
+                       kubectl $KUBECONF logs -n $KUBE_ONAP_NAMESPACE $podname --tail=-1 > $1$2_$podname.log 2>&1
                done
        else
                docker logs $MR_DMAAP_APP_NAME > $1$2_mr.log 2>&1
index bb37799..1886ad8 100644 (file)
@@ -79,7 +79,7 @@ __NGW_kube_delete_all() {
 # args: <log-dir> <file-prefix>
 __NGW_store_docker_logs() {
        if [ $RUNMODE == "KUBE" ]; then
-               kubectl  logs -l "autotest=NGW" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_gateway.log 2>&1
+               kubectl $KUBECONF  logs -l "autotest=NGW" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_gateway.log 2>&1
        else
                docker logs $NRT_GATEWAY_APP_NAME > $1$2_gateway.log 2>&1
        fi
index 9076bab..9a0a162 100644 (file)
@@ -78,7 +78,7 @@ __PA_kube_delete_all() {
 # args: <log-dir> <file-prefix>
 __PA_store_docker_logs() {
        if [ $RUNMODE == "KUBE" ]; then
-               kubectl  logs -l "autotest=PA" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_policy-agent.log 2>&1
+               kubectl $KUBECONF  logs -l "autotest=PA" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_policy-agent.log 2>&1
        else
                docker logs $POLICY_AGENT_APP_NAME > $1$2_policy-agent.log 2>&1
        fi
@@ -311,7 +311,7 @@ start_policy_agent() {
 
                # Keep the initial worker node in case the pod need to be "restarted" - must be made to the same node due to a volume mounted on the host
                if [ $retcode_i -eq 0 ]; then
-                       __PA_WORKER_NODE=$(kubectl get pod -l "autotest=PA" -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.items[*].spec.nodeName}')
+                       __PA_WORKER_NODE=$(kubectl $KUBECONF get pod -l "autotest=PA" -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.items[*].spec.nodeName}')
                        if [ -z "$__PA_WORKER_NODE" ]; then
                                echo -e $YELLOW" Cannot find worker node for pod for $POLICY_AGENT_APP_NAME, persistency may not work"$EYELLOW
                        fi
@@ -382,7 +382,7 @@ stop_policy_agent() {
                fi
                __kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest PA
                echo "  Deleting the replica set - a new one will be started when the app is started"
-               tmp=$(kubectl delete rs -n $KUBE_NONRTRIC_NAMESPACE -l "autotest=PA")
+               tmp=$(kubectl $KUBECONF delete rs -n $KUBE_NONRTRIC_NAMESPACE -l "autotest=PA")
                if [ $? -ne 0 ]; then
                        echo -e $RED" Could not delete replica set "$ERED
                        ((RES_CONF_FAIL++))
@@ -428,7 +428,7 @@ start_stopped_policy_agent() {
                else
                        echo -e $BOLD" Setting nodeSelector kubernetes.io/hostname=$__PA_WORKER_NODE to deployment for $POLICY_AGENT_APP_NAME. Pod will always run on this worker node: $__PA_WORKER_NODE"$EBOLD
                        echo -e $BOLD" The mounted volume is mounted as hostPath and only available on that worker node."$EBOLD
-                       tmp=$(kubectl patch deployment $POLICY_AGENT_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE --patch '{"spec": {"template": {"spec": {"nodeSelector": {"kubernetes.io/hostname": "'$__PA_WORKER_NODE'"}}}}}')
+                       tmp=$(kubectl $KUBECONF patch deployment $POLICY_AGENT_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE --patch '{"spec": {"template": {"spec": {"nodeSelector": {"kubernetes.io/hostname": "'$__PA_WORKER_NODE'"}}}}}')
                        if [ $? -ne 0 ]; then
                                echo -e $YELLOW" Cannot set nodeSelector to deployment for $POLICY_AGENT_APP_NAME, persistency may not work"$EYELLOW
                        fi
@@ -508,13 +508,13 @@ prepare_consul_config() {
        config_json=$config_json"\n   \"ric\": ["
 
        if [ $RUNMODE == "KUBE" ]; then
-               result=$(kubectl get pods -n $KUBE_A1SIM_NAMESPACE -o jsonpath='{.items[?(@.metadata.labels.autotest=="RICSIM")].metadata.name}')
+               result=$(kubectl $KUBECONF get pods -n $KUBE_A1SIM_NAMESPACE -o jsonpath='{.items[?(@.metadata.labels.autotest=="RICSIM")].metadata.name}')
                rics=""
                ric_cntr=0
                if [ $? -eq 0 ] && [ ! -z "$result" ]; then
                        for im in $result; do
                                if [[ $im != *"-0" ]]; then
-                                       ric_subdomain=$(kubectl get pod $im -n $KUBE_A1SIM_NAMESPACE -o jsonpath='{.spec.subdomain}')
+                                       ric_subdomain=$(kubectl $KUBECONF get pod $im -n $KUBE_A1SIM_NAMESPACE -o jsonpath='{.spec.subdomain}')
                                        rics=$rics" "$im"."$ric_subdomain"."$KUBE_A1SIM_NAMESPACE
                                        let ric_cntr=ric_cntr+1
                                fi
@@ -2370,7 +2370,7 @@ api_get_configuration() {
 pms_kube_pvc_reset() {
        __log_test_start $@
 
-       pvc_name=$(kubectl get pvc -n $KUBE_NONRTRIC_NAMESPACE  --no-headers -o custom-columns=":metadata.name" | grep policy)
+       pvc_name=$(kubectl $KUBECONF get pvc -n $KUBE_NONRTRIC_NAMESPACE  --no-headers -o custom-columns=":metadata.name" | grep policy)
        if [ -z "$pvc_name" ]; then
                pvc_name=policymanagementservice-vardata-pvc
        fi
index b3e3dea..9af00d8 100644 (file)
@@ -94,7 +94,7 @@ __PRODSTUB_kube_delete_all() {
 # args: <log-dir> <file-prefix>
 __PRODSTUB_store_docker_logs() {
        if [ $RUNMODE == "KUBE" ]; then
-               kubectl  logs -l "autotest=PRODSTUB" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_prodstub.log 2>&1
+               kubectl $KUBECONF  logs -l "autotest=PRODSTUB" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_prodstub.log 2>&1
        else
                docker logs $PROD_STUB_APP_NAME > $1$2_prodstub.log 2>&1
        fi
index feb4440..26bee7b 100644 (file)
@@ -79,7 +79,7 @@ __PVCCLEANER_kube_delete_all() {
 # args: <log-dir> <file-prefix>
 __PVCCLEANER_store_docker_logs() {
        if [ $RUNMODE == "KUBE" ]; then
-               kubectl  logs -l "autotest=PRODSTUB" -A --tail=-1 > $1$2_pvs_cleaner.log 2>&1
+               kubectl $KUBECONF  logs -l "autotest=PVCCLEANER" -A --tail=-1 > $1$2_pvs_cleaner.log 2>&1
        fi
 }
 
index b17b6bf..050ddd1 100644 (file)
@@ -71,7 +71,7 @@ __RC_kube_delete_all() {
 # args: <log-dir> <file-prefix>
 __RC_store_docker_logs() {
        if [ $RUNMODE == "KUBE" ]; then
-               kubectl  logs -l "autotest=RC" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_rc.log 2>&1
+               kubectl $KUBECONF  logs -l "autotest=RC" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_rc.log 2>&1
        else
                docker logs $RAPP_CAT_APP_NAME > $1$2_rc.log 2>&1
        fi
index f433cad..a366a9d 100644 (file)
@@ -72,8 +72,8 @@ __RICSIM_kube_delete_all() {
 # args: <log-dir> <file-prefix>
 __RICSIM_store_docker_logs() {
        if [ $RUNMODE == "KUBE" ]; then
-               for podname in $(kubectl get pods -n $KUBE_A1SIM_NAMESPACE -l "autotest=RICSIM" -o custom-columns=":metadata.name"); do
-                       kubectl logs -n $KUBE_A1SIM_NAMESPACE $podname --tail=-1 > $1$2_$podname.log 2>&1
+               for podname in $(kubectl $KUBECONF get pods -n $KUBE_A1SIM_NAMESPACE -l "autotest=RICSIM" -o custom-columns=":metadata.name"); do
+                       kubectl $KUBECONF logs -n $KUBE_A1SIM_NAMESPACE $podname --tail=-1 > $1$2_$podname.log 2>&1
                done
        else
 
index 68cf976..55a6a15 100644 (file)
@@ -93,9 +93,9 @@ __SDNC_kube_delete_all() {
 # args: <log-dir> <file-prefix>
 __SDNC_store_docker_logs() {
        if [ $RUNMODE == "KUBE" ]; then
-               kubectl  logs -l "autotest=SDNC" -n $KUBE_SDNC_NAMESPACE --tail=-1 > $1$2_SDNC.log 2>&1
-               podname=$(kubectl get pods -n $KUBE_SDNC_NAMESPACE -l "autotest=SDNC" -o custom-columns=":metadata.name")
-               kubectl exec -t -n $KUBE_SDNC_NAMESPACE $podname -- cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
+               kubectl $KUBECONF  logs -l "autotest=SDNC" -n $KUBE_SDNC_NAMESPACE --tail=-1 > $1$2_SDNC.log 2>&1
+               podname=$(kubectl $KUBECONF get pods -n $KUBE_SDNC_NAMESPACE -l "autotest=SDNC" -o custom-columns=":metadata.name")
+               kubectl $KUBECONF exec -t -n $KUBE_SDNC_NAMESPACE $podname -- cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
        else
                docker exec -t $SDNC_APP_NAME cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
        fi
index c9374cf..a94d3e2 100755 (executable)
 __print_args() {
        echo "Args: remote|remote-remove docker|kube --env-file <environment-filename> [release] [auto-clean] [--stop-at-error] "
 echo "      [--ricsim-prefix <prefix> ] [--use-local-image <app-name>+]  [--use-snapshot-image <app-name>+]"
-       echo "      [--use-staging-image <app-nam>+] [--use-release-image <app-nam>+] [--image-repo <repo-address]"
+       echo "      [--use-staging-image <app-name>+] [--use-release-image <app-name>+] [--image-repo <repo-address>]"
 echo "      [--repo-policy local|remote] [--cluster-timeout <timeout-in-seconds>] [--print-stats]"
        echo "      [--override <override-environment-filename>] [--pre-clean] [--gen-stats] [--delete-namespaces]"
-       echo "      [--delete-containers] [--endpoint-stats]"
+       echo "      [--delete-containers] [--endpoint-stats] [--kubeconfig <config-file>]"
 }
 
 if [ $# -eq 1 ] && [ "$1" == "help" ]; then
@@ -64,6 +64,7 @@ if [ $# -eq 1 ] && [ "$1" == "help" ]; then
        echo "--delete-namespaces   -  Delete kubernetes namespaces before starting tests - but only those created by the test scripts. Kube mode only. Ignored if running with prestarted apps."
        echo "--delete-containers   -  Delete docker containers before starting tests - but only those created by the test scripts. Docker mode only."
        echo "--endpoint-stats      -  Collect endpoint statistics"
+       echo "--kubeconfig          -  Configure kubectl to use a cluster-specific config file"
        echo ""
        echo "List of app short names supported: "$APP_SHORT_NAMES
        exit 0
@@ -266,6 +267,9 @@ DELETE_KUBE_NAMESPACES=0
 #Var to control if containers shall be delete before test setup
 DELETE_CONTAINERS=0
 
+#Var to configure kubectl from a config file.
+KUBECONF=""
+
 #File to keep deviation messages
 DEVIATION_FILE=".tmp_deviations"
 rm $DEVIATION_FILE &> /dev/null
@@ -810,7 +814,29 @@ while [ $paramerror -eq 0 ] && [ $foundparm -eq 0 ]; do
                        foundparm=0
                fi
        fi
-
+       if [ $paramerror -eq 0 ]; then
+               if [ "$1" == "--kubeconfig" ]; then
+                       shift;
+                       if [ -z "$1" ]; then
+                               paramerror=1
+                               if [ -z "$paramerror_str" ]; then
+                                       paramerror_str="No path found for : '--kubeconfig'"
+                               fi
+                       else
+                           if [ -f  $1 ]; then
+                                       KUBECONF="--kubeconfig $1"
+                                       echo "Option set - Kubeconfig path: "$1
+                                       shift;
+                                       foundparm=0
+                               else
+                                       paramerror=1
+                                       if [ -z "$paramerror_str" ]; then
+                                               paramerror_str="File $1 for --kubeconfig not found"
+                                       fi
+                               fi
+                       fi
+               fi
+       fi
 done
 echo ""
 
@@ -995,17 +1021,17 @@ else
        if [ $RUNMODE == "KUBE" ]; then
                echo " kubectl is installed and using versions:"
                echo $(kubectl version --short=true) | indent2
-               res=$(kubectl cluster-info 2>&1)
+               res=$(kubectl $KUBECONF cluster-info 2>&1)
                if [ $? -ne 0 ]; then
                        echo -e "$BOLD$RED############################################# $ERED$EBOLD"
-                       echo -e  $BOLD$RED"Command 'kubectl cluster-info' returned error $ERED$EBOLD"
+                       echo -e  $BOLD$RED"Command 'kubectl $KUBECONF cluster-info' returned error $ERED$EBOLD"
                        echo -e "$BOLD$RED############################################# $ERED$EBOLD"
                        echo " "
                        echo "kubectl response:"
                        echo $res
                        echo " "
                        echo "This script may have been started by a user without permission to run kubectl"
-                       echo "Try running with 'sudo' or set 'KUBECONFIG'"
+                       echo "Try running with 'sudo', setting the env var KUBECONFIG, or using the '--kubeconfig' parameter"
                        echo "Do either 1, 2 or 3 "
                        echo " "
                        echo "1"
@@ -1018,13 +1044,14 @@ else
                        echo -e $BOLD"sudo -E <test-script-and-parameters>"$EBOLD
                        echo " "
                        echo "3"
-                       echo "Set KUBECONFIG inline (replace user)"
-                       echo -e $BOLD"sudo  KUBECONFIG='/home/<user>/.kube/config' <test-script-and-parameters>"$EBOLD
+                       echo "Set KUBECONFIG via script parameter"
+                       echo -e $BOLD"sudo ... --kubeconfig /home/<user>/.kube/<config-file> ...."$EBOLD
+                       echo "The config file needs to be downloaded from the cluster"
 
                        exit 1
                fi
                echo " Node(s) and container runtime config"
-               kubectl get nodes -o wide | indent2
+               kubectl $KUBECONF get nodes -o wide | indent2
        fi
 fi
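
Options 2 and 3 above as concrete command lines (the test-script name and
config path are illustrative):

    sudo -E ./FTC10.sh remote kube --env-file ../common/test_env.sh     # 2: keep the caller's KUBECONFIG
    sudo ./FTC10.sh remote kube --env-file ../common/test_env.sh \
         --kubeconfig /home/user/.kube/config                           # 3: explicit config file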
 
@@ -1483,7 +1510,7 @@ setup_testenvironment() {
 
 
        if [ "$DELETE_KUBE_NAMESPACES" -eq 1 ]; then
-               test_env_namespaces=$(kubectl get ns  --no-headers -o custom-columns=":metadata.name" -l autotest=engine) #Get list of ns created by the test env
+               test_env_namespaces=$(kubectl $KUBECONF get ns  --no-headers -o custom-columns=":metadata.name" -l autotest=engine) #Get list of ns created by the test env
                if [ $? -ne 0 ]; then
                        echo " Cannot get list of namespaces...ignoring delete"
                else
@@ -1492,7 +1519,7 @@ setup_testenvironment() {
                        done
                fi
        else
-               echo " Namespace delete option not set"
+               echo " Namespace delete option not set or ignored"
        fi
        echo ""
 
@@ -1505,7 +1532,7 @@ setup_testenvironment() {
                echo " Removing stopped containers..."
                docker rm $(docker ps -qa  --filter "label=nrttest_app") 2> /dev/null
        else
-               echo " Contatiner delete option not set"
+               echo " Container delete option not set or ignored"
        fi
        echo ""
 
@@ -1682,7 +1709,7 @@ setup_testenvironment() {
                        echo -e " Pulling remote snapshot or staging images may in some cases result in pulling newer image versions outside the control of the test engine"
                        export KUBE_IMAGE_PULL_POLICY="Always"
                fi
-               CLUSTER_IP=$(kubectl config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}')
+               CLUSTER_IP=$(kubectl $KUBECONF config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}')
                echo -e $YELLOW" The cluster hostname/ip is: $CLUSTER_IP"$EYELLOW
 
                echo "================================================================================="
@@ -2059,12 +2086,12 @@ __clean_containers() {
 # Get resource type for scaling
 # args: <resource-name> <namespace>
 __kube_get_resource_type() {
-       kubectl get deployment $1 -n $2 1> /dev/null 2> ./tmp/kubeerr
+       kubectl $KUBECONF get deployment $1 -n $2 1> /dev/null 2> ./tmp/kubeerr
        if [ $? -eq 0 ]; then
                echo "deployment"
                return 0
        fi
-       kubectl get sts $1 -n $2 1> /dev/null 2> ./tmp/kubeerr
+       kubectl $KUBECONF get sts $1 -n $2 1> /dev/null 2> ./tmp/kubeerr
        if [ $? -eq 0 ]; then
                echo "sts"
                return 0
@@ -2078,7 +2105,7 @@ __kube_get_resource_type() {
 # (Not for test scripts)
 __kube_scale() {
        echo -ne "  Setting $1 $2 replicas=$4 in namespace $3"$SAMELINE
-       kubectl scale  $1 $2  -n $3 --replicas=$4 1> /dev/null 2> ./tmp/kubeerr
+       kubectl $KUBECONF scale  $1 $2  -n $3 --replicas=$4 1> /dev/null 2> ./tmp/kubeerr
        if [ $? -ne 0 ]; then
                echo -e "  Setting $1 $2 replicas=$4 in namespace $3 $RED Failed $ERED"
                ((RES_CONF_FAIL++))
@@ -2091,7 +2118,7 @@ __kube_scale() {
        TSTART=$SECONDS
 
        for i in {1..500}; do
-               count=$(kubectl get $1/$2  -n $3 -o jsonpath='{.status.replicas}' 2> /dev/null)
+               count=$(kubectl $KUBECONF get $1/$2  -n $3 -o jsonpath='{.status.replicas}' 2> /dev/null)
                retcode=$?
                if [ -z "$count" ]; then
                        #Sometimes no value is returned, e.g. when the resource has 0 replicas
@@ -2129,11 +2156,11 @@ __kube_scale_all_resources() {
        labelid=$3
        resources="deployment replicaset statefulset"
        for restype in $resources; do
-               result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
+               result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
                if [ $? -eq 0 ] && [ ! -z "$result" ]; then
                        for resid in $result; do
                                echo -ne "  Ordered scaling $restype $resid in namespace $namespace with label $labelname=$labelid to 0"$SAMELINE
-                               kubectl scale  $restype $resid  -n $namespace --replicas=0 1> /dev/null 2> ./tmp/kubeerr
+                               kubectl $KUBECONF scale  $restype $resid  -n $namespace --replicas=0 1> /dev/null 2> ./tmp/kubeerr
                                echo -e "  Ordered scaling $restype $resid in namespace $namespace with label $labelname=$labelid to 0 $GREEN OK $EGREEN"
                        done
                fi
@@ -2159,18 +2186,18 @@ __kube_scale_and_wait_all_resources() {
                scaled_all=0
                for restype in $resources; do
                    if [ -z "$3" ]; then
-                               result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname')].metadata.name}')
+                               result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname')].metadata.name}')
                        else
-                               result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
+                               result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
                        fi
                        if [ $? -eq 0 ] && [ ! -z "$result" ]; then
                                for resid in $result; do
                                        echo -e "   Ordered scaling $restype $resid in namespace $namespace with label $labelname=$labelid to 0"
-                                       kubectl scale  $restype $resid  -n $namespace --replicas=0 1> /dev/null 2> ./tmp/kubeerr
+                                       kubectl $KUBECONF scale  $restype $resid  -n $namespace --replicas=0 1> /dev/null 2> ./tmp/kubeerr
                                        count=1
                                        T_START=$SECONDS
                                        while [ $count -ne 0 ]; do
-                                               count=$(kubectl get $restype $resid  -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null)
+                                               count=$(kubectl $KUBECONF get $restype $resid  -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null)
                                                echo -ne "    Scaling $restype $resid in namespace $namespace with label $labelname=$labelid to 0, current count=$count"$SAMELINE
                                                if [ $? -eq 0 ] && [ ! -z "$count" ]; then
                                                        sleep 0.5
@@ -2212,14 +2239,14 @@ __kube_delete_all_resources() {
                        ns_flag=""
                        ns_text=""
                fi
-               result=$(kubectl get $restype $ns_flag -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
+               result=$(kubectl $KUBECONF get $restype $ns_flag -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
                if [ $? -eq 0 ] && [ ! -z "$result" ]; then
                        deleted_resourcetypes=$deleted_resourcetypes" "$restype
                        for resid in $result; do
                                if [ $restype == "replicaset" ] || [ $restype == "statefulset" ]; then
                                        count=1
                                        while [ $count -ne 0 ]; do
-                                               count=$(kubectl get $restype $resid  $ns_flag -o jsonpath='{.status.replicas}' 2> /dev/null)
+                                               count=$(kubectl $KUBECONF get $restype $resid  $ns_flag -o jsonpath='{.status.replicas}' 2> /dev/null)
                                                echo -ne "  Scaling $restype $resid $ns_text with label $labelname=$labelid to 0, current count=$count"$SAMELINE
                                                if [ $? -eq 0 ] && [ ! -z "$count" ]; then
                                                        sleep 0.5
@@ -2230,7 +2257,7 @@ __kube_delete_all_resources() {
                                        echo -e "  Scaled $restype $resid $ns_text with label $labelname=$labelid to 0, current count=$count $GREEN OK $EGREEN"
                                fi
                                echo -ne "  Deleting $restype $resid $ns_text with label $labelname=$labelid "$SAMELINE
-                               kubectl delete --grace-period=1 $restype $resid $ns_flag 1> /dev/null 2> ./tmp/kubeerr
+                               kubectl $KUBECONF delete --grace-period=1 $restype $resid $ns_flag 1> /dev/null 2> ./tmp/kubeerr
                                if [ $? -eq 0 ]; then
                                        echo -e "  Deleted $restype $resid $ns_text with label $labelname=$labelid $GREEN OK $EGREEN"
                                else
@@ -2253,7 +2280,7 @@ __kube_delete_all_resources() {
                        result="dummy"
                        while [ ! -z "$result" ]; do
                                sleep 0.5
-                               result=$(kubectl get $restype $ns_flag -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
+                               result=$(kubectl $KUBECONF get $restype $ns_flag -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
                                echo -ne "  Waiting for $restype $ns_text with label $labelname=$labelid to be deleted...$(($SECONDS-$T_START)) seconds "$SAMELINE
                                if [ -z "$result" ]; then
                                        echo -e " Waiting for $restype $ns_text with label $labelname=$labelid to be deleted...$(($SECONDS-$T_START)) seconds $GREEN OK $EGREEN"
@@ -2272,17 +2299,17 @@ __kube_delete_all_resources() {
 __kube_create_namespace() {
 
        #Check if test namespace exists, if not create it
-       kubectl get namespace $1 1> /dev/null 2> ./tmp/kubeerr
+       kubectl $KUBECONF get namespace $1 1> /dev/null 2> ./tmp/kubeerr
        if [ $? -ne 0 ]; then
                echo -ne " Creating namespace "$1 $SAMELINE
-               kubectl create namespace $1 1> /dev/null 2> ./tmp/kubeerr
+               kubectl $KUBECONF create namespace $1 1> /dev/null 2> ./tmp/kubeerr
                if [ $? -ne 0 ]; then
                        echo -e " Creating namespace $1 $RED$BOLD FAILED $EBOLD$ERED"
                        ((RES_CONF_FAIL++))
                        echo "  Message: $(<./tmp/kubeerr)"
                        return 1
                else
-                       kubectl label ns $1 autotest=engine
+                       kubectl $KUBECONF label ns $1 autotest=engine
                        echo -e " Creating namespace $1 $GREEN$BOLD OK $EBOLD$EGREEN"
                fi
        else
@@ -2297,10 +2324,10 @@ __kube_create_namespace() {
 __kube_delete_namespace() {
 
        #Check if test namespace exists, if so remove it
-       kubectl get namespace $1 1> /dev/null 2> ./tmp/kubeerr
+       kubectl $KUBECONF get namespace $1 1> /dev/null 2> ./tmp/kubeerr
        if [ $? -eq 0 ]; then
                echo -ne " Removing namespace "$1 $SAMELINE
-               kubectl delete namespace $1 1> /dev/null 2> ./tmp/kubeerr
+               kubectl $KUBECONF delete namespace $1 1> /dev/null 2> ./tmp/kubeerr
                if [ $? -ne 0 ]; then
                        echo -e " Removing namespace $1 $RED$BOLD FAILED $EBOLD$ERED"
                        ((RES_CONF_FAIL++))
@@ -2346,7 +2373,7 @@ __kube_get_service_host() {
                exit 1
        fi
        for timeout in {1..60}; do
-               host=$(kubectl get svc $1  -n $2 -o jsonpath='{.spec.clusterIP}')
+               host=$(kubectl $KUBECONF get svc $1  -n $2 -o jsonpath='{.spec.clusterIP}')
                if [ $? -eq 0 ]; then
                        if [ ! -z "$host" ]; then
                                echo $host
@@ -2371,7 +2398,7 @@ __kube_get_service_port() {
        fi
 
        for timeout in {1..60}; do
-               port=$(kubectl get svc $1  -n $2 -o jsonpath='{...ports[?(@.name=="'$3'")].port}')
+               port=$(kubectl $KUBECONF get svc $1  -n $2 -o jsonpath='{...ports[?(@.name=="'$3'")].port}')
                if [ $? -eq 0 ]; then
                        if [ ! -z "$port" ]; then
                                echo $port
@@ -2396,7 +2423,7 @@ __kube_get_service_nodeport() {
        fi
 
        for timeout in {1..60}; do
-               port=$(kubectl get svc $1  -n $2 -o jsonpath='{...ports[?(@.name=="'$3'")].nodePort}')
+               port=$(kubectl $KUBECONF get svc $1  -n $2 -o jsonpath='{...ports[?(@.name=="'$3'")].nodePort}')
                if [ $? -eq 0 ]; then
                        if [ ! -z "$port" ]; then
                                echo $port
@@ -2416,7 +2443,7 @@ __kube_get_service_nodeport() {
 __kube_create_instance() {
        echo -ne " Creating $1 $2"$SAMELINE
        envsubst < $3 > $4
-       kubectl apply -f $4 1> /dev/null 2> ./tmp/kubeerr
+       kubectl $KUBECONF apply -f $4 1> /dev/null 2> ./tmp/kubeerr
        if [ $? -ne 0 ]; then
                ((RES_CONF_FAIL++))
                echo -e " Creating $1 $2 $RED Failed $ERED"
@@ -2434,21 +2461,21 @@ __kube_create_configmap() {
        echo -ne " Creating configmap $1 "$SAMELINE
        envsubst < $5 > $5"_tmp"
        cp $5"_tmp" $5  #Need to copy back to orig file name since create configmap needs the original file name
-       kubectl create configmap $1  -n $2 --from-file=$5 --dry-run=client -o yaml > $6
+       kubectl $KUBECONF create configmap $1  -n $2 --from-file=$5 --dry-run=client -o yaml > $6
        if [ $? -ne 0 ]; then
                echo -e " Creating configmap $1 $RED Failed $ERED"
                ((RES_CONF_FAIL++))
                return 1
        fi
 
-       kubectl apply -f $6 1> /dev/null 2> ./tmp/kubeerr
+       kubectl $KUBECONF apply -f $6 1> /dev/null 2> ./tmp/kubeerr
        if [ $? -ne 0 ]; then
                echo -e " Creating configmap $1 $RED Apply failed $ERED"
                echo "  Message: $(<./tmp/kubeerr)"
                ((RES_CONF_FAIL++))
                return 1
        fi
-       kubectl label configmap $1 -n $2 $3"="$4 --overwrite 1> /dev/null 2> ./tmp/kubeerr
+       kubectl $KUBECONF label configmap $1 -n $2 $3"="$4 --overwrite 1> /dev/null 2> ./tmp/kubeerr
        if [ $? -ne 0 ]; then
                echo -e " Creating configmap $1 $RED Labeling failed $ERED"
                echo "  Message: $(<./tmp/kubeerr)"
@@ -2456,7 +2483,7 @@ __kube_create_configmap() {
                return 1
        fi
        # Log the resulting map
-       kubectl get configmap $1 -n $2 -o yaml > $6
+       kubectl $KUBECONF get configmap $1 -n $2 -o yaml > $6
 
        echo -e " Creating configmap $1 $GREEN OK $EGREEN"
        return 0
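
The configmap flow above in isolation - render client-side with --dry-run,
apply, then label (names and files are illustrative):

    kubectl $KUBECONF create configmap my-cm -n test-ns --from-file=app.conf --dry-run=client -o yaml > cm.yaml
    kubectl $KUBECONF apply -f cm.yaml
    kubectl $KUBECONF label configmap my-cm -n test-ns autotest=ENGINE --overwrite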
@@ -2500,7 +2527,7 @@ __kube_clean_pvc() {
 
        envsubst < $input_yaml > $output_yaml
 
-       kubectl delete -f $output_yaml 1> /dev/null 2> /dev/null   # Delete the previous terminated pod - if existing
+       kubectl $KUBECONF delete -f $output_yaml 1> /dev/null 2> /dev/null   # Delete the previous terminated pod - if existing
 
        __kube_create_instance pod $PVC_CLEANER_APP_NAME $input_yaml $output_yaml
        if [ $? -ne 0 ]; then
@@ -2510,7 +2537,7 @@ __kube_clean_pvc() {
 
        term_ts=$(($SECONDS+30))
        while [ $term_ts -gt $SECONDS ]; do
-               pod_status=$(kubectl get pod pvc-cleaner -n $PVC_CLEANER_NAMESPACE --no-headers -o custom-columns=":status.phase")
+               pod_status=$(kubectl $KUBECONF get pod pvc-cleaner -n $PVC_CLEANER_NAMESPACE --no-headers -o custom-columns=":status.phase")
                if [ "$pod_status" == "Succeeded" ]; then
                        return 0
                fi
@@ -2586,7 +2613,7 @@ clean_environment() {
                __clean_containers
                if [ $PRE_CLEAN -eq 1 ]; then
                        echo " Cleaning kubernetes resources to free up resources, may take time..."
-                       ../common/clean_kube.sh 2>&1 /dev/null
+                       ../common/clean_kube.sh $KUBECONF > /dev/null 2>&1
                        echo ""
                fi
        fi
@@ -2887,11 +2914,11 @@ store_logs() {
                done
        fi
        if [ $RUNMODE == "KUBE" ]; then
-               namespaces=$(kubectl  get namespaces -o jsonpath='{.items[?(@.metadata.name)].metadata.name}')
+               namespaces=$(kubectl $KUBECONF  get namespaces -o jsonpath='{.items[?(@.metadata.name)].metadata.name}')
                for nsid in $namespaces; do
-                       pods=$(kubectl get pods -n $nsid -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
+                       pods=$(kubectl $KUBECONF get pods -n $nsid -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
                        for podid in $pods; do
-                               kubectl logs -n $nsid $podid > $TESTLOGS/$ATC/$1_${podid}.log
+                               kubectl $KUBECONF logs -n $nsid $podid > $TESTLOGS/$ATC/$1_${podid}.log
                        done
                done
        fi