X-Git-Url: https://gerrit.o-ran-sc.org/r/gitweb?a=blobdiff_plain;f=test%2Fcommon%2Ftestcase_common.sh;h=5ebc9d3ae5e77c6abfe0f5c6b9b1677fbea1302d;hb=0a882454d203e62e18f55ed1d3d5b3790801169b;hp=c9374cf80597bf37c0a146b45311325b9d2a16d9;hpb=36acb0c596211d3d771deb421eb71a05b3489e84;p=nonrtric.git diff --git a/test/common/testcase_common.sh b/test/common/testcase_common.sh index c9374cf8..5ebc9d3a 100755 --- a/test/common/testcase_common.sh +++ b/test/common/testcase_common.sh @@ -26,10 +26,11 @@ __print_args() { echo "Args: remote|remote-remove docker|kube --env-file [release] [auto-clean] [--stop-at-error] " echo " [--ricsim-prefix ] [--use-local-image +] [--use-snapshot-image +]" - echo " [--use-staging-image +] [--use-release-image +] [--image-repo +] [--use-release-image +] [--image-repo ]" echo " [--repo-policy local|remote] [--cluster-timeout ] [--print-stats]" echo " [--override ] [--pre-clean] [--gen-stats] [--delete-namespaces]" - echo " [--delete-containers] [--endpoint-stats]" + echo " [--delete-containers] [--endpoint-stats] [--kubeconfig ] [--host-path-dir ]" + echo " [--kubecontext ]" } if [ $# -eq 1 ] && [ "$1" == "help" ]; then @@ -64,6 +65,9 @@ if [ $# -eq 1 ] && [ "$1" == "help" ]; then echo "--delete-namespaces - Delete kubernetes namespaces before starting tests - but only those created by the test scripts. Kube mode only. Ignored if running with prestarted apps." echo "--delete-containers - Delete docker containers before starting tests - but only those created by the test scripts. Docker mode only." echo "--endpoint-stats - Collect endpoint statistics" + echo "--kubeconfig - Configure kubectl to use cluster specific cluster config file" + echo "--host-path-dir - (Base-)path on local-hostmounted to all VMs (nodes), for hostpath volumes in kube" + echo "--kubecontext - Configure kubectl to use a certain context, e.g 'minikube'" echo "" echo "List of app short names supported: "$APP_SHORT_NAMES exit 0 @@ -99,11 +103,6 @@ TEST_ENV_VAR_FILE_OVERRIDE="" echo "Test case started as: ${BASH_SOURCE[$i+1]} "$@ -#Localhost constants -LOCALHOST_NAME="localhost" -LOCALHOST_HTTP="http://localhost" -LOCALHOST_HTTPS="https://localhost" - # Var to hold 'auto' in case containers shall be stopped when test case ends AUTO_CLEAN="" @@ -266,6 +265,17 @@ DELETE_KUBE_NAMESPACES=0 #Var to control if containers shall be delete before test setup DELETE_CONTAINERS=0 +#Var to configure kubectl from a config file or context +KUBECONF="" + +#Var pointing to dir mounted to each kubernetes node (master and workers) +#Persistent volumes using "hostpath" are allocated beneath the point. +#Typically it is a dir on local host mounted to each VM running the master and worker. 
+#So the intention is make this dir available so the PODs can be restarted on any +#node and still access the persistent data +#If not set from cmd line, the path is defaults to "/tmp" +HOST_PATH_BASE_DIR="" + #File to keep deviation messages DEVIATION_FILE=".tmp_deviations" rm $DEVIATION_FILE &> /dev/null @@ -437,7 +447,7 @@ __collect_endpoint_stats_image_info() { return fi ENDPOINT_STAT_FILE=$TESTLOGS/$ATC/imageinfo_$ATC_$1".log" - echo $POLICY_AGENT_IMAGE > $ENDPOINT_STAT_FILE + echo $A1PMS_IMAGE > $ENDPOINT_STAT_FILE } #Var for measuring execution time @@ -810,7 +820,74 @@ while [ $paramerror -eq 0 ] && [ $foundparm -eq 0 ]; do foundparm=0 fi fi - + if [ $paramerror -eq 0 ]; then + if [ "$1" == "--kubeconfig" ]; then + shift; + if [ -z "$1" ]; then + paramerror=1 + if [ -z "$paramerror_str" ]; then + paramerror_str="No path found for : '--kubeconfig'" + fi + else + if [ -f $1 ]; then + if [ ! -z "$KUBECONF" ]; then + paramerror=1 + if [ -z "$paramerror_str" ]; then + paramerror_str="Only one of --kubeconfig/--kubecontext can be set" + fi + else + KUBECONF="--kubeconfig $1" + echo "Option set - Kubeconfig path: "$1 + shift; + foundparm=0 + fi + else + paramerror=1 + if [ -z "$paramerror_str" ]; then + paramerror_str="File $1 for --kubeconfig not found" + fi + fi + fi + fi + fi + if [ $paramerror -eq 0 ]; then + if [ "$1" == "--kubecontext" ]; then + shift; + if [ -z "$1" ]; then + paramerror=1 + if [ -z "$paramerror_str" ]; then + paramerror_str="No context-name found for : '--kubecontext'" + fi + else + if [ ! -z "$KUBECONF" ]; then + paramerror=1 + if [ -z "$paramerror_str" ]; then + paramerror_str="Only one of --kubeconfig or --kubecontext can be set" + fi + else + KUBECONF="--context $1" + echo "Option set - Kubecontext name: "$1 + shift; + foundparm=0 + fi + fi + fi + fi + if [ $paramerror -eq 0 ]; then + if [ "$1" == "--host-path-dir" ]; then + shift; + if [ -z "$1" ]; then + paramerror=1 + if [ -z "$paramerror_str" ]; then + paramerror_str="No path found for : '--host-path-dir'" + fi + else + HOST_PATH_BASE_DIR=$1 + shift + foundparm=0 + fi + fi + fi done echo "" @@ -828,6 +905,14 @@ if [ $paramerror -eq 1 ]; then exit 1 fi +#Localhost constants +LOCALHOST_NAME="localhost" +# if [ ! -z "$DOCKER_HOST" ]; then +# LOCALHOST_NAME=$(echo $DOCKER_HOST | awk -F[/:] '{print $4}' ) +# fi +LOCALHOST_HTTP="http://$LOCALHOST_NAME" +LOCALHOST_HTTPS="https://$LOCALHOST_NAME" + # sourcing the selected env variables for the test case if [ -f "$TEST_ENV_VAR_FILE" ]; then echo -e $BOLD"Sourcing env vars from: "$TEST_ENV_VAR_FILE$EBOLD @@ -972,40 +1057,38 @@ fi echo " docker is installed and using versions:" echo " $(docker version --format 'Client version {{.Client.Version}} Server version {{.Server.Version}}')" -tmp=$(which docker-compose) -if [ $? -ne 0 ] || [ -z "$tmp" ]; then - if [ $RUNMODE == "DOCKER" ]; then +if [ $RUNMODE == "DOCKER" ]; then + tmp=$(which docker-compose) + if [ $? -ne 0 ] || [ -z "$tmp" ]; then echo -e $RED"docker-compose is required to run the test environment, pls install"$ERED exit 1 + else + tmp=$(docker-compose version --short) + echo " docker-compose installed and using version $tmp" + if [[ "$tmp" == *'v2'* ]]; then + DOCKER_COMPOSE_VERION="V2" + fi fi fi -tmp=$(docker-compose version --short) -echo " docker-compose installed and using version $tmp" -if [[ "$tmp" == *'v2'* ]]; then - DOCKER_COMPOSE_VERION="V2" -fi - -tmp=$(which kubectl) -if [ $? 
-ne 0 ] || [ -z tmp ]; then - if [ $RUNMODE == "KUBE" ]; then +if [ $RUNMODE == "KUBE" ]; then + tmp=$(which kubectl) + if [ $? -ne 0 ] || [ -z tmp ]; then echo -e $RED"kubectl is required to run the test environment in kubernetes mode, pls install"$ERED exit 1 - fi -else - if [ $RUNMODE == "KUBE" ]; then + else echo " kubectl is installed and using versions:" - echo $(kubectl version --short=true) | indent2 - res=$(kubectl cluster-info 2>&1) + echo $(kubectl $KUBECONF version --short=true) | indent2 + res=$(kubectl $KUBECONF cluster-info 2>&1) if [ $? -ne 0 ]; then echo -e "$BOLD$RED############################################# $ERED$EBOLD" - echo -e $BOLD$RED"Command 'kubectl cluster-info' returned error $ERED$EBOLD" + echo -e $BOLD$RED"Command 'kubectl '$KUBECONF' cluster-info' returned error $ERED$EBOLD" echo -e "$BOLD$RED############################################# $ERED$EBOLD" echo " " echo "kubectl response:" echo $res echo " " echo "This script may have been started with user with no permission to run kubectl" - echo "Try running with 'sudo' or set 'KUBECONFIG'" + echo "Try running with 'sudo', set env KUBECONFIG or set '--kubeconfig' parameter" echo "Do either 1, 2 or 3 " echo " " echo "1" @@ -1018,13 +1101,24 @@ else echo -e $BOLD"sudo -E "$EBOLD echo " " echo "3" - echo "Set KUBECONFIG inline (replace user)" - echo -e $BOLD"sudo KUBECONFIG='/home//.kube/config' "$EBOLD + echo "Set KUBECONFIG via script parameter" + echo -e $BOLD"sudo ... --kubeconfig /home//.kube/ ...."$EBOLD + echo "The config file need to downloaded from the cluster" exit 1 fi echo " Node(s) and container runtime config" - kubectl get nodes -o wide | indent2 + kubectl $KUBECONF get nodes -o wide | indent2 + echo "" + if [ -z "$HOST_PATH_BASE_DIR" ]; then + HOST_PATH_BASE_DIR="/tmp" + echo " Persistent volumes will be mounted to $HOST_PATH_BASE_DIR on applicable node" + echo " No guarantee that persistent volume data is available on all nodes in the cluster" + else + echo "Persistent volumes will be mounted to base dir: $HOST_PATH_BASE_DIR" + echo "Assuming this dir is mounted from each node to a dir on the localhost or other" + echo "file system available to all nodes" + fi fi fi @@ -1483,7 +1577,7 @@ setup_testenvironment() { if [ "$DELETE_KUBE_NAMESPACES" -eq 1 ]; then - test_env_namespaces=$(kubectl get ns --no-headers -o custom-columns=":metadata.name" -l autotest=engine) #Get list of ns created by the test env + test_env_namespaces=$(kubectl $KUBECONF get ns --no-headers -o custom-columns=":metadata.name" -l autotest=engine) #Get list of ns created by the test env if [ $? -ne 0 ]; then echo " Cannot get list of namespaces...ignoring delete" else @@ -1492,7 +1586,7 @@ setup_testenvironment() { done fi else - echo " Namespace delete option not set" + echo " Namespace delete option not set or ignored" fi echo "" @@ -1505,7 +1599,7 @@ setup_testenvironment() { echo " Removing stopped containers..." 
docker rm $(docker ps -qa --filter "label=nrttest_app") 2> /dev/null else - echo " Contatiner delete option not set" + echo " Contatiner delete option not set or ignored" fi echo "" @@ -1682,8 +1776,8 @@ setup_testenvironment() { echo -e " Pulling remote snapshot or staging images my in some case result in pulling newer image versions outside the control of the test engine" export KUBE_IMAGE_PULL_POLICY="Always" fi - CLUSTER_IP=$(kubectl config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}') - echo -e $YELLOW" The cluster hostname/ip is: $CLUSTER_IP"$EYELLOW + #CLUSTER_IP=$(kubectl $KUBECONF config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}') + #echo -e $YELLOW" The cluster hostname/ip is: $CLUSTER_IP"$EYELLOW echo "=================================================================================" echo "=================================================================================" @@ -2059,12 +2153,12 @@ __clean_containers() { # Get resource type for scaling # args: __kube_get_resource_type() { - kubectl get deployment $1 -n $2 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF get deployment $1 -n $2 1> /dev/null 2> ./tmp/kubeerr if [ $? -eq 0 ]; then echo "deployment" return 0 fi - kubectl get sts $1 -n $2 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF get sts $1 -n $2 1> /dev/null 2> ./tmp/kubeerr if [ $? -eq 0 ]; then echo "sts" return 0 @@ -2078,7 +2172,7 @@ __kube_get_resource_type() { # (Not for test scripts) __kube_scale() { echo -ne " Setting $1 $2 replicas=$4 in namespace $3"$SAMELINE - kubectl scale $1 $2 -n $3 --replicas=$4 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF scale $1 $2 -n $3 --replicas=$4 1> /dev/null 2> ./tmp/kubeerr if [ $? -ne 0 ]; then echo -e " Setting $1 $2 replicas=$4 in namespace $3 $RED Failed $ERED" ((RES_CONF_FAIL++)) @@ -2091,7 +2185,7 @@ __kube_scale() { TSTART=$SECONDS for i in {1..500}; do - count=$(kubectl get $1/$2 -n $3 -o jsonpath='{.status.replicas}' 2> /dev/null) + count=$(kubectl $KUBECONF get $1/$2 -n $3 -o jsonpath='{.status.replicas}' 2> /dev/null) retcode=$? if [ -z "$count" ]; then #No value is sometimes returned for some reason, in case the resource has replica 0 @@ -2129,11 +2223,11 @@ __kube_scale_all_resources() { labelid=$3 resources="deployment replicaset statefulset" for restype in $resources; do - result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}') + result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}') if [ $? -eq 0 ] && [ ! 
-z "$result" ]; then for resid in $result; do echo -ne " Ordered caling $restype $resid in namespace $namespace with label $labelname=$labelid to 0"$SAMELINE - kubectl scale $restype $resid -n $namespace --replicas=0 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF scale $restype $resid -n $namespace --replicas=0 1> /dev/null 2> ./tmp/kubeerr echo -e " Ordered scaling $restype $resid in namespace $namespace with label $labelname=$labelid to 0 $GREEN OK $EGREEN" done fi @@ -2159,18 +2253,18 @@ __kube_scale_and_wait_all_resources() { scaled_all=0 for restype in $resources; do if [ -z "$3" ]; then - result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname')].metadata.name}') + result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname')].metadata.name}') else - result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}') + result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}') fi if [ $? -eq 0 ] && [ ! -z "$result" ]; then for resid in $result; do echo -e " Ordered scaling $restype $resid in namespace $namespace with label $labelname=$labelid to 0" - kubectl scale $restype $resid -n $namespace --replicas=0 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF scale $restype $resid -n $namespace --replicas=0 1> /dev/null 2> ./tmp/kubeerr count=1 T_START=$SECONDS while [ $count -ne 0 ]; do - count=$(kubectl get $restype $resid -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null) + count=$(kubectl $KUBECONF get $restype $resid -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null) echo -ne " Scaling $restype $resid in namespace $namespace with label $labelname=$labelid to 0, current count=$count"$SAMELINE if [ $? -eq 0 ] && [ ! -z "$count" ]; then sleep 0.5 @@ -2199,7 +2293,7 @@ __kube_delete_all_resources() { namespace=$1 labelname=$2 labelid=$3 - resources="deployments replicaset statefulset services pods configmaps persistentvolumeclaims persistentvolumes serviceaccounts clusterrolebindings" + resources="deployments replicaset statefulset services pods configmaps persistentvolumeclaims persistentvolumes serviceaccounts clusterrolebindings secrets" deleted_resourcetypes="" for restype in $resources; do ns_flag="-n $namespace" @@ -2212,14 +2306,14 @@ __kube_delete_all_resources() { ns_flag="" ns_text="" fi - result=$(kubectl get $restype $ns_flag -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}') + result=$(kubectl $KUBECONF get $restype $ns_flag -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}') if [ $? -eq 0 ] && [ ! -z "$result" ]; then deleted_resourcetypes=$deleted_resourcetypes" "$restype for resid in $result; do if [ $restype == "replicaset" ] || [ $restype == "statefulset" ]; then count=1 while [ $count -ne 0 ]; do - count=$(kubectl get $restype $resid $ns_flag -o jsonpath='{.status.replicas}' 2> /dev/null) + count=$(kubectl $KUBECONF get $restype $resid $ns_flag -o jsonpath='{.status.replicas}' 2> /dev/null) echo -ne " Scaling $restype $resid $ns_text with label $labelname=$labelid to 0, current count=$count"$SAMELINE if [ $? -eq 0 ] && [ ! 
-z "$count" ]; then sleep 0.5 @@ -2230,7 +2324,7 @@ __kube_delete_all_resources() { echo -e " Scaled $restype $resid $ns_text with label $labelname=$labelid to 0, current count=$count $GREEN OK $EGREEN" fi echo -ne " Deleting $restype $resid $ns_text with label $labelname=$labelid "$SAMELINE - kubectl delete --grace-period=1 $restype $resid $ns_flag 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF delete --grace-period=1 $restype $resid $ns_flag 1> /dev/null 2> ./tmp/kubeerr if [ $? -eq 0 ]; then echo -e " Deleted $restype $resid $ns_text with label $labelname=$labelid $GREEN OK $EGREEN" else @@ -2253,7 +2347,7 @@ __kube_delete_all_resources() { result="dummy" while [ ! -z "$result" ]; do sleep 0.5 - result=$(kubectl get $restype $ns_flag -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}') + result=$(kubectl $KUBECONF get $restype $ns_flag -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}') echo -ne " Waiting for $restype $ns_text with label $labelname=$labelid to be deleted...$(($SECONDS-$T_START)) seconds "$SAMELINE if [ -z "$result" ]; then echo -e " Waiting for $restype $ns_text with label $labelname=$labelid to be deleted...$(($SECONDS-$T_START)) seconds $GREEN OK $EGREEN" @@ -2272,17 +2366,17 @@ __kube_delete_all_resources() { __kube_create_namespace() { #Check if test namespace exists, if not create it - kubectl get namespace $1 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF get namespace $1 1> /dev/null 2> ./tmp/kubeerr if [ $? -ne 0 ]; then echo -ne " Creating namespace "$1 $SAMELINE - kubectl create namespace $1 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF create namespace $1 1> /dev/null 2> ./tmp/kubeerr if [ $? -ne 0 ]; then echo -e " Creating namespace $1 $RED$BOLD FAILED $EBOLD$ERED" ((RES_CONF_FAIL++)) echo " Message: $(<./tmp/kubeerr)" return 1 else - kubectl label ns $1 autotest=engine + kubectl $KUBECONF label ns $1 autotest=engine > /dev/null echo -e " Creating namespace $1 $GREEN$BOLD OK $EBOLD$EGREEN" fi else @@ -2297,10 +2391,10 @@ __kube_create_namespace() { __kube_delete_namespace() { #Check if test namespace exists, if so remove it - kubectl get namespace $1 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF get namespace $1 1> /dev/null 2> ./tmp/kubeerr if [ $? -eq 0 ]; then echo -ne " Removing namespace "$1 $SAMELINE - kubectl delete namespace $1 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF delete namespace $1 1> /dev/null 2> ./tmp/kubeerr if [ $? -ne 0 ]; then echo -e " Removing namespace $1 $RED$BOLD FAILED $EBOLD$ERED" ((RES_CONF_FAIL++)) @@ -2346,7 +2440,7 @@ __kube_get_service_host() { exit 1 fi for timeout in {1..60}; do - host=$(kubectl get svc $1 -n $2 -o jsonpath='{.spec.clusterIP}') + host=$(kubectl $KUBECONF get svc $1 -n $2 -o jsonpath='{.spec.clusterIP}') if [ $? -eq 0 ]; then if [ ! -z "$host" ]; then echo $host @@ -2371,7 +2465,7 @@ __kube_get_service_port() { fi for timeout in {1..60}; do - port=$(kubectl get svc $1 -n $2 -o jsonpath='{...ports[?(@.name=="'$3'")].port}') + port=$(kubectl $KUBECONF get svc $1 -n $2 -o jsonpath='{...ports[?(@.name=="'$3'")].port}') if [ $? -eq 0 ]; then if [ ! -z "$port" ]; then echo $port @@ -2396,7 +2490,7 @@ __kube_get_service_nodeport() { fi for timeout in {1..60}; do - port=$(kubectl get svc $1 -n $2 -o jsonpath='{...ports[?(@.name=="'$3'")].nodePort}') + port=$(kubectl $KUBECONF get svc $1 -n $2 -o jsonpath='{...ports[?(@.name=="'$3'")].nodePort}') if [ $? -eq 0 ]; then if [ ! 
-z "$port" ]; then echo $port @@ -2416,7 +2510,7 @@ __kube_get_service_nodeport() { __kube_create_instance() { echo -ne " Creating $1 $2"$SAMELINE envsubst < $3 > $4 - kubectl apply -f $4 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF apply -f $4 1> /dev/null 2> ./tmp/kubeerr if [ $? -ne 0 ]; then ((RES_CONF_FAIL++)) echo -e " Creating $1 $2 $RED Failed $ERED" @@ -2434,21 +2528,21 @@ __kube_create_configmap() { echo -ne " Creating configmap $1 "$SAMELINE envsubst < $5 > $5"_tmp" cp $5"_tmp" $5 #Need to copy back to orig file name since create configmap neeed the original file name - kubectl create configmap $1 -n $2 --from-file=$5 --dry-run=client -o yaml > $6 + kubectl $KUBECONF create configmap $1 -n $2 --from-file=$5 --dry-run=client -o yaml > $6 if [ $? -ne 0 ]; then echo -e " Creating configmap $1 $RED Failed $ERED" ((RES_CONF_FAIL++)) return 1 fi - kubectl apply -f $6 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF apply -f $6 1> /dev/null 2> ./tmp/kubeerr if [ $? -ne 0 ]; then echo -e " Creating configmap $1 $RED Apply failed $ERED" echo " Message: $(<./tmp/kubeerr)" ((RES_CONF_FAIL++)) return 1 fi - kubectl label configmap $1 -n $2 $3"="$4 --overwrite 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF label configmap $1 -n $2 $3"="$4 --overwrite 1> /dev/null 2> ./tmp/kubeerr if [ $? -ne 0 ]; then echo -e " Creating configmap $1 $RED Labeling failed $ERED" echo " Message: $(<./tmp/kubeerr)" @@ -2456,7 +2550,7 @@ __kube_create_configmap() { return 1 fi # Log the resulting map - kubectl get configmap $1 -n $2 -o yaml > $6 + kubectl $KUBECONF get configmap $1 -n $2 -o yaml > $6 echo -e " Creating configmap $1 $GREEN OK $EGREEN" return 0 @@ -2500,7 +2594,7 @@ __kube_clean_pvc() { envsubst < $input_yaml > $output_yaml - kubectl delete -f $output_yaml 1> /dev/null 2> /dev/null # Delete the previous terminated pod - if existing + kubectl $KUBECONF delete -f $output_yaml 1> /dev/null 2> /dev/null # Delete the previous terminated pod - if existing __kube_create_instance pod $PVC_CLEANER_APP_NAME $input_yaml $output_yaml if [ $? -ne 0 ]; then @@ -2510,7 +2604,7 @@ __kube_clean_pvc() { term_ts=$(($SECONDS+30)) while [ $term_ts -gt $SECONDS ]; do - pod_status=$(kubectl get pod pvc-cleaner -n $PVC_CLEANER_NAMESPACE --no-headers -o custom-columns=":status.phase") + pod_status=$(kubectl $KUBECONF get pod pvc-cleaner -n $PVC_CLEANER_NAMESPACE --no-headers -o custom-columns=":status.phase") if [ "$pod_status" == "Succeeded" ]; then return 0 fi @@ -2579,14 +2673,14 @@ clean_environment() { __clean_kube if [ $PRE_CLEAN -eq 1 ]; then echo " Cleaning docker resouces to free up resources, may take time..." - ../common/clean_docker.sh 2>&1 /dev/null + ../common/clean_docker.sh 2>&1 > /dev/null echo "" fi else __clean_containers if [ $PRE_CLEAN -eq 1 ]; then echo " Cleaning kubernetes resouces to free up resources, may take time..." 
- ../common/clean_kube.sh 2>&1 /dev/null + ../common/clean_kube.sh $KUBECONF 2>&1 > /dev/null echo "" fi fi @@ -2763,7 +2857,7 @@ __check_service_start() { echo -ne " Container $BOLD${appname}$EBOLD starting${SAMELINE}" - pa_st=false + a1pmsst=false echo -ne " Waiting for ${ENTITY} ${appname} service status...${SAMELINE}" TSTART=$SECONDS loop_ctr=0 @@ -2776,7 +2870,7 @@ __check_service_start() { fi echo -ne " Waiting for {ENTITY} $BOLD${appname}$EBOLD service status on ${3}, result: $result${SAMELINE}" echo -ne " The ${ENTITY} $BOLD${appname}$EBOLD$GREEN is alive$EGREEN, responds to service status:$GREEN $result $EGREEN on ${url} after $(($SECONDS-$TSTART)) seconds" - pa_st=true + a1pmsst=true break else TS_TMP=$SECONDS @@ -2792,7 +2886,7 @@ __check_service_start() { let loop_ctr=loop_ctr+1 done - if [ "$pa_st" = "false" ]; then + if [ "$a1pmsst" = "false" ]; then ((RES_CONF_FAIL++)) echo -e $RED" The ${ENTITY} ${appname} did not respond to service status on ${url} in $(($SECONDS-$TSTART)) seconds"$ERED return 1 @@ -2824,7 +2918,7 @@ __check_container_logs() { #tmp=$(docker ps | grep $appname) tmp=$(docker ps -q --filter name=$appname) #get the container id - if [ -z "$tmp" ]; then #Only check logs for running Policy Agent apps + if [ -z "$tmp" ]; then #Only check logs for running A1PMS apps echo " "$dispname" is not running, no check made" return fi @@ -2887,11 +2981,11 @@ store_logs() { done fi if [ $RUNMODE == "KUBE" ]; then - namespaces=$(kubectl get namespaces -o jsonpath='{.items[?(@.metadata.name)].metadata.name}') + namespaces=$(kubectl $KUBECONF get namespaces -o jsonpath='{.items[?(@.metadata.name)].metadata.name}') for nsid in $namespaces; do - pods=$(kubectl get pods -n $nsid -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}') + pods=$(kubectl $KUBECONF get pods -n $nsid -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}') for podid in $pods; do - kubectl logs -n $nsid $podid > $TESTLOGS/$ATC/$1_${podid}.log + kubectl $KUBECONF logs -n $nsid $podid > $TESTLOGS/$ATC/$1_${podid}.log done done fi @@ -2902,6 +2996,7 @@ store_logs() { ## Generic curl ############### # Generic curl function, assumes all 200-codes are ok +# Used proxy, set # args: # returns: or "" or ">"" # returns: The return code is 0 for ok and 1 for not ok @@ -2948,6 +3043,46 @@ __do_curl() { fi } +# Generic curl function, assumes all 200-codes are ok +# Uses no proxy, even if it is set +# args: +# returns: or "" or ">"" +# returns: The return code is 0 for ok and 1 for not ok +__do_curl_no_proxy() { + echo ${FUNCNAME[1]} "line: "${BASH_LINENO[1]} >> $HTTPLOG + curlString="curl -skw %{http_code} $@" + echo " CMD: $curlString" >> $HTTPLOG + res=$($curlString) + retcode=$? + echo " RESP: $res" >> $HTTPLOG + echo " RETCODE: $retcode" >> $HTTPLOG + if [ $retcode -ne 0 ]; then + echo "" + return 1 + fi + http_code="${res:${#res}-3}" + if [ ${#res} -eq 3 ]; then + if [ $http_code -lt 200 ] || [ $http_code -gt 299 ]; then + echo "" + return 1 + else + return 0 + fi + else + if [ $http_code -lt 200 ] || [ $http_code -gt 299 ]; then + echo "" + return 1 + fi + if [ $# -eq 2 ]; then + echo "${res:0:${#res}-3}" | xargs + else + echo "${res:0:${#res}-3}" + fi + + return 0 + fi +} + ####################################### ### Basic helper function for test cases #######################################
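
Illustrative usage of the kubectl-related options introduced by this change. The test-case script name, env file and paths below are placeholders and are not taken from this patch; they only show how the options are meant to be combined:

    # Point the test engine at a specific cluster config file.
    # KUBECONF then carries "--kubeconfig <file>" into every kubectl call.
    sudo ./FTC100.sh remote kube --env-file <env-file> release \
         --kubeconfig /home/<user>/.kube/config --host-path-dir /tmp

    # Or select a kubectl context instead (mutually exclusive with --kubeconfig).
    # KUBECONF then carries "--context <name>".
    sudo ./FTC100.sh remote kube --env-file <env-file> release --kubecontext minikube

Every kubectl invocation in the script expands KUBECONF, e.g. "kubectl $KUBECONF get nodes -o wide". If --host-path-dir is not given, hostpath persistent volumes default to /tmp on the applicable node.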