X-Git-Url: https://gerrit.o-ran-sc.org/r/gitweb?a=blobdiff_plain;f=test%2Fcommon%2Ftestcase_common.sh;h=a94d3e2274b0b6d7e2e43bc5567e6b5eaff29084;hb=cb6113ef6f7519274d6420c649f714818dd68b23;hp=80059a1671ab32a0e0c51698a697b8e62df38398;hpb=5feecd881172a3b22041d35443c1f946e7d5f63e;p=nonrtric.git diff --git a/test/common/testcase_common.sh b/test/common/testcase_common.sh index 80059a16..a94d3e22 100755 --- a/test/common/testcase_common.sh +++ b/test/common/testcase_common.sh @@ -26,9 +26,10 @@ __print_args() { echo "Args: remote|remote-remove docker|kube --env-file [release] [auto-clean] [--stop-at-error] " echo " [--ricsim-prefix ] [--use-local-image +] [--use-snapshot-image +]" - echo " [--use-staging-image +] [--use-release-image +] [--image-repo +] [--use-release-image +] [--image-repo ]" echo " [--repo-policy local|remote] [--cluster-timeout ] [--print-stats]" - echo " [--override --pre-clean --gen-stats]" + echo " [--override ] [--pre-clean] [--gen-stats] [--delete-namespaces]" + echo " [--delete-containers] [--endpoint-stats] [--kubeconfig ]" } if [ $# -eq 1 ] && [ "$1" == "help" ]; then @@ -60,7 +61,10 @@ if [ $# -eq 1 ] && [ "$1" == "help" ]; then echo "--override - Override setting from the file supplied by --env-file" echo "--pre-clean - Will clean kube resouces when running docker and vice versa" echo "--gen-stats - Collect container/pod runtime statistics" - + echo "--delete-namespaces - Delete kubernetes namespaces before starting tests - but only those created by the test scripts. Kube mode only. Ignored if running with prestarted apps." + echo "--delete-containers - Delete docker containers before starting tests - but only those created by the test scripts. Docker mode only." + echo "--endpoint-stats - Collect endpoint statistics" + echo "--kubeconfig - Configure kubectl to use cluster specific cluster config file" echo "" echo "List of app short names supported: "$APP_SHORT_NAMES exit 0 @@ -163,18 +167,52 @@ TESTLOGS=$PWD/logs # files in the ./tmp is moved to ./tmp/prev when a new test is started if [ ! -d "tmp" ]; then mkdir tmp + if [ $? -ne 0 ]; then + echo "Cannot create dir for temp files, $PWD/tmp" + echo "Exiting...." + exit 1 + fi fi curdir=$PWD cd tmp if [ $? -ne 0 ]; then echo "Cannot cd to $PWD/tmp" - echo "Dir cannot be created. Exiting...." + echo "Exiting...." + exit 1 fi + +TESTENV_TEMP_FILES=$PWD + if [ ! -d "prev" ]; then mkdir prev + if [ $? -ne 0 ]; then + echo "Cannot create dir for previous temp files, $PWD/prev" + echo "Exiting...." + exit 1 + fi +fi + +TMPFILES=$(ls -A | grep -vw prev) +if [ ! -z "$TMPFILES" ]; then + cp -r $TMPFILES prev #Move all temp files to prev dir + if [ $? -ne 0 ]; then + echo "Cannot move temp files in $PWD to previous temp files in, $PWD/prev" + echo "Exiting...." + exit 1 + fi + if [ $(pwd | xargs basename) == "tmp" ]; then #Check that current dir is tmp...for safety + + rm -rf $TMPFILES # Remove all temp files + fi fi + cd $curdir -mv ./tmp/* ./tmp/prev 2> /dev/null +if [ $? -ne 0 ]; then + echo "Cannot cd to $curdir" + echo "Exiting...." 
+    exit 1
+fi
+
 # Create a http message log for this testcase
 HTTPLOG=$PWD"/.httplog_"$ATC".txt"

@@ -199,6 +237,9 @@ rm $TESTLOGS/$ATC/*.log &> /dev/null
 rm $TESTLOGS/$ATC/*.txt &> /dev/null
 rm $TESTLOGS/$ATC/*.json &> /dev/null

+#Create result file in the log dir
+echo "1" > "$TESTLOGS/$ATC/.result$ATC.txt"
+
 # Log all output from the test case to a TC log
 TCLOG=$TESTLOGS/$ATC/TC.log
 exec &> >(tee ${TCLOG})

@@ -215,6 +256,19 @@ PRINT_CURRENT_STATS=0
 #Var to control if container/pod runtim statistics shall be collected
 COLLECT_RUNTIME_STATS=0
+COLLECT_RUNTIME_STATS_PID=0
+
+#Var to control if endpoint statistics shall be collected
+COLLECT_ENDPOINT_STATS=0
+
+#Var to control if namespaces shall be deleted before test setup
+DELETE_KUBE_NAMESPACES=0
+
+#Var to control if containers shall be deleted before test setup
+DELETE_CONTAINERS=0
+
+#Var to configure kubectl from a config file.
+KUBECONF=""

 #File to keep deviation messages
 DEVIATION_FILE=".tmp_deviations"
@@ -231,8 +285,13 @@ trap_fnc() {
 }
 trap trap_fnc ERR

-# Trap to kill subprocesses
-trap "kill 0" EXIT
+# Trap to kill subprocess for stats collection (if running)
+trap_fnc2() {
+    if [ $COLLECT_RUNTIME_STATS_PID -ne 0 ]; then
+        kill $COLLECT_RUNTIME_STATS_PID
+    fi
+}
+trap trap_fnc2 EXIT

 # Counter for tests
 TEST_SEQUENCE_NR=1
@@ -347,6 +406,44 @@ __log_conf_ok() {
 	__print_current_stats
 }

+# Function to collect stats on endpoints
+# args: <app-id> <end-point-id> <http-method> <end-point-url> <response-code> [<count>]
+__collect_endpoint_stats() {
+    if [ $COLLECT_ENDPOINT_STATS -eq 0 ]; then
+        return
+    fi
+    ENDPOINT_COUNT=1
+    if [ $# -gt 5 ]; then
+        ENDPOINT_COUNT=$6
+    fi
+    ENDPOINT_STAT_FILE=$TESTLOGS/$ATC/endpoint_${ATC}_$1_$2".log"
+    ENDPOINT_POS=0
+    ENDPOINT_NEG=0
+    if [ -f $ENDPOINT_STAT_FILE ]; then
+        ENDPOINT_VAL=$(< $ENDPOINT_STAT_FILE)
+        ENDPOINT_POS=$(echo $ENDPOINT_VAL | cut -f4 -d ' ' | cut -f1 -d '/')
+        ENDPOINT_NEG=$(echo $ENDPOINT_VAL | cut -f5 -d ' ' | cut -f1 -d '/')
+    fi
+
+    if [ $5 -ge 200 ] && [ $5 -lt 300 ]; then
+        let ENDPOINT_POS=ENDPOINT_POS+$ENDPOINT_COUNT
+    else
+        let ENDPOINT_NEG=ENDPOINT_NEG+$ENDPOINT_COUNT
+    fi
+
+    printf '%-2s %-10s %-45s %-16s %-16s' "#" "$3" "$4" "$ENDPOINT_POS/$ENDPOINT_POS" "$ENDPOINT_NEG/$ENDPOINT_NEG" > $ENDPOINT_STAT_FILE
+}
+
+# Function to collect image info for the endpoint stats
+# args: <app-id>
+__collect_endpoint_stats_image_info() {
+    if [ $COLLECT_ENDPOINT_STATS -eq 0 ]; then
+        return
+    fi
+    ENDPOINT_STAT_FILE=$TESTLOGS/$ATC/imageinfo_${ATC}_$1".log"
+    echo $POLICY_AGENT_IMAGE > $ENDPOINT_STAT_FILE
+}
+
 #Var for measuring execution time
 TCTEST_START=$SECONDS

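The two helpers above only record anything when the test is started with the new --endpoint-stats option. A minimal usage sketch, assuming a hypothetical call site in an app's api_functions script; the app id, endpoint index, URL and status code are illustrative placeholders, not calls taken from this change:

    # after an HTTP request to the app returned status 200 (any 2xx code counts as positive)
    __collect_endpoint_stats "PA" 07 "GET" "/a1-policy/v2/policies" 200
    # creates/updates $TESTLOGS/<testcase>/endpoint_<testcase>_PA_07.log - a single line
    # holding the method, the url and the accumulated positive/negative counters
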
@@ -361,7 +458,7 @@ TC_TIMER_CURRENT_FAILS="" # Then numer of failed test when timer starts.
 TIMER_MEASUREMENTS=".timer_measurement.txt"
 echo -e "Activity \t Duration \t Info" > $TIMER_MEASUREMENTS

-# If this is set, some images (control by the parameter repo-polcy) will be re-tagged and pushed to this repo before any
+# If this is set, some images (controlled by the parameter repo-policy) will be re-tagged and pushed to this repo before any
 IMAGE_REPO_ADR=""
 IMAGE_REPO_POLICY="local"
 CLUSTER_TIME_OUT=0
@@ -679,7 +776,67 @@ while [ $paramerror -eq 0 ] && [ $foundparm -eq 0 ]; do
            foundparm=0
        fi
    fi
-
+    if [ $paramerror -eq 0 ]; then
+        if [ "$1" == "--delete-namespaces" ]; then
+            if [ $RUNMODE == "DOCKER" ]; then
+                DELETE_KUBE_NAMESPACES=0
+                echo "Option ignored - Delete namespaces (ignored when running docker)"
+            else
+                if [ -z "$KUBE_PRESTARTED_IMAGES" ]; then
+                    DELETE_KUBE_NAMESPACES=1
+                    echo "Option set - Delete namespaces"
+                else
+                    DELETE_KUBE_NAMESPACES=0
+                    echo "Option ignored - Delete namespaces (ignored when using prestarted apps)"
+                fi
+            fi
+            shift;
+            foundparm=0
+        fi
+    fi
+    if [ $paramerror -eq 0 ]; then
+        if [ "$1" == "--delete-containers" ]; then
+            if [ $RUNMODE == "DOCKER" ]; then
+                DELETE_CONTAINERS=1
+                echo "Option set - Delete containers started by previous test(s)"
+            else
+                echo "Option ignored - Delete containers (ignored when running kube)"
+            fi
+            shift;
+            foundparm=0
+        fi
+    fi
+    if [ $paramerror -eq 0 ]; then
+        if [ "$1" == "--endpoint-stats" ]; then
+            COLLECT_ENDPOINT_STATS=1
+            echo "Option set - Collect endpoint statistics"
+            shift;
+            foundparm=0
+        fi
+    fi
+    if [ $paramerror -eq 0 ]; then
+        if [ "$1" == "--kubeconfig" ]; then
+            shift;
+            if [ -z "$1" ]; then
+                paramerror=1
+                if [ -z "$paramerror_str" ]; then
+                    paramerror_str="No path found for '--kubeconfig'"
+                fi
+            else
+                if [ -f $1 ]; then
+                    KUBECONF="--kubeconfig $1"
+                    echo "Option set - Kubeconfig path: "$1
+                    shift;
+                    foundparm=0
+                else
+                    paramerror=1
+                    if [ -z "$paramerror_str" ]; then
+                        paramerror_str="File $1 for --kubeconfig not found"
+                    fi
+                fi
+            fi
+        fi
+    fi
 done
 echo ""
@@ -756,6 +913,7 @@ fi
 echo ""

 # auto adding system apps
+__added_apps=""
 echo -e $BOLD"Auto adding system apps"$EBOLD
 if [ $RUNMODE == "KUBE" ]; then
    INCLUDED_IMAGES=$INCLUDED_IMAGES" "$TESTENV_KUBE_SYSTEM_APPS
@@ -768,22 +926,47 @@ if [ ! -z "$TMP_APPS" ]; then
    for iapp in "$TMP_APPS"; do
        file_pointer=$(echo $iapp | tr '[:upper:]' '[:lower:]')
        file_pointer="../common/"$file_pointer"_api_functions.sh"
-        echo " Auto-adding system app $iapp. Sourcing $file_pointer"
+        padded_iapp=$iapp
+        while [ ${#padded_iapp} -lt 16 ]; do
+            padded_iapp=$padded_iapp" "
+        done
+        echo " Auto-adding system app $padded_iapp Sourcing $file_pointer"
        . $file_pointer
+        if [ $? -ne 0 ]; then
+            echo " Include file $file_pointer contains errors. Exiting..."
+            exit 1
+        fi
+        __added_apps=" $iapp "$__added_apps
    done
 else
    echo " None"
 fi

+if [ $RUNMODE == "KUBE" ]; then
+    TMP_APPS=$INCLUDED_IMAGES" "$KUBE_PRESTARTED_IMAGES
+else
+    TMP_APPS=$INCLUDED_IMAGES
+fi
+
 echo -e $BOLD"Auto adding included apps"$EBOLD
-    for iapp in $INCLUDED_IMAGES; do
-        file_pointer=$(echo $iapp | tr '[:upper:]' '[:lower:]')
-        file_pointer="../common/"$file_pointer"_api_functions.sh"
-        echo " Auto-adding included app $iapp. Sourcing $file_pointer"
-        . $file_pointer
-        if [ ! -f "$file_pointer" ]; then
-            echo " Include file $file_pointer for app $iapp does not exist"
-            exit 1
+    for iapp in $TMP_APPS; do
+        if [[ "$__added_apps" != *"$iapp"* ]]; then
+            file_pointer=$(echo $iapp | tr '[:upper:]' '[:lower:]')
+            file_pointer="../common/"$file_pointer"_api_functions.sh"
+            padded_iapp=$iapp
+            while [ ${#padded_iapp} -lt 16 ]; do
+                padded_iapp=$padded_iapp" "
+            done
+            echo " Auto-adding included app $padded_iapp Sourcing $file_pointer"
+            if [ ! -f "$file_pointer" ]; then
+                echo " Include file $file_pointer for app $iapp does not exist"
+                exit 1
+            fi
+            . $file_pointer
+            if [ $? -ne 0 ]; then
+                echo " Include file $file_pointer contains errors. Exiting..."
+                exit 1
+            fi
        fi
    done
 echo ""
@@ -838,17 +1021,17 @@ else
    if [ $RUNMODE == "KUBE" ]; then
        echo " kubectl is installed and using versions:"
        echo $(kubectl version --short=true) | indent2
-        res=$(kubectl cluster-info 2>&1)
+        res=$(kubectl $KUBECONF cluster-info 2>&1)
        if [ $? -ne 0 ]; then
            echo -e "$BOLD$RED############################################# $ERED$EBOLD"
-            echo -e $BOLD$RED"Command 'kubectl cluster-info' returned error $ERED$EBOLD"
+            echo -e $BOLD$RED"Command 'kubectl '$KUBECONF' cluster-info' returned error $ERED$EBOLD"
            echo -e "$BOLD$RED############################################# $ERED$EBOLD"
            echo " "
            echo "kubectl response:"
            echo $res
            echo " "
            echo "This script may have been started with user with no permission to run kubectl"
-            echo "Try running with 'sudo' or set 'KUBECONFIG'"
+            echo "Try running with 'sudo', set env KUBECONFIG or set '--kubeconfig' parameter"
            echo "Do either 1, 2 or 3 "
            echo " "
            echo "1"
@@ -861,11 +1044,14 @@ else
            echo -e $BOLD"sudo -E "$EBOLD
            echo " "
            echo "3"
-            echo "Set KUBECONFIG inline (replace user)"
-            echo -e $BOLD"sudo KUBECONFIG='/home/<user>/.kube/config' "$EBOLD
+            echo "Set KUBECONFIG via script parameter"
+            echo -e $BOLD"sudo ... --kubeconfig /home/<user>/.kube/<config-file> ...."$EBOLD
+            echo "The config file needs to be downloaded from the cluster"
            exit 1
        fi
+        echo " Node(s) and container runtime config"
+        kubectl $KUBECONF get nodes -o wide | indent2
    fi
 fi

@@ -1279,6 +1465,9 @@ setup_testenvironment() {
            # If the image suffix is none, then the component decides the suffix
            function_pointer="__"$imagename"_imagesetup"
            $function_pointer $IMAGE_SUFFIX
+
+            function_pointer="__"$imagename"_test_requirements"
+            $function_pointer
        fi
    done

@@ -1316,9 +1505,38 @@ setup_testenvironment() {
    #Temp var to check for image pull errors
    IMAGE_ERR=0

-    # The following sequence pull the configured images
+    # Delete namespaces
+    echo -e $BOLD"Deleting namespaces"$EBOLD
+
+
+    if [ "$DELETE_KUBE_NAMESPACES" -eq 1 ]; then
+        test_env_namespaces=$(kubectl $KUBECONF get ns --no-headers -o custom-columns=":metadata.name" -l autotest=engine) #Get list of ns created by the test env
+        if [ $? -ne 0 ]; then
+            echo " Cannot get list of namespaces...ignoring delete"
+        else
+            for test_env_ns in $test_env_namespaces; do
+                __kube_delete_namespace $test_env_ns
+            done
+        fi
+    else
+        echo " Namespace delete option not set or ignored"
+    fi
+    echo ""

+    # Delete containers
+    echo -e $BOLD"Deleting containers"$EBOLD
+
+    if [ "$DELETE_CONTAINERS" -eq 1 ]; then
+        echo " Stopping containers with label 'nrttest_app'..."
+        docker stop $(docker ps -qa --filter "label=nrttest_app") 2> /dev/null
+        echo " Removing stopped containers..."
+ docker rm $(docker ps -qa --filter "label=nrttest_app") 2> /dev/null + else + echo " Contatiner delete option not set or ignored" + fi + echo "" + + # The following sequence pull the configured images echo -e $BOLD"Pulling configured images, if needed"$EBOLD if [ ! -z "$IMAGE_REPO_ADR" ] && [ $IMAGE_REPO_POLICY == "local" ]; then echo -e $YELLOW" Excluding all remote image check/pull when running with image repo: $IMAGE_REPO_ADR and image policy $IMAGE_REPO_POLICY"$EYELLOW @@ -1491,7 +1709,7 @@ setup_testenvironment() { echo -e " Pulling remote snapshot or staging images my in some case result in pulling newer image versions outside the control of the test engine" export KUBE_IMAGE_PULL_POLICY="Always" fi - CLUSTER_IP=$(kubectl config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}') + CLUSTER_IP=$(kubectl $KUBECONF config view -o jsonpath={.clusters[0].cluster.server} | awk -F[/:] '{print $4}') echo -e $YELLOW" The cluster hostname/ip is: $CLUSTER_IP"$EYELLOW echo "=================================================================================" @@ -1529,6 +1747,7 @@ setup_testenvironment() { if [ $COLLECT_RUNTIME_STATS -eq 1 ]; then ../common/genstat.sh $RUNMODE $SECONDS $TESTLOGS/$ATC/stat_data.csv $LOG_STAT_ARGS & + COLLECT_RUNTIME_STATS_PID=$! fi } @@ -1610,6 +1829,7 @@ print_result() { fi #Create file with OK exit code echo "0" > "$AUTOTEST_HOME/.result$ATC.txt" + echo "0" > "$TESTLOGS/$ATC/.result$ATC.txt" else echo -e "One or more tests with status \033[31m\033[1mFAIL\033[0m " echo -e "\033[31m\033[1m ___ _ ___ _ \033[0m" @@ -1706,6 +1926,16 @@ __check_stop_at_error() { if [ $STOP_AT_ERROR -eq 1 ]; then echo -e $RED"Test script configured to stop at first FAIL, taking all logs and stops"$ERED store_logs "STOP_AT_ERROR" + + # Update test suite counter + if [ -f .tmp_tcsuite_fail_ctr ]; then + tmpval=$(< .tmp_tcsuite_fail_ctr) + ((tmpval++)) + echo $tmpval > .tmp_tcsuite_fail_ctr + fi + if [ -f .tmp_tcsuite_fail ]; then + echo " - "$ATC " -- "$TC_ONELINE_DESCR" Execution stopped due to error" >> .tmp_tcsuite_fail + fi exit 1 fi return 0 @@ -1856,12 +2086,12 @@ __clean_containers() { # Get resource type for scaling # args: __kube_get_resource_type() { - kubectl get deployment $1 -n $2 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF get deployment $1 -n $2 1> /dev/null 2> ./tmp/kubeerr if [ $? -eq 0 ]; then echo "deployment" return 0 fi - kubectl get sts $1 -n $2 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF get sts $1 -n $2 1> /dev/null 2> ./tmp/kubeerr if [ $? -eq 0 ]; then echo "sts" return 0 @@ -1875,7 +2105,7 @@ __kube_get_resource_type() { # (Not for test scripts) __kube_scale() { echo -ne " Setting $1 $2 replicas=$4 in namespace $3"$SAMELINE - kubectl scale $1 $2 -n $3 --replicas=$4 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF scale $1 $2 -n $3 --replicas=$4 1> /dev/null 2> ./tmp/kubeerr if [ $? -ne 0 ]; then echo -e " Setting $1 $2 replicas=$4 in namespace $3 $RED Failed $ERED" ((RES_CONF_FAIL++)) @@ -1888,7 +2118,7 @@ __kube_scale() { TSTART=$SECONDS for i in {1..500}; do - count=$(kubectl get $1/$2 -n $3 -o jsonpath='{.status.replicas}' 2> /dev/null) + count=$(kubectl $KUBECONF get $1/$2 -n $3 -o jsonpath='{.status.replicas}' 2> /dev/null) retcode=$? 
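# Illustrative example of what the jsonpath query above prints; the resource and
# namespace names are placeholders, not taken from this change:
#   kubectl get deployment/my-app -n my-namespace -o jsonpath='{.status.replicas}'   -> 3
# The loop polls this value until it matches the requested replica count or the retry limit is hit.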
if [ -z "$count" ]; then #No value is sometimes returned for some reason, in case the resource has replica 0 @@ -1926,11 +2156,11 @@ __kube_scale_all_resources() { labelid=$3 resources="deployment replicaset statefulset" for restype in $resources; do - result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}') + result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}') if [ $? -eq 0 ] && [ ! -z "$result" ]; then for resid in $result; do echo -ne " Ordered caling $restype $resid in namespace $namespace with label $labelname=$labelid to 0"$SAMELINE - kubectl scale $restype $resid -n $namespace --replicas=0 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF scale $restype $resid -n $namespace --replicas=0 1> /dev/null 2> ./tmp/kubeerr echo -e " Ordered scaling $restype $resid in namespace $namespace with label $labelname=$labelid to 0 $GREEN OK $EGREEN" done fi @@ -1956,18 +2186,18 @@ __kube_scale_and_wait_all_resources() { scaled_all=0 for restype in $resources; do if [ -z "$3" ]; then - result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname')].metadata.name}') + result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname')].metadata.name}') else - result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}') + result=$(kubectl $KUBECONF get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}') fi if [ $? -eq 0 ] && [ ! -z "$result" ]; then for resid in $result; do echo -e " Ordered scaling $restype $resid in namespace $namespace with label $labelname=$labelid to 0" - kubectl scale $restype $resid -n $namespace --replicas=0 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF scale $restype $resid -n $namespace --replicas=0 1> /dev/null 2> ./tmp/kubeerr count=1 T_START=$SECONDS while [ $count -ne 0 ]; do - count=$(kubectl get $restype $resid -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null) + count=$(kubectl $KUBECONF get $restype $resid -n $namespace -o jsonpath='{.status.replicas}' 2> /dev/null) echo -ne " Scaling $restype $resid in namespace $namespace with label $labelname=$labelid to 0, current count=$count"$SAMELINE if [ $? -eq 0 ] && [ ! -z "$count" ]; then sleep 0.5 @@ -1996,7 +2226,7 @@ __kube_delete_all_resources() { namespace=$1 labelname=$2 labelid=$3 - resources="deployments replicaset statefulset services pods configmaps persistentvolumeclaims persistentvolumes" + resources="deployments replicaset statefulset services pods configmaps persistentvolumeclaims persistentvolumes serviceaccounts clusterrolebindings" deleted_resourcetypes="" for restype in $resources; do ns_flag="-n $namespace" @@ -2005,14 +2235,18 @@ __kube_delete_all_resources() { ns_flag="" ns_text="" fi - result=$(kubectl get $restype $ns_flag -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}') + if [ $restype == "clusterrolebindings" ]; then + ns_flag="" + ns_text="" + fi + result=$(kubectl $KUBECONF get $restype $ns_flag -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}') if [ $? -eq 0 ] && [ ! 
-z "$result" ]; then deleted_resourcetypes=$deleted_resourcetypes" "$restype for resid in $result; do if [ $restype == "replicaset" ] || [ $restype == "statefulset" ]; then count=1 while [ $count -ne 0 ]; do - count=$(kubectl get $restype $resid $ns_flag -o jsonpath='{.status.replicas}' 2> /dev/null) + count=$(kubectl $KUBECONF get $restype $resid $ns_flag -o jsonpath='{.status.replicas}' 2> /dev/null) echo -ne " Scaling $restype $resid $ns_text with label $labelname=$labelid to 0, current count=$count"$SAMELINE if [ $? -eq 0 ] && [ ! -z "$count" ]; then sleep 0.5 @@ -2023,7 +2257,7 @@ __kube_delete_all_resources() { echo -e " Scaled $restype $resid $ns_text with label $labelname=$labelid to 0, current count=$count $GREEN OK $EGREEN" fi echo -ne " Deleting $restype $resid $ns_text with label $labelname=$labelid "$SAMELINE - kubectl delete --grace-period=1 $restype $resid $ns_flag 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF delete --grace-period=1 $restype $resid $ns_flag 1> /dev/null 2> ./tmp/kubeerr if [ $? -eq 0 ]; then echo -e " Deleted $restype $resid $ns_text with label $labelname=$labelid $GREEN OK $EGREEN" else @@ -2046,7 +2280,7 @@ __kube_delete_all_resources() { result="dummy" while [ ! -z "$result" ]; do sleep 0.5 - result=$(kubectl get $restype $ns_flag -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}') + result=$(kubectl $KUBECONF get $restype $ns_flag -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}') echo -ne " Waiting for $restype $ns_text with label $labelname=$labelid to be deleted...$(($SECONDS-$T_START)) seconds "$SAMELINE if [ -z "$result" ]; then echo -e " Waiting for $restype $ns_text with label $labelname=$labelid to be deleted...$(($SECONDS-$T_START)) seconds $GREEN OK $EGREEN" @@ -2065,16 +2299,17 @@ __kube_delete_all_resources() { __kube_create_namespace() { #Check if test namespace exists, if not create it - kubectl get namespace $1 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF get namespace $1 1> /dev/null 2> ./tmp/kubeerr if [ $? -ne 0 ]; then echo -ne " Creating namespace "$1 $SAMELINE - kubectl create namespace $1 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF create namespace $1 1> /dev/null 2> ./tmp/kubeerr if [ $? -ne 0 ]; then echo -e " Creating namespace $1 $RED$BOLD FAILED $EBOLD$ERED" ((RES_CONF_FAIL++)) echo " Message: $(<./tmp/kubeerr)" return 1 else + kubectl $KUBECONF label ns $1 autotest=engine echo -e " Creating namespace $1 $GREEN$BOLD OK $EBOLD$EGREEN" fi else @@ -2083,6 +2318,51 @@ __kube_create_namespace() { return 0 } +# Removes a namespace if it exists +# args: +# (Not for test scripts) +__kube_delete_namespace() { + + #Check if test namespace exists, if so remove it + kubectl $KUBECONF get namespace $1 1> /dev/null 2> ./tmp/kubeerr + if [ $? -eq 0 ]; then + echo -ne " Removing namespace "$1 $SAMELINE + kubectl $KUBECONF delete namespace $1 1> /dev/null 2> ./tmp/kubeerr + if [ $? -ne 0 ]; then + echo -e " Removing namespace $1 $RED$BOLD FAILED $EBOLD$ERED" + ((RES_CONF_FAIL++)) + echo " Message: $(<./tmp/kubeerr)" + return 1 + else + echo -e " Removing namespace $1 $GREEN$BOLD OK $EBOLD$EGREEN" + fi + else + echo -e " Namespace $1 $GREEN$BOLD does not exist, OK $EBOLD$EGREEN" + fi + return 0 +} + +# Removes a namespace +# args: +# (Not for test scripts) +clean_and_create_namespace() { + __log_conf_start $@ + + if [ $# -ne 1 ]; then + __print_err "" $@ + return 1 + fi + __kube_delete_namespace $1 + if [ $? 
-ne 0 ]; then + return 1 + fi + __kube_create_namespace $1 + if [ $? -ne 0 ]; then + return 1 + fi + +} + # Find the host ip of an app (using the service resource) # args: # (Not for test scripts) @@ -2093,7 +2373,7 @@ __kube_get_service_host() { exit 1 fi for timeout in {1..60}; do - host=$(kubectl get svc $1 -n $2 -o jsonpath='{.spec.clusterIP}') + host=$(kubectl $KUBECONF get svc $1 -n $2 -o jsonpath='{.spec.clusterIP}') if [ $? -eq 0 ]; then if [ ! -z "$host" ]; then echo $host @@ -2118,7 +2398,7 @@ __kube_get_service_port() { fi for timeout in {1..60}; do - port=$(kubectl get svc $1 -n $2 -o jsonpath='{...ports[?(@.name=="'$3'")].port}') + port=$(kubectl $KUBECONF get svc $1 -n $2 -o jsonpath='{...ports[?(@.name=="'$3'")].port}') if [ $? -eq 0 ]; then if [ ! -z "$port" ]; then echo $port @@ -2143,7 +2423,7 @@ __kube_get_service_nodeport() { fi for timeout in {1..60}; do - port=$(kubectl get svc $1 -n $2 -o jsonpath='{...ports[?(@.name=="'$3'")].nodePort}') + port=$(kubectl $KUBECONF get svc $1 -n $2 -o jsonpath='{...ports[?(@.name=="'$3'")].nodePort}') if [ $? -eq 0 ]; then if [ ! -z "$port" ]; then echo $port @@ -2163,7 +2443,7 @@ __kube_get_service_nodeport() { __kube_create_instance() { echo -ne " Creating $1 $2"$SAMELINE envsubst < $3 > $4 - kubectl apply -f $4 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF apply -f $4 1> /dev/null 2> ./tmp/kubeerr if [ $? -ne 0 ]; then ((RES_CONF_FAIL++)) echo -e " Creating $1 $2 $RED Failed $ERED" @@ -2181,21 +2461,21 @@ __kube_create_configmap() { echo -ne " Creating configmap $1 "$SAMELINE envsubst < $5 > $5"_tmp" cp $5"_tmp" $5 #Need to copy back to orig file name since create configmap neeed the original file name - kubectl create configmap $1 -n $2 --from-file=$5 --dry-run=client -o yaml > $6 + kubectl $KUBECONF create configmap $1 -n $2 --from-file=$5 --dry-run=client -o yaml > $6 if [ $? -ne 0 ]; then echo -e " Creating configmap $1 $RED Failed $ERED" ((RES_CONF_FAIL++)) return 1 fi - kubectl apply -f $6 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF apply -f $6 1> /dev/null 2> ./tmp/kubeerr if [ $? -ne 0 ]; then echo -e " Creating configmap $1 $RED Apply failed $ERED" echo " Message: $(<./tmp/kubeerr)" ((RES_CONF_FAIL++)) return 1 fi - kubectl label configmap $1 -n $2 $3"="$4 --overwrite 1> /dev/null 2> ./tmp/kubeerr + kubectl $KUBECONF label configmap $1 -n $2 $3"="$4 --overwrite 1> /dev/null 2> ./tmp/kubeerr if [ $? -ne 0 ]; then echo -e " Creating configmap $1 $RED Labeling failed $ERED" echo " Message: $(<./tmp/kubeerr)" @@ -2203,7 +2483,7 @@ __kube_create_configmap() { return 1 fi # Log the resulting map - kubectl get configmap $1 -n $2 -o yaml > $6 + kubectl $KUBECONF get configmap $1 -n $2 -o yaml > $6 echo -e " Creating configmap $1 $GREEN OK $EGREEN" return 0 @@ -2241,12 +2521,13 @@ __kube_clean_pvc() { export PVC_CLEANER_NAMESPACE=$2 export PVC_CLEANER_CLAIMNAME=$3 export PVC_CLEANER_RM_PATH=$4 + export PVC_CLEANER_APP_NAME input_yaml=$SIM_GROUP"/"$PVC_CLEANER_COMPOSE_DIR"/"pvc-cleaner.yaml output_yaml=$PWD/tmp/$2-pvc-cleaner.yaml envsubst < $input_yaml > $output_yaml - kubectl delete -f $output_yaml 1> /dev/null 2> /dev/null # Delete the previous terminated pod - if existing + kubectl $KUBECONF delete -f $output_yaml 1> /dev/null 2> /dev/null # Delete the previous terminated pod - if existing __kube_create_instance pod $PVC_CLEANER_APP_NAME $input_yaml $output_yaml if [ $? 
-ne 0 ]; then @@ -2256,7 +2537,7 @@ __kube_clean_pvc() { term_ts=$(($SECONDS+30)) while [ $term_ts -gt $SECONDS ]; do - pod_status=$(kubectl get pod pvc-cleaner -n $PVC_CLEANER_NAMESPACE --no-headers -o custom-columns=":status.phase") + pod_status=$(kubectl $KUBECONF get pod pvc-cleaner -n $PVC_CLEANER_NAMESPACE --no-headers -o custom-columns=":status.phase") if [ "$pod_status" == "Succeeded" ]; then return 0 fi @@ -2325,14 +2606,14 @@ clean_environment() { __clean_kube if [ $PRE_CLEAN -eq 1 ]; then echo " Cleaning docker resouces to free up resources, may take time..." - ../common/clean_docker.sh 2&>1 /dev/null + ../common/clean_docker.sh 2>&1 /dev/null echo "" fi else __clean_containers if [ $PRE_CLEAN -eq 1 ]; then echo " Cleaning kubernetes resouces to free up resources, may take time..." - ../common/clean_kube.sh 2&>1 /dev/null + ../common/clean_kube.sh $KUBECONF 2>&1 /dev/null echo "" fi fi @@ -2380,6 +2661,7 @@ __print_err() { echo -e $RED" Got: "${FUNCNAME[1]} ${@:2} $ERED fi ((RES_CONF_FAIL++)) + __check_stop_at_error } # Function to create the docker network for the test @@ -2435,9 +2717,14 @@ __start_container() { envsubst < $compose_file > "gen_"$compose_file compose_file="gen_"$compose_file + if [ $DOCKER_COMPOSE_VERION == "V1" ]; then + docker_compose_cmd="docker-compose" + else + docker_compose_cmd="docker compose" + fi if [ "$compose_args" == "NODOCKERARGS" ]; then - docker-compose -f $compose_file up -d &> .dockererr + $docker_compose_cmd -f $compose_file up -d &> .dockererr if [ $? -ne 0 ]; then echo -e $RED"Problem to launch container(s) with docker-compose"$ERED cat .dockererr @@ -2445,7 +2732,7 @@ __start_container() { exit 1 fi else - docker-compose -f $compose_file up -d $compose_args &> .dockererr + $docker_compose_cmd -f $compose_file up -d $compose_args &> .dockererr if [ $? 
-ne 0 ]; then echo -e $RED"Problem to launch container(s) with docker-compose"$ERED cat .dockererr @@ -2627,11 +2914,11 @@ store_logs() { done fi if [ $RUNMODE == "KUBE" ]; then - namespaces=$(kubectl get namespaces -o jsonpath='{.items[?(@.metadata.name)].metadata.name}') + namespaces=$(kubectl $KUBECONF get namespaces -o jsonpath='{.items[?(@.metadata.name)].metadata.name}') for nsid in $namespaces; do - pods=$(kubectl get pods -n $nsid -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}') + pods=$(kubectl $KUBECONF get pods -n $nsid -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}') for podid in $pods; do - kubectl logs -n $nsid $podid > $TESTLOGS/$ATC/$1_${podid}.log + kubectl $KUBECONF logs -n $nsid $podid > $TESTLOGS/$ATC/$1_${podid}.log done done fi @@ -2738,25 +3025,31 @@ __var_test() { __check_stop_at_error return fi - elif [ $4 = "=" ] && [ "$result" -eq $5 ]; then + elif [ "$4" == "=" ] && [ "$result" -eq $5 ]; then + ((RES_PASS++)) + echo -e " Result=${result} after ${duration} seconds${SAMELINE}" + echo -e $GREEN" PASS${EGREEN} - Result=${result} after ${duration} seconds" + __print_current_stats + return + elif [ "$4" == ">" ] && [ "$result" -gt $5 ]; then ((RES_PASS++)) echo -e " Result=${result} after ${duration} seconds${SAMELINE}" echo -e $GREEN" PASS${EGREEN} - Result=${result} after ${duration} seconds" __print_current_stats return - elif [ $4 = ">" ] && [ "$result" -gt $5 ]; then + elif [ "$4" == "<" ] && [ "$result" -lt $5 ]; then ((RES_PASS++)) echo -e " Result=${result} after ${duration} seconds${SAMELINE}" echo -e $GREEN" PASS${EGREEN} - Result=${result} after ${duration} seconds" __print_current_stats return - elif [ $4 = "<" ] && [ "$result" -lt $5 ]; then + elif [ "$4" == ">=" ] && [ "$result" -ge $5 ]; then ((RES_PASS++)) echo -e " Result=${result} after ${duration} seconds${SAMELINE}" echo -e $GREEN" PASS${EGREEN} - Result=${result} after ${duration} seconds" __print_current_stats return - elif [ $4 = "contain_str" ] && [[ $result =~ $5 ]]; then + elif [ "$4" == "contain_str" ] && [[ $result =~ $5 ]]; then ((RES_PASS++)) echo -e " Result=${result} after ${duration} seconds${SAMELINE}" echo -e $GREEN" PASS${EGREEN} - Result=${result} after ${duration} seconds" @@ -2798,19 +3091,23 @@ __var_test() { echo -e $RED" FAIL ${ERED}- ${3} ${4} ${5} not reached, result = ${result}" __print_current_stats __check_stop_at_error - elif [ $4 = "=" ] && [ "$result" -eq $5 ]; then + elif [ "$4" == "=" ] && [ "$result" -eq $5 ]; then + ((RES_PASS++)) + echo -e $GREEN" PASS${EGREEN} - Result=${result}" + __print_current_stats + elif [ "$4" == ">" ] && [ "$result" -gt $5 ]; then ((RES_PASS++)) echo -e $GREEN" PASS${EGREEN} - Result=${result}" __print_current_stats - elif [ $4 = ">" ] && [ "$result" -gt $5 ]; then + elif [ "$4" == "<" ] && [ "$result" -lt $5 ]; then ((RES_PASS++)) echo -e $GREEN" PASS${EGREEN} - Result=${result}" __print_current_stats - elif [ $4 = "<" ] && [ "$result" -lt $5 ]; then + elif [ "$4" == ">=" ] && [ "$result" -ge $5 ]; then ((RES_PASS++)) echo -e $GREEN" PASS${EGREEN} - Result=${result}" __print_current_stats - elif [ $4 = "contain_str" ] && [[ $result =~ $5 ]]; then + elif [ "$4" == "contain_str" ] && [[ $result =~ $5 ]]; then ((RES_PASS++)) echo -e $GREEN" PASS${EGREEN} - Result=${result}" __print_current_stats
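
Taken together, the new options slot into the existing argument syntax shown by __print_args. An illustrative invocation; the test-script name, env-file name and kubeconfig path are placeholders, not taken from this change:

    # kube mode: point kubectl at a downloaded cluster config, delete namespaces created
    # by earlier test runs and collect endpoint statistics
    ./<test-script>.sh remote kube --env-file <env-file> release --kubeconfig /path/to/cluster.config --delete-namespaces --endpoint-stats --gen-stats

    # docker mode: instead remove containers left behind by earlier test runs
    ./<test-script>.sh remote docker --env-file <env-file> --delete-containers --endpoint-stats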