diff --git a/test/common/agent_api_functions.sh b/test/common/agent_api_functions.sh
index 11cc1454..cb48d788 100644
--- a/test/common/agent_api_functions.sh
+++ b/test/common/agent_api_functions.sh
@@ -34,7 +34,7 @@ __PA_imagesetup() {
 # Shall be used for images that do not allow overriding
 # Both vars may contain: 'remote', 'remote-remove' or 'local'
 __PA_imagepull() {
-    __check_and_pull_image $1 "$POLICY_AGENT_DISPLAY_NAME" $POLICY_AGENT_APP_NAME $POLICY_AGENT_IMAGE
+    __check_and_pull_image $1 "$POLICY_AGENT_DISPLAY_NAME" $POLICY_AGENT_APP_NAME POLICY_AGENT_IMAGE
 }
 
 # Build image (only for simulators or interface stubs owned by the test environment)
@@ -45,9 +45,13 @@ __PA_imagebuild() {
 }
 
 # Generate a string for each included image using the app display name and a docker images format string
+# If a custom image repo is used then the source image from the local repo is also listed
 # arg: <docker-images-format-string> <file-to-append>
 __PA_image_data() {
     echo -e "$POLICY_AGENT_DISPLAY_NAME\t$(docker images --format $1 $POLICY_AGENT_IMAGE)" >> $2
+    if [ ! -z "$POLICY_AGENT_IMAGE_SOURCE" ]; then
+        echo -e "-- source image --\t$(docker images --format $1 $POLICY_AGENT_IMAGE_SOURCE)" >> $2
+    fi
 }
 
 # Scale kubernetes resources to zero
@@ -92,6 +96,9 @@ PA_ADAPTER=$PA_PATH
 # Make curl retry towards the agent for the http response codes set in this env var (space-separated list of codes)
 AGENT_RETRY_CODES=""
 
+# Save the first worker node the pod is started on
+__PA_WORKER_NODE=""
+
 ###########################
 ### Policy Agent functions
 ###########################
@@ -199,6 +206,13 @@ start_policy_agent() {
         export POLICY_AGENT_CONFIG_CONFIGMAP_NAME=$POLICY_AGENT_APP_NAME"-config"
         export POLICY_AGENT_DATA_CONFIGMAP_NAME=$POLICY_AGENT_APP_NAME"-data"
         export POLICY_AGENT_PKG_NAME
+
+        export POLICY_AGENT_DATA_PV_NAME=$POLICY_AGENT_APP_NAME"-pv"
+        export POLICY_AGENT_DATA_PVC_NAME=$POLICY_AGENT_APP_NAME"-pvc"
+        ## Create a unique path for the pv each time to prevent a previous volume from being reused
+        export POLICY_AGENT_PV_PATH="padata-"$(date +%s)
+        export POLICY_AGENT_CONTAINER_MNT_DIR
+
         if [ $1 == "PROXY" ]; then
             AGENT_HTTP_PROXY_CONFIG_PORT=$HTTP_PROXY_CONFIG_PORT  #Set if proxy is started
             AGENT_HTTP_PROXY_CONFIG_HOST_NAME=$HTTP_PROXY_CONFIG_HOST_NAME #Set if proxy is started
@@ -233,6 +247,16 @@ start_policy_agent() {
             output_yaml=$PWD/tmp/pa_cfd.yaml
             __kube_create_configmap $POLICY_AGENT_DATA_CONFIGMAP_NAME $KUBE_NONRTRIC_NAMESPACE autotest PA $data_json $output_yaml
 
+            ## Create pv
+            input_yaml=$SIM_GROUP"/"$POLICY_AGENT_COMPOSE_DIR"/"pv.yaml
+            output_yaml=$PWD/tmp/pa_pv.yaml
+            __kube_create_instance pv $POLICY_AGENT_APP_NAME $input_yaml $output_yaml
+
+            ## Create pvc
+            input_yaml=$SIM_GROUP"/"$POLICY_AGENT_COMPOSE_DIR"/"pvc.yaml
+            output_yaml=$PWD/tmp/pa_pvc.yaml
+            __kube_create_instance pvc $POLICY_AGENT_APP_NAME $input_yaml $output_yaml
+
             # Create service
             input_yaml=$SIM_GROUP"/"$POLICY_AGENT_COMPOSE_DIR"/"svc.yaml
             output_yaml=$PWD/tmp/pa_svc.yaml
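
Note: the pv.yaml and pvc.yaml templates consumed by __kube_create_instance above are not shown in this diff. A minimal sketch of an equivalent hostPath PV/PVC pair, built from the variables exported earlier; the storage size, reclaim policy and the /tmp base path are assumptions, not values taken from the repo:

# Sketch only - the real templates live in $SIM_GROUP/$POLICY_AGENT_COMPOSE_DIR.
# POLICY_AGENT_PV_PATH is unique per run ("padata-<epoch-seconds>"), so data
# left behind by a previous volume can never be picked up by a new test run.
kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolume
metadata:
  name: $POLICY_AGENT_DATA_PV_NAME
  labels:
    autotest: PA
spec:
  capacity:
    storage: 1Mi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  hostPath:
    path: /tmp/$POLICY_AGENT_PV_PATH
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: $POLICY_AGENT_DATA_PVC_NAME
  namespace: $KUBE_NONRTRIC_NAMESPACE
  labels:
    autotest: PA
spec:
  storageClassName: ""
  volumeName: $POLICY_AGENT_DATA_PV_NAME
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Mi
EOF
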
-z "$__PA_WORKER_NODE" ]; then + echo -e $YELLOW" Cannot find worker node for pod for $POLICY_AGENT_APP_NAME, persistency may not work"$EYELLOW + fi + echo " Retrieving host and ports for service..." PA_HOST_NAME=$(__kube_get_service_host $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE) POLICY_AGENT_EXTERNAL_PORT=$(__kube_get_service_port $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE "http") @@ -270,6 +300,25 @@ start_policy_agent() { exit fi + curdir=$PWD + cd $SIM_GROUP + cd policy_agent + cd $POLICY_AGENT_HOST_MNT_DIR + #cd .. + if [ -d db ]; then + if [ "$(ls -A $DIR)" ]; then + echo -e $BOLD" Cleaning files in mounted dir: $PWD/db"$EBOLD + rm -rf db/* &> /dev/null + if [ $? -ne 0 ]; then + echo -e $RED" Cannot remove database files in: $PWD"$ERED + exit 1 + fi + fi + else + echo " No files in mounted dir or dir does not exists" + fi + cd $curdir + #Export all vars needed for docker-compose export POLICY_AGENT_APP_NAME export POLICY_AGENT_APP_NAME_ALIAS @@ -287,6 +336,7 @@ start_policy_agent() { export POLICY_AGENT_CONFIG_FILE export POLICY_AGENT_PKG_NAME export POLICY_AGENT_DISPLAY_NAME + export POLICY_AGENT_CONTAINER_MNT_DIR if [ $1 == "PROXY" ]; then AGENT_HTTP_PROXY_CONFIG_PORT=$HTTP_PROXY_CONFIG_PORT #Set if proxy is started @@ -316,6 +366,79 @@ start_policy_agent() { return 0 } +# Stop the policy agent +# args: - +# args: - +# (Function for test scripts) +stop_policy_agent() { + echo -e $BOLD"Stopping $POLICY_AGENT_DISPLAY_NAME"$EBOLD + + if [ $RUNMODE == "KUBE" ]; then + __kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest PA + echo " Deleting the replica set - a new will be started when the app is started" + tmp=$(kubectl delete rs -n $KUBE_NONRTRIC_NAMESPACE -l "autotest=PA") + if [ $? -ne 0 ]; then + echo -e $RED" Could not delete replica set "$RED + ((RES_CONF_FAIL++)) + return 1 + fi + else + docker stop $POLICY_AGENT_APP_NAME &> ./tmp/.dockererr + if [ $? -ne 0 ]; then + __print_err "Could not stop $POLICY_AGENT_APP_NAME" $@ + cat ./tmp/.dockererr + ((RES_CONF_FAIL++)) + return 1 + fi + fi + echo -e $BOLD$GREEN"Stopped"$EGREEN$EBOLD + echo "" + return 0 +} + +# Start a previously stopped policy agent +# args: - +# (Function for test scripts) +start_stopped_policy_agent() { + echo -e $BOLD"Starting (the previously stopped) $POLICY_AGENT_DISPLAY_NAME"$EBOLD + + if [ $RUNMODE == "KUBE" ]; then + + # Tie the PMS to the same worker node it was initially started on + # A PVC of type hostPath is mounted to PMS, for persistent storage, so the PMS must always be on the node which mounted the volume + if [ -z "$__PA_WORKER_NODE" ]; then + echo -e $RED" No initial worker node found for pod "$RED + ((RES_CONF_FAIL++)) + return 1 + else + echo -e $BOLD" Setting nodeSelector kubernetes.io/hostname=$__PA_WORKER_NODE to deployment for $POLICY_AGENT_APP_NAME. Pod will always run on this worker node: $__PA_WORKER_NODE"$BOLD + echo -e $BOLD" The mounted volume is mounted as hostPath and only available on that worker node."$BOLD + tmp=$(kubectl patch deployment $POLICY_AGENT_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE --patch '{"spec": {"template": {"spec": {"nodeSelector": {"kubernetes.io/hostname": "'$__PA_WORKER_NODE'"}}}}}') + if [ $? -ne 0 ]; then + echo -e $YELLOW" Cannot set nodeSelector to deployment for $POLICY_AGENT_APP_NAME, persistency may not work"$EYELLOW + fi + __kube_scale deployment $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1 + fi + + else + docker start $POLICY_AGENT_APP_NAME &> ./tmp/.dockererr + if [ $? 
+# Start a previously stopped policy agent
+# args: -
+# (Function for test scripts)
+start_stopped_policy_agent() {
+    echo -e $BOLD"Starting (the previously stopped) $POLICY_AGENT_DISPLAY_NAME"$EBOLD
+
+    if [ $RUNMODE == "KUBE" ]; then
+
+        # Tie the PMS to the same worker node it was initially started on
+        # A hostPath volume (via a PVC) is mounted to the PMS for persistent storage, so the PMS must always run on the node where that volume resides
+        if [ -z "$__PA_WORKER_NODE" ]; then
+            echo -e $RED" No initial worker node found for the pod "$ERED
+            ((RES_CONF_FAIL++))
+            return 1
+        else
+            echo -e $BOLD" Setting nodeSelector kubernetes.io/hostname=$__PA_WORKER_NODE on the deployment for $POLICY_AGENT_APP_NAME. The pod will always run on this worker node: $__PA_WORKER_NODE"$EBOLD
+            echo -e $BOLD" The volume is mounted as hostPath and is only available on that worker node."$EBOLD
+            tmp=$(kubectl patch deployment $POLICY_AGENT_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE --patch '{"spec": {"template": {"spec": {"nodeSelector": {"kubernetes.io/hostname": "'$__PA_WORKER_NODE'"}}}}}')
+            if [ $? -ne 0 ]; then
+                echo -e $YELLOW" Cannot set nodeSelector on the deployment for $POLICY_AGENT_APP_NAME, persistency may not work"$EYELLOW
+            fi
+            __kube_scale deployment $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
+        fi
+
+    else
+        docker start $POLICY_AGENT_APP_NAME &> ./tmp/.dockererr
+        if [ $? -ne 0 ]; then
+            __print_err "Could not start (the stopped) $POLICY_AGENT_APP_NAME" $@
+            cat ./tmp/.dockererr
+            ((RES_CONF_FAIL++))
+            return 1
+        fi
+    fi
+    __check_service_start $POLICY_AGENT_APP_NAME $PA_PATH$POLICY_AGENT_ALIVE_URL
+    if [ $? -ne 0 ]; then
+        return 1
+    fi
+    echo ""
+    return 0
+}
+
+
+
 # Load the appl config for the agent into a config map
 agent_load_config() {
     echo -e $BOLD"Agent - load config from "$EBOLD$1
@@ -928,8 +1051,8 @@ api_put_policy_parallel() {
 
     httpproxy="NOPROXY"
     if [ $RUNMODE == "KUBE" ]; then
-        if [ ! -z "$CLUSTER_KUBE_PROXY_NODEPORT" ]; then
-            httpproxy="http://localhost:$CLUSTER_KUBE_PROXY_NODEPORT"
+        if [ ! -z "$KUBE_PROXY_PATH" ]; then
+            httpproxy=$KUBE_PROXY_PATH
         fi
     fi
 
@@ -1123,8 +1246,8 @@ api_delete_policy_parallel() {
 
     httpproxy="NOPROXY"
    if [ $RUNMODE == "KUBE" ]; then
-        if [ ! -z "$CLUSTER_KUBE_PROXY_NODEPORT" ]; then
-            httpproxy="http://localhost:$CLUSTER_KUBE_PROXY_NODEPORT"
+        if [ ! -z "$KUBE_PROXY_PATH" ]; then
+            httpproxy=$KUBE_PROXY_PATH
         fi
     fi
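
Note: how the httpproxy value is consumed is outside this diff. A minimal sketch, assuming the parallel helpers hand the value to curl as a plain http proxy; the pa_api_curl wrapper below is hypothetical, and KUBE_PROXY_PATH is expected to hold a complete proxy url such as http://localhost:<nodeport>:

pa_api_curl() {
    # $1: http method, $2: url path towards the agent
    local proxyflag=""
    if [ "$httpproxy" != "NOPROXY" ]; then
        proxyflag="--proxy $httpproxy"
    fi
    curl -sk -w '%{http_code}' $proxyflag -X "$1" "$PA_PATH$2"
}

# Example: pa_api_curl GET /policies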