# args: <log-dir> <file-prefix>
__ICS_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- kubectl logs -l "autotest=ICS" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_ics.log 2>&1
+ kubectl $KUBECONF logs -l "autotest=ICS" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_ics.log 2>&1
else
docker logs $ICS_APP_NAME > $1$2_ics.log 2>&1
fi
# Initial setup of the ICS app config before any test case runs.
# Selects http (non-secure) REST access and clears the jwt sidecar file
# so that no oauth token sidecar is used unless ics_configure_sec is called.
# args: -
__ICS_initial_setup() {
	use_ics_rest_http
	# Default: no jwt token sidecar mounted - ics_configure_sec overrides this
	export ICS_SIDECAR_JWT_FILE=""
}
# Set app short-name, app name and namespace for logging runtime statistics of kubernets pods or docker containers
fi
}
# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
# Currently a no-op: ICS has no extra tool requirements, so it always succeeds.
# args: -
__ICS_test_requirements() {
	:
}
+
#######################################################
export ICS_DATA_PVC_NAME=$ICS_APP_NAME"-pvc"
#Create a unique path for the pv each time to prevent a previous volume to be reused
export ICS_PV_PATH="icsdata-"$(date +%s)
+ export HOST_PATH_BASE_DIR
if [ $1 == "PROXY" ]; then
export ICS_HTTP_PROXY_CONFIG_PORT=$HTTP_PROXY_CONFIG_PORT #Set if proxy is started
# Create app
input_yaml=$SIM_GROUP"/"$ICS_COMPOSE_DIR"/"app.yaml
output_yaml=$PWD/tmp/ics_app.yaml
+ if [ -z "$ICS_SIDECAR_JWT_FILE" ]; then
+ cat $input_yaml | sed '/#ICS_JWT_START/,/#ICS_JWT_STOP/d' > $PWD/tmp/ics_app_tmp.yaml
+ input_yaml=$PWD/tmp/ics_app_tmp.yaml
+ fi
__kube_create_instance app $ICS_APP_NAME $input_yaml $output_yaml
fi
# Keep the initial worker node in case the pod need to be "restarted" - must be made to the same node due to a volume mounted on the host
if [ $retcode_i -eq 0 ]; then
- __ICS_WORKER_NODE=$(kubectl get pod -l "autotest=ICS" -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.items[*].spec.nodeName}')
+ __ICS_WORKER_NODE=$(kubectl $KUBECONF get pod -l "autotest=ICS" -n $KUBE_NONRTRIC_NAMESPACE -o jsonpath='{.items[*].spec.nodeName}')
if [ -z "$__ICS_WORKER_NODE" ]; then
echo -e $YELLOW" Cannot find worker node for pod for $ICS_APP_NAME, persistency may not work"$EYELLOW
fi
fi
else
echo " No files in mounted dir or dir does not exists"
+ mkdir db
fi
cd $curdir
__kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest ICS
echo " Deleting the replica set - a new will be started when the app is started"
- tmp=$(kubectl delete rs -n $KUBE_NONRTRIC_NAMESPACE -l "autotest=ICS")
+ tmp=$(kubectl $KUBECONF delete rs -n $KUBE_NONRTRIC_NAMESPACE -l "autotest=ICS")
if [ $? -ne 0 ]; then
echo -e $RED" Could not delete replica set "$RED
((RES_CONF_FAIL++))
return 0
fi
- # Tie the PMS to the same worker node it was initially started on
- # A PVC of type hostPath is mounted to PMS, for persistent storage, so the PMS must always be on the node which mounted the volume
+ # Tie the ICS to the same worker node it was initially started on
+	# A PVC of type hostPath is mounted to ICS, for persistent storage, so the ICS must always be on the node which mounted the volume
if [ -z "$__ICS_WORKER_NODE" ]; then
echo -e $RED" No initial worker node found for pod "$RED
((RES_CONF_FAIL++))
return 1
else
- echo -e $BOLD" Setting nodeSelector kubernetes.io/hostname=$__ICS_WORKER_NODE to deployment for $ICS_APP_NAME. Pod will always run on this worker node: $__PA_WORKER_NODE"$BOLD
+ echo -e $BOLD" Setting nodeSelector kubernetes.io/hostname=$__ICS_WORKER_NODE to deployment for $ICS_APP_NAME. Pod will always run on this worker node: $__ICS_WORKER_NODE"$BOLD
echo -e $BOLD" The mounted volume is mounted as hostPath and only available on that worker node."$BOLD
- tmp=$(kubectl patch deployment $ICS_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE --patch '{"spec": {"template": {"spec": {"nodeSelector": {"kubernetes.io/hostname": "'$__ICS_WORKER_NODE'"}}}}}')
+ tmp=$(kubectl $KUBECONF patch deployment $ICS_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE --patch '{"spec": {"template": {"spec": {"nodeSelector": {"kubernetes.io/hostname": "'$__ICS_WORKER_NODE'"}}}}}')
if [ $? -ne 0 ]; then
echo -e $YELLOW" Cannot set nodeSelector to deployment for $ICS_APP_NAME, persistency may not work"$EYELLOW
fi
# (Function for test scripts)
set_ics_trace() {
echo -e $BOLD"Setting ics trace logging"$EBOLD
- curlString="$ICS_SERVICE_PATH/actuator/loggers/org.oransc.information -X POST -H Content-Type:application/json -d {\"configuredLevel\":\"trace\"}"
+ curlString="$ICS_SERVICE_PATH$ICS_ACTUATOR -X POST -H Content-Type:application/json -d {\"configuredLevel\":\"trace\"}"
result=$(__do_curl "$curlString")
if [ $? -ne 0 ]; then
__print_err "Could not set trace mode" $@
if [[ "$ICS_FEATURE_LEVEL" == *"INFO-TYPES"* ]]; then
query="/data-producer/v1/info-producers"
if [ $# -gt 1 ] && [ $2 != "NOTYPE" ]; then
- query=$query"?info_type_id=$2"
+ query=$query"?info_type_id=$2&infoTypeId=$2" #info_type_id changed to infoTypeId in F-release.
+ #Remove info_type_id when F-release is no longer supported
fi
else
query="/ei-producer/v1/eiproducers"
# (Function for test scripts)
# Reset the ICS persistent volume claim in the nonrtric namespace.
# NOTE(review): this span looks like a stitched diff fragment - the actual pvc
# clean/reset step appears to be elided between the pvc lookup and the pass
# log. TODO confirm against the full file.
ics_kube_pvc_reset() {
	__log_test_start $@
	# Find the ICS pvc by filtering pvc names on "information";
	# fall back to the default pvc name if the lookup yields nothing
-	pvc_name=$(kubectl get pvc -n $KUBE_NONRTRIC_NAMESPACE --no-headers -o custom-columns=":metadata.name" | grep information)
+	pvc_name=$(kubectl $KUBECONF get pvc -n $KUBE_NONRTRIC_NAMESPACE --no-headers -o custom-columns=":metadata.name" | grep information)
	if [ -z "$pvc_name" ]; then
		pvc_name=informationservice-pvc
	fi
	__log_test_pass
	return 0
+}
+
# (Function for test scripts)
# Configure ICS for oauth security: exports the client credentials and the
# keycloak token endpoint url, and enables the auth token sidecar by setting
# the sidecar mount path and jwt file (read by the app deployment yaml).
# args: <realm> <client-name> <client-secret>
ics_configure_sec() {
	export ICS_CREDS_GRANT_TYPE="client_credentials"
	export ICS_CREDS_CLIENT_SECRET=$3
	export ICS_CREDS_CLIENT_ID=$2
	export ICS_AUTH_SERVICE_URL=$KEYCLOAK_SERVICE_PATH$KEYCLOAK_TOKEN_URL_PREFIX/$1/protocol/openid-connect/token
	export ICS_SIDECAR_MOUNT="/token-cache"
	export ICS_SIDECAR_JWT_FILE=$ICS_SIDECAR_MOUNT"/jwt.txt"

	# Export sidecar app identifiers (values are set elsewhere in the test env)
	export AUTHSIDECAR_APP_NAME
	export AUTHSIDECAR_DISPLAY_NAME
}
\ No newline at end of file