if [ $ECS_VERSION == "V1-1" ]; then
prodstub_check_jobdata 200 prod-a job1 type1 $TARGET1 ricsim_g3_1 testdata/ecs/job-template.json
else
- prodstub_check_jobdata_2 200 prod-a job1 type1 $TARGET1 ricsim_g3_1 testdata/ecs/job-template.json
+ if [[ "$ECS_FEATURE_LEVEL" == *"INFO-TYPES"* ]]; then
+ prodstub_check_jobdata_3 200 prod-a job1 type1 $TARGET1 ricsim_g3_1 testdata/ecs/job-template.json
+ else
+ prodstub_check_jobdata_2 200 prod-a job1 type1 $TARGET1 ricsim_g3_1 testdata/ecs/job-template.json
+ fi
fi
## Create a second job for prod-a
prodstub_check_jobdata 200 prod-a job2 type1 $TARGET2 ricsim_g3_2 testdata/ecs/job-template.json
else
if [[ "$ECS_FEATURE_LEVEL" == *"INFO-TYPES"* ]]; then
- prodstub_check_jobdata_2 200 prod-a job2 type1 $TARGET2 ricsim_g3_2 testdata/ecs/job-template.json
- else
prodstub_check_jobdata_3 200 prod-a job2 type1 $TARGET2 ricsim_g3_2 testdata/ecs/job-template.json
+ else
+ prodstub_check_jobdata_2 200 prod-a job2 type1 $TARGET2 ricsim_g3_2 testdata/ecs/job-template.json
fi
fi
if [ $retcode_p -eq 0 ]; then
echo -e " Using existing $POLICY_AGENT_APP_NAME deployment and service"
echo " Setting $POLICY_AGENT_APP_NAME replicas=1"
- __kube_scale deployment $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
+ res_type=$(__kube_get_resource_type $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE)
+ __kube_scale $res_type $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
fi
if [ $retcode_i -eq 0 ]; then
__check_prestarted_image "PA"
if [ $? -eq 0 ]; then
echo -e $YELLOW" Persistency may not work for app $POLICY_AGENT_APP_NAME in multi-worker node config when running it as a prestarted app"$EYELLOW
- __kube_scale deployment $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE 0
+ res_type=$(__kube_get_resource_type $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE)
+ __kube_scale $res_type $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE 0
return 0
fi
__kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest PA
__check_prestarted_image "PA"
if [ $? -eq 0 ]; then
echo -e $YELLOW" Persistency may not work for app $POLICY_AGENT_APP_NAME in multi-worker node config when running it as a prestarted app"$EYELLOW
- __kube_scale deployment $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
+ res_type=$(__kube_get_resource_type $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE)
+ __kube_scale $res_type $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
__check_service_start $POLICY_AGENT_APP_NAME $PA_PATH$POLICY_AGENT_ALIVE_URL
return 0
fi
pms_kube_pvc_reset() {
__log_test_start $@
- __kube_clean_pvc $POLICY_AGENT_APP_NAME nonrtric policymanagementservice-vardata-pvc /var/policy-management-service/database
+ pvc_name=$(kubectl get pvc -n nonrtric --no-headers -o custom-columns=":metadata.name" | grep policy)
+ if [ -z "$pvc_name" ]; then
+ pvc_name=policymanagementservice-vardata-pvc
+ fi
+ echo " Trying to reset pvc: "$pvc_name
+ __kube_clean_pvc $POLICY_AGENT_APP_NAME nonrtric $pvc_name /var/policy-management-service/database
__log_test_pass
return 0
if [ $retcode_p -eq 0 ]; then
echo -e " Using existing $ECS_APP_NAME deployment and service"
echo " Setting ECS replicas=1"
- __kube_scale deployment $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
+ res_type=$(__kube_get_resource_type $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE)
+ __kube_scale $res_type $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
fi
# Check if app shall be fully managed by the test script
__check_prestarted_image "ECS"
if [ $? -eq 0 ]; then
echo -e $YELLOW" Persistency may not work for app $ECS_APP_NAME in multi-worker node config when running it as a prestarted app"$EYELLOW
- __kube_scale deployment $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE 0
+ res_type=$(__kube_get_resource_type $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE)
+ __kube_scale $res_type $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE 0
return 0
fi
__check_prestarted_image "ECS"
if [ $? -eq 0 ]; then
echo -e $YELLOW" Persistency may not work for app $ECS_APP_NAME in multi-worker node config when running it as a prestarted app"$EYELLOW
- __kube_scale deployment $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
+ res_type=$(__kube_get_resource_type $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE)
+ __kube_scale $res_type $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
__check_service_start $ECS_APP_NAME $ECS_PATH$ECS_ALIVE_URL
return 0
fi
ecs_kube_pvc_reset() {
__log_test_start $@
- __kube_clean_pvc $ECS_APP_NAME nonrtric enrichmentservice-pvc /var/enrichment-coordinator-service/database
+ pvc_name=$(kubectl get pvc -n nonrtric --no-headers -o custom-columns=":metadata.name" | grep enrichment)
+ if [ -z "$pvc_name" ]; then
+ pvc_name=enrichmentservice-pvc
+ fi
+ echo " Trying to reset pvc: "$pvc_name
+
+ __kube_clean_pvc $ECS_APP_NAME nonrtric $pvc_name /var/enrichment-coordinator-service/database
__log_test_pass
return 0
__log_test_fail_general "Template file "$7" for jobdata, does not exist"
return 1
fi
- targetJson="{\"ei_job_identity\":\"$3\",\"ei_type_identity\":\"$4\",\"target_uri\":\"$5\",\"owner\":\"$6\", \"ei_job_data\":$jobfile,\"last_updated\":\"????\"}"
- file="./tmp/.p.json"
+ if [[ "$ECS_FEATURE_LEVEL" == *"INFO-TYPES"* ]]; then
+ targetJson="{\"info_job_identity\":\"$3\",\"info_type_identity\":\"$4\",\"target_uri\":\"$5\",\"owner\":\"$6\", \"info_job_data\":$jobfile,\"last_updated\":\"????\"}"
+ else
+ targetJson="{\"ei_job_identity\":\"$3\",\"ei_type_identity\":\"$4\",\"target_uri\":\"$5\",\"owner\":\"$6\", \"ei_job_data\":$jobfile,\"last_updated\":\"????\"}"
+ fi
+ file="./tmp/.p.json"
echo "$targetJson" > $file
curlString="curl -X GET -skw %{http_code} $PROD_STUB_PATH/jobdata/$2/$3"
### Functions for kube management
###################################
+# Get resource type for scaling
+# args: <resource-name> <namespace>
+# Probes for a deployment first, then a statefulset, matching the given name.
+# Output: prints "deployment" or "sts" on stdout and returns 0 on a match;
+# prints "unknown-resource-type" and returns 1 if neither resource exists.
+# NOTE(review): kubectl stderr is captured in ./tmp/kubeerr for debugging;
+# only the last probe's error output is kept.
+# (Not for test scripts)
+__kube_get_resource_type() {
+	kubectl get deployment $1 -n $2 1> /dev/null 2> ./tmp/kubeerr
+	if [ $? -eq 0 ]; then
+		echo "deployment"
+		return 0
+	fi
+	kubectl get sts $1 -n $2 1> /dev/null 2> ./tmp/kubeerr
+	if [ $? -eq 0 ]; then
+		echo "sts"
+		return 0
+	fi
+	echo "unknown-resource-type"
+	return 1
+}
+
# Scale a kube resource to a specific count
# args: <resource-type> <resource-name> <namespace> <target-count>
# (Not for test scripts)
envsubst < $input_yaml > $output_yaml
- kubectl delete -f $output_yaml #> /dev/null 2>&1 # Delete the previous terminated pod - if existing
+ kubectl delete -f $output_yaml 1> /dev/null 2> /dev/null # Delete the previous terminated pod - if existing
__kube_create_instance pod pvc-cleaner $input_yaml $output_yaml
if [ $? -ne 0 ]; then
appcount=$1
shift
- os_version=$(uname -a 2> /dev/null | awk '{print tolower($0)}' | grep "microsoft")
- if [[ "$os_version" == *"microsoft"* ]]; then
- echo -e $YELLOW" Workaround for Linux on Win - delay container start, 1 sec, to make sure files mounted in the container are available on disk - WLS problem"$EYELLOW
- sleep 1
- fi
-
-
if [ "$compose_args" == "NODOCKERARGS" ]; then
docker-compose -f $compose_file up -d &> .dockererr
if [ $? -ne 0 ]; then
ENV NODE_ENV=production
WORKDIR /usr/src/app/cert
-COPY cert/*.crt ./ #Need trailing slash on dest for multiple file copy
+
+# Need trailing slash on dest for multiple file copy
+COPY cert/*.crt ./
COPY cert/pass .
WORKDIR /usr/src/app
FROM ${NEXUS_PROXY_REPO}python:3.8-slim-buster
-COPY app/ /usr/src/app/
-COPY cert/ /usr/src/app/cert/
-
-WORKDIR /usr/src/app/
-
-RUN chmod +x start.sh
+# Change order to fix problem with WSL
#install nginx
RUN apt-get update
RUN apt-get install -y nginx=1.14.*
-#start mrstub
+COPY app/ /usr/src/app/
+COPY cert/ /usr/src/app/cert/
+
+WORKDIR /usr/src/app
+
RUN pip install -r requirements.txt
+RUN chmod +x start.sh
+
CMD [ "./start.sh" ]
\ No newline at end of file