Kafka now works in kube for calls outside its namespace
[nonrtric.git] / test / common / pa_api_functions.sh
similarity index 93%
rename from test/common/agent_api_functions.sh
rename to test/common/pa_api_functions.sh
index 4cedad1..5367060 100644
@@ -444,6 +444,122 @@ start_stopped_policy_agent() {
 }
 
 
+# Function to prepare the Consul configuration according to the current simulator configuration
+# args: SDNC|NOSDNC <output-file>
+# (Function for test scripts)
+prepare_consul_config() {
+       echo -e $BOLD"Prepare Consul config"$EBOLD
+
+       echo " Writing consul config for "$POLICY_AGENT_APP_NAME" to file: "$2
+
+       if [ $# != 2 ]; then
+               ((RES_CONF_FAIL++))
+               __print_err "need two args, SDNC|NOSDNC <output-file>" $@
+               exit 1
+       fi
+
+       if [ $1 == "SDNC" ]; then
+               echo -e " Config$BOLD including SDNC$EBOLD configuration"
+       elif [ $1 == "NOSDNC" ]; then
+               echo -e " Config$BOLD excluding SDNC$EBOLD configuration"
+       else
+               ((RES_CONF_FAIL++))
+               __print_err "need two args, SDNC|NOSDNC <output-file>" $@
+               exit 1
+       fi
+
+       config_json="\n            {"
+       if [ $1 == "SDNC" ]; then
+               config_json=$config_json"\n   \"controller\": ["
+               config_json=$config_json"\n                     {"
+               config_json=$config_json"\n                       \"name\": \"$SDNC_APP_NAME\","
+               config_json=$config_json"\n                       \"baseUrl\": \"$SDNC_SERVICE_PATH\","
+               config_json=$config_json"\n                       \"userName\": \"$SDNC_USER\","
+               config_json=$config_json"\n                       \"password\": \"$SDNC_PWD\""
+               config_json=$config_json"\n                     }"
+               config_json=$config_json"\n   ],"
+       fi
+
+       config_json=$config_json"\n   \"streams_publishes\": {"
+       config_json=$config_json"\n                            \"dmaap_publisher\": {"
+       config_json=$config_json"\n                              \"type\": \"message-router\","
+       config_json=$config_json"\n                              \"dmaap_info\": {"
+       config_json=$config_json"\n                                \"topic_url\": \"$MR_SERVICE_PATH$MR_WRITE_URL\""
+       config_json=$config_json"\n                              }"
+       config_json=$config_json"\n                            }"
+       config_json=$config_json"\n   },"
+       config_json=$config_json"\n   \"streams_subscribes\": {"
+       config_json=$config_json"\n                             \"dmaap_subscriber\": {"
+       config_json=$config_json"\n                               \"type\": \"message-router\","
+       config_json=$config_json"\n                               \"dmaap_info\": {"
+       config_json=$config_json"\n                                   \"topic_url\": \"$MR_SERVICE_PATH$MR_READ_URL\""
+       config_json=$config_json"\n                                 }"
+       config_json=$config_json"\n                               }"
+       config_json=$config_json"\n   },"
+
+       config_json=$config_json"\n   \"ric\": ["
+
+       if [ $RUNMODE == "KUBE" ]; then
+               result=$(kubectl get pods -n $KUBE_A1SIM_NAMESPACE -o jsonpath='{.items[?(@.metadata.labels.autotest=="RICSIM")].metadata.name}')
+               rics=""
+               ric_cntr=0
+               if [ $? -eq 0 ] && [ ! -z "$result" ]; then
+                       for im in $result; do
+                               if [[ $im != *"-0" ]]; then
+                                       ric_subdomain=$(kubectl get pod $im -n $KUBE_A1SIM_NAMESPACE -o jsonpath='{.spec.subdomain}')
+                                       rics=$rics" "$im"."$ric_subdomain"."$KUBE_A1SIM_NAMESPACE
+                                       let ric_cntr=ric_cntr+1
+                               fi
+                       done
+               fi
+               if [ $ric_cntr -eq 0 ]; then
+                       echo $YELLOW"Warning: No rics found for the configuration"$EYELLOW
+               fi
+       else
+               rics=$(docker ps --filter "name=$RIC_SIM_PREFIX" --filter "network=$DOCKER_SIM_NWNAME" --filter "status=running" --format {{.Names}})
+               if [ $? -ne 0 ] || [ -z "$rics" ]; then
+                       echo -e $RED" FAIL - the names of the running RIC Simulator cannot be retrieved." $ERED
+                       ((RES_CONF_FAIL++))
+                       return 1
+               fi
+       fi
+       cntr=0
+       for ric in $rics; do
+               if [ $cntr -gt 0 ]; then
+                       config_json=$config_json"\n          ,"
+               fi
+               config_json=$config_json"\n          {"
+               if [ $RUNMODE == "KUBE" ]; then
+                       ric_id=${ric%.*.*} # extract pod id from full hostname
+                       ric_id=$(echo "$ric_id" | tr '-' '_')
+               else
+                       ric_id=$ric
+               fi
+               echo " Found a1 sim: "$ric_id
+               config_json=$config_json"\n            \"name\": \"$ric_id\","
+               config_json=$config_json"\n            \"baseUrl\": \"$RIC_SIM_HTTPX://$ric:$RIC_SIM_PORT\","
+               if [ $1 == "SDNC" ]; then
+                       config_json=$config_json"\n            \"controller\": \"$SDNC_APP_NAME\","
+               fi
+               config_json=$config_json"\n            \"managedElementIds\": ["
+               config_json=$config_json"\n              \"me1_$ric_id\","
+               config_json=$config_json"\n              \"me2_$ric_id\""
+               config_json=$config_json"\n            ]"
+               config_json=$config_json"\n          }"
+               let cntr=cntr+1
+       done
+
+       config_json=$config_json"\n           ]"
+       config_json=$config_json"\n}"
+
+       if [ $RUNMODE == "KUBE" ]; then
+               config_json="{\"config\":"$config_json"}"
+       fi
+
+       printf "$config_json">$2
+
+       echo ""
+}
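+
+# Example usage (illustrative sketch, not part of this change): the call below
+# assumes a docker run with a single simulator named "ricsim_g1_1" and an
+# output file ".consul_config.json"; both names are hypothetical.
+#
+#   prepare_consul_config NOSDNC ".consul_config.json"
+#
+# The generated file then has roughly this shape, with the shell variables
+# shown as placeholders for the environment-specific values:
+#
+#   {
+#     "streams_publishes": {
+#       "dmaap_publisher": {
+#         "type": "message-router",
+#         "dmaap_info": { "topic_url": "<$MR_SERVICE_PATH$MR_WRITE_URL>" }
+#       }
+#     },
+#     "streams_subscribes": {
+#       "dmaap_subscriber": {
+#         "type": "message-router",
+#         "dmaap_info": { "topic_url": "<$MR_SERVICE_PATH$MR_READ_URL>" }
+#       }
+#     },
+#     "ric": [
+#       {
+#         "name": "ricsim_g1_1",
+#         "baseUrl": "<$RIC_SIM_HTTPX>://ricsim_g1_1:<$RIC_SIM_PORT>",
+#         "managedElementIds": [ "me1_ricsim_g1_1", "me2_ricsim_g1_1" ]
+#       }
+#     ]
+#   }
+#
+# With SDNC as the first arg a "controller" array is added before the streams,
+# and in KUBE mode the whole object is wrapped as {"config": { ... }}.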
 
 # Load the application config for the agent into a config map
 agent_load_config() {