DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR DMAAPMR PA RICSIM SDNC NGW KUBEPROXY"
#App names to include in the test when running kubernetes, space separated list
-KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
+KUBE_INCLUDED_IMAGES="CP CR MR DMAAPMR PA RICSIM SDNC NGW KUBEPROXY "
#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
KUBE_PRESTARTED_IMAGES=""
start_ric_simulators ricsim_g3 1 STD_2.0.0
fi
- start_mr
+ start_mr "$MR_READ_TOPIC" "/events" "users/policy-agent" \
+ "$MR_WRITE_TOPIC" "/events" "users/mr-stub"
start_cr
TC_ONELINE_DESCR="App test DMAAP Meditor and DMAAP Adapter"
#App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="ECS DMAAPMED DMAAPADP KUBEPROXY MR CR"
+DOCKER_INCLUDED_IMAGES="ECS DMAAPMED DMAAPADP KUBEPROXY MR DMAAPMR CR"
#App names to include in the test when running kubernetes, space separated list
-KUBE_INCLUDED_IMAGES=" ECS DMAAPMED DMAAPADP KUBEPROXY MR CR"
+KUBE_INCLUDED_IMAGES=" ECS DMAAPMED DMAAPADP KUBEPROXY MR DMAAPMR CR"
#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
KUBE_PRESTARTED_IMAGES=""
set_ecs_trace
-start_mr
+start_mr "unauthenticated.dmaapmed.json" "/events" "dmaapmediatorproducer/STD_Fault_Messages" \
+ "unauthenticated.dmaapadp.json" "/events" "dmaapadapterproducer/msgs" \
+ "unauthenticated.dmaapadp_kafka.text" "/events" "dmaapadapterproducer/msgs"
start_dmaapadp NOPROXY $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_CONFIG_FILE $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_DATA_FILE
# Check producers
ecs_api_idc_get_job_ids 200 NOTYPE NOWNER EMPTY
-ecs_api_idc_get_type_ids 200 ExampleInformationType STD_Fault_Messages
+ecs_api_idc_get_type_ids 200 ExampleInformationType STD_Fault_Messages ExampleInformationTypeKafka
ecs_api_edp_get_producer_ids_2 200 NOTYPE DmaapGenericInfoProducer DMaaP_Mediator_Producer
-# Create jobs for adapter
+# Create jobs for adapter - CR stores data as MD5 hash
start_timer "Create adapter jobs: $NUM_JOBS"
for ((i=1; i<=$NUM_JOBS; i++))
do
- ecs_api_idc_put_job 201 job-adp-$i ExampleInformationType $CR_SERVICE_MR_PATH/job-adp-data$i info-owner-adp-$i $CR_SERVICE_MR_PATH/job_status_info-owner-adp-$i testdata/dmaap-adapter/job-template.json
+ ecs_api_idc_put_job 201 job-adp-$i ExampleInformationType $CR_SERVICE_MR_PATH/job-adp-data$i"?storeas=md5" info-owner-adp-$i $CR_SERVICE_APP_PATH/job_status_info-owner-adp-$i testdata/dmaap-adapter/job-template.json
+
done
print_timer "Create adapter jobs: $NUM_JOBS"
-# Create jobs for mediator
+# Create jobs for adapter kafka - CR stores data as MD5 hash
+start_timer "Create adapter (kafka) jobs: $NUM_JOBS"
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+ ecs_api_idc_put_job 201 job-adp-kafka-$i ExampleInformationTypeKafka $CR_SERVICE_TEXT_PATH/job-adp-kafka-data$i"?storeas=md5" info-owner-adp-kafka-$i $CR_SERVICE_APP_PATH/job_status_info-owner-adp-kafka-$i testdata/dmaap-adapter/job-template-1-kafka.json
+
+done
+print_timer "Create adapter (kafka) jobs: $NUM_JOBS"
+
+# Create jobs for mediator - CR stores data as MD5 hash
start_timer "Create mediator jobs: $NUM_JOBS"
for ((i=1; i<=$NUM_JOBS; i++))
do
- ecs_api_idc_put_job 201 job-med-$i STD_Fault_Messages $CR_SERVICE_MR_PATH/job-med-data$i info-owner-med-$i $CR_SERVICE_MR_PATH/job_status_info-owner-med-$i testdata/dmaap-adapter/job-template.json
+ ecs_api_idc_put_job 201 job-med-$i STD_Fault_Messages $CR_SERVICE_MR_PATH/job-med-data$i"?storeas=md5" info-owner-med-$i $CR_SERVICE_APP_PATH/job_status_info-owner-med-$i testdata/dmaap-adapter/job-template.json
done
print_timer "Create mediator jobs: $NUM_JOBS"
do
ecs_api_a1_get_job_status 200 job-med-$i ENABLED 30
ecs_api_a1_get_job_status 200 job-adp-$i ENABLED 30
+ ecs_api_a1_get_job_status 200 job-adp-kafka-$i ENABLED 30
done
+
EXPECTED_DATA_DELIV=0
-# Send data to adapter via mr
+mr_api_generate_json_payload_file 1 ./tmp/data_for_dmaap_test.json
+mr_api_generate_text_payload_file 1 ./tmp/data_for_dmaap_test.txt
+
+## Send json file via message-router to adapter
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+
+mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+# Check received data callbacks from adapter
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+ cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json
+ cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json
+ cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json
+ cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json
+ cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json
+done
+
+
+## Send text file via message-router to adapter kafka
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+
+mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+# Check received data callbacks from adapter kafka
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+ cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt
+ cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt
+ cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt
+ cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt
+ cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt
+done
+
+## Send json file via message-router to mediator
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+
+mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+# Check received data callbacks from mediator
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+ cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json
+ cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json
+ cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json
+ cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json
+ cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json
+done
+
+
+# Send small json via message-router to adapter
mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-1"}'
mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-3"}'
start_timer "Data delivery adapter, 2 json per job"
cr_equal received_callbacks $EXPECTED_DATA_DELIV 100
print_timer "Data delivery adapter, 2 json per job"
-EXPECTED_DATA_DELIV=$(cr_read received_callbacks)
-# Send data to mediator
+# Send small text messages via message-router to the adapter kafka topic
+mr_api_send_text "/events/unauthenticated.dmaapadp_kafka.text" 'Message-------1'
+mr_api_send_text "/events/unauthenticated.dmaapadp_kafka.text" 'Message-------3'
+
+# Wait for data reception, adapter kafka - each job shall receive both messages
+EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
+start_timer "Data delivery adapter kafka, 2 strings per job"
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 100
+print_timer "Data delivery adapter kafka, 2 strings per job"
+
+# Send small json via message-router to mediator
mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-0"}'
mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-2"}'
start_timer "Data delivery mediator, 2 json per job"
cr_equal received_callbacks $EXPECTED_DATA_DELIV 100
print_timer "Data delivery mediator, 2 json per job"
-EXPECTED_DATA_DELIV=$(cr_read received_callbacks)
# Check received number of messages for mediator and adapter callbacks
for ((i=1; i<=$NUM_JOBS; i++))
do
- cr_equal received_callbacks?id=job-med-data$i 2
- cr_equal received_callbacks?id=job-adp-data$i 2
+ cr_equal received_callbacks?id=job-med-data$i 7
+ cr_equal received_callbacks?id=job-adp-data$i 7
+ cr_equal received_callbacks?id=job-adp-kafka-data$i 7
done
# Check received data and order for mediator and adapter callbacks
for ((i=1; i<=$NUM_JOBS; i++))
do
- cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-0"}'
- cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-2"}'
- cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-1"}'
- cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-3"}'
+ cr_api_check_single_genric_event_md5 200 job-med-data$i '{"msg":"msg-0"}'
+ cr_api_check_single_genric_event_md5 200 job-med-data$i '{"msg":"msg-2"}'
+ cr_api_check_single_genric_event_md5 200 job-adp-data$i '{"msg":"msg-1"}'
+ cr_api_check_single_genric_event_md5 200 job-adp-data$i '{"msg":"msg-3"}'
+ cr_api_check_single_genric_event_md5 200 job-adp-kafka-data$i 'Message-------1'
+ cr_api_check_single_genric_event_md5 200 job-adp-kafka-data$i 'Message-------3'
done
# Set delay in the callback receiver to slow down callbacks
-SEC_DELAY=5
+SEC_DELAY=2
cr_delay_callback 200 $SEC_DELAY
-# Send data to adapter via mr
+# Send small json via message-router to adapter
mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-5"}'
mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-7"}'
# Wait for data reception, adapter
EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
-start_timer "Data delivery adapter with $SEC_DELAY seconds delay, 2 json per job"
+start_timer "Data delivery adapter with $SEC_DELAY seconds delay in consumer, 2 json per job"
cr_equal received_callbacks $EXPECTED_DATA_DELIV $(($NUM_JOBS+300))
-print_timer "Data delivery adapter with $SEC_DELAY seconds delay, 2 json per job"
-EXPECTED_DATA_DELIV=$(cr_read received_callbacks)
+print_timer "Data delivery adapter with $SEC_DELAY seconds delay in consumer, 2 json per job"
+
+# Send small text messages via message-router to the adapter kafka topic
+mr_api_send_text "/events/unauthenticated.dmaapadp_kafka.text" 'Message-------5'
+mr_api_send_text "/events/unauthenticated.dmaapadp_kafka.text" 'Message-------7'
-# Send data to mediator
+# Wait for data reception, adapter kafka - delivery is slowed down by the consumer delay set above
+EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
+start_timer "Data delivery adapter kafka with $SEC_DELAY seconds delay in consumer, 2 strings per job"
+cr_equal received_callbacks $EXPECTED_DATA_DELIV $(($NUM_JOBS+300))
+print_timer "Data delivery adapter kafka with $SEC_DELAY seconds delay in consumer, 2 strings per job"
+
+
+# Send small json via message-router to mediator
mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-4"}'
mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-6"}'
# Wait for data reception, mediator
EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
-start_timer "Data delivery mediator with $SEC_DELAY seconds delay, 2 json per job"
+start_timer "Data delivery mediator with $SEC_DELAY seconds delay in consumer, 2 json per job"
cr_equal received_callbacks $EXPECTED_DATA_DELIV 1000
-print_timer "Data delivery mediator with $SEC_DELAY seconds delay, 2 json per job"
-EXPECTED_DATA_DELIV=$(cr_read received_callbacks)
+print_timer "Data delivery mediator with $SEC_DELAY seconds delay in consumer, 2 json per job"
# Check received number of messages for mediator and adapter callbacks
for ((i=1; i<=$NUM_JOBS; i++))
do
- cr_equal received_callbacks?id=job-med-data$i 4
- cr_equal received_callbacks?id=job-adp-data$i 4
+ cr_equal received_callbacks?id=job-med-data$i 9
+ cr_equal received_callbacks?id=job-adp-data$i 9
+ cr_equal received_callbacks?id=job-adp-kafka-data$i 9
done
# Check received data and order for mediator and adapter callbacks
for ((i=1; i<=$NUM_JOBS; i++))
do
- cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-4"}'
- cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-6"}'
- cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-5"}'
- cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-7"}'
+ cr_api_check_single_genric_event_md5 200 job-med-data$i '{"msg":"msg-4"}'
+ cr_api_check_single_genric_event_md5 200 job-med-data$i '{"msg":"msg-6"}'
+ cr_api_check_single_genric_event_md5 200 job-adp-data$i '{"msg":"msg-5"}'
+ cr_api_check_single_genric_event_md5 200 job-adp-data$i '{"msg":"msg-7"}'
+ cr_api_check_single_genric_event_md5 200 job-adp-kafka-data$i 'Message-------5'
+ cr_api_check_single_genric_event_md5 200 job-adp-kafka-data$i 'Message-------7'
done
-
-
#### TEST COMPLETE ####
store_logs END
print_result
-auto_clean_environment
\ No newline at end of file
+auto_clean_environment
DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR DMAAPMR PA RICSIM SDNC NGW KUBEPROXY"
#App names to include in the test when running kubernetes, space separated list
-KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
+KUBE_INCLUDED_IMAGES="CP CR MR DMAAPMR PA RICSIM SDNC KUBEPROXY NGW"
#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
KUBE_PRESTARTED_IMAGES=""
start_ric_simulators $RIC_SIM_PREFIX"_g3" $STD_NUM_RICS STD_2.0.0
- start_mr
+ start_mr "$MR_READ_TOPIC" "/events" "users/policy-agent" \
+ "$MR_WRITE_TOPIC" "/events" "users/mr-stub"
start_control_panel $SIM_GROUP/$CONTROL_PANEL_COMPOSE_DIR/$CONTROL_PANEL_CONFIG_FILE
clean_environment
start_kube_proxy
-start_mr
+start_mr "$MR_READ_TOPIC" "/events" "users/policy-agent" \
+ "$MR_WRITE_TOPIC" "/events" "users/mr-stub" \
+ "unauthenticated.dmaapadp.json" "/events" "dmaapadapterproducer/msgs" \
+ "unauthenticated.dmaapmed.json" "/events" "maapmediatorproducer/STD_Fault_Messages"
+
if [ $RUNMODE == "KUBE" ]; then
:
else
--- /dev/null
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type": "object",
+ "properties": {
+ "filter": {
+ "type": "string"
+ },
+ "maxConcurrency": {
+ "type": "integer"
+ },
+ "bufferTimeout": {
+ "type": "object",
+ "properties": {
+ "maxSize": {
+ "type": "integer"
+ },
+ "maxTimeMiliseconds": {
+ "type": "integer"
+ }
+ },
+ "required": [
+ "maxSize",
+ "maxTimeMiliseconds"
+ ]
+ }
+ },
+ "required": []
+}
\ No newline at end of file
--- /dev/null
+{
+ "maxConcurrency": 1,
+ "bufferTimeout": {
+ "maxSize": 1,
+ "maxTimeMiliseconds": 0
+ }
+}
\ No newline at end of file
| `--print-stats` | Prints the number of tests, failed tests, failed configuration and deviations after each individual test or config |
| `--override <file>` | Override setting from the file supplied by --env-file |
| `--pre-clean` | Clean kube resources when running docker and vice versa |
+| `--gen-stats` | Collect container/pod runtime statistics |
| `help` | Print this info along with the test script description and the list of app short names supported |
## Function: setup_testenvironment ##
use_agent_rest_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers.
+# For docker, the namespace shall be excluded (docker containers are not namespaced).
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+# stdout: "<app-short-name> <app-name> [<namespace>]" - consumed when runtime statistics collection is enabled (--gen-stats)
+__PA_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "PA $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+	else
+		echo "PA $POLICY_AGENT_APP_NAME"
+	fi
+}
+
+
#######################################################
###########################
# one for sending the requests and one for receiving the response
# but only when using the DMAAP interface
# REST or DMAAP is controlled of the base url of $XX_ADAPTER
-# arg: (PA|ECS|CR|RC GET|PUT|POST|DELETE|GET_BATCH|PUT_BATCH|POST_BATCH|DELETE_BATCH <url>|<correlation-id> [<file>]) | (PA|ECS RESPONSE <correlation-id>)
+# arg: (PA|ECS|CR|RC GET|PUT|POST|DELETE|GET_BATCH|PUT_BATCH|POST_BATCH|DELETE_BATCH <url>|<correlation-id> [<file> [mime-type]]) | (PA|ECS RESPONSE <correlation-id>)
+# Default mime type for file is application/json unless specified in parameter mime-type
# (Not for test scripts)
__do_curl_to_api() {
TIMESTAMP=$(date "+%Y-%m-%d %H:%M:%S")
paramError=0
input_url=$3
+ fname=$4
if [ $# -gt 0 ]; then
if [ $1 == "PA" ]; then
__ADAPTER=$PA_ADAPTER
__ADAPTER=$MR_STUB_ADAPTER
__ADAPTER_TYPE=$MR_STUB_ADAPTER_TYPE
__RETRY_CODES=""
- else
+ elif [ $1 == "DMAAPMR" ]; then
+ __ADAPTER=$MR_DMAAP_ADAPTER_HTTP
+ __ADAPTER_TYPE=$MR_DMAAP_ADAPTER_TYPE
+ __RETRY_CODES=""
+ else
paramError=1
fi
- if [ $__ADAPTER_TYPE == "MR-HTTP" ]; then
+ if [ "$__ADAPTER_TYPE" == "MR-HTTP" ]; then
__ADAPTER=$MR_ADAPTER_HTTP
fi
- if [ $__ADAPTER_TYPE == "MR-HTTPS" ]; then
+ if [ "$__ADAPTER_TYPE" == "MR-HTTPS" ]; then
__ADAPTER=$MR_ADAPTER_HTTPS
fi
fi
- if [ $# -lt 3 ] || [ $# -gt 4 ]; then
+ if [ $# -lt 3 ] || [ $# -gt 5 ]; then
paramError=1
else
timeout=""
fi
if [ $# -gt 3 ]; then
content=" -H Content-Type:application/json"
+ fname=$4
+ if [ $# -gt 4 ]; then
+ content=" -H Content-Type:"$5
+ fi
fi
if [ $2 == "GET" ] || [ $2 == "GET_BATCH" ]; then
oper="GET"
fi
elif [ $2 == "PUT" ] || [ $2 == "PUT_BATCH" ]; then
oper="PUT"
- if [ $# -eq 4 ]; then
- file=" --data-binary @$4"
+ if [ $# -gt 3 ]; then
+ file=" --data-binary @$fname"
fi
accept=" -H accept:application/json"
elif [ $2 == "POST" ] || [ $2 == "POST_BATCH" ]; then
oper="POST"
accept=" -H accept:*/*"
- if [ $# -eq 4 ]; then
- file=" --data-binary @$4"
+ if [ $# -gt 3 ]; then
+ file=" --data-binary @$fname"
accept=" -H accept:application/json"
fi
elif [ $2 == "DELETE" ] || [ $2 == "DELETE_BATCH" ]; then
oper=" -X "$oper
curlString="curl -k $proxyflag "${oper}${timeout}${httpcode}${accept}${content}${url}${file}
echo " CMD: "$curlString >> $HTTPLOG
- if [ $# -eq 4 ]; then
- echo " FILE: $(<$4)" >> $HTTPLOG
+ if [ $# -gt 3 ]; then
+ echo " FILE: $(<$fname)" >> $HTTPLOG
fi
# Do retry for configured response codes, otherwise only one attempt
else
if [ $oper != "RESPONSE" ]; then
requestUrl=$input_url
- if [ $2 == "PUT" ] && [ $# -eq 4 ]; then
- payload="$(cat $4 | tr -d '\n' | tr -d ' ' )"
+ if [ $2 == "PUT" ] && [ $# -gt 3 ]; then
+ payload="$(cat $fname | tr -d '\n' | tr -d ' ' )"
echo "payload: "$payload >> $HTTPLOG
file=" --data-binary "$payload
- elif [ $# -eq 4 ]; then
- echo " FILE: $(cat $4)" >> $HTTPLOG
+ elif [ $# -gt 3 ]; then
+ echo " FILE: $(cat $fname)" >> $HTTPLOG
fi
#urlencode the request url since it will be carried by send-request url
requestUrl=$(python3 -c "from __future__ import print_function; import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1]))" "$input_url")
CBS_SERVICE_PATH="http://"$CBS_APP_NAME":"$CBS_INTERNAL_PORT
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers.
+# For docker, the namespace shall be excluded.
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+# stdout: empty - no statistics entry is produced for this app
+__CONSUL_statisics_setup() {
+	echo ""
+}
+
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers.
+# For docker, the namespace shall be excluded.
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+# stdout: empty - no statistics entry is produced for this app
+__CBS_statisics_setup() {
+	echo ""
+}
#######################################################
__CP_initial_setup() {
use_control_panel_http
}
+
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers.
+# For docker, the namespace shall be excluded (docker containers are not namespaced).
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+# stdout: "<app-short-name> <app-name> [<namespace>]" - consumed when runtime statistics collection is enabled (--gen-stats)
+__CP_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "CP $CONTROL_PANEL_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+	else
+		echo "CP $CONTROL_PANEL_APP_NAME"
+	fi
+}
+
#######################################################
# All resources shall be ordered to be scaled to 0, if relevant. If not relevant to scale, then do no action.
# This function is called for apps fully managed by the test script
__SDNC_kube_scale_zero() {
- __kube_scale_all_resources $KUBE_SNDC_NAMESPACE autotest SDNC
+ __kube_scale_all_resources $KUBE_SDNC_NAMESPACE autotest SDNC
}
# Scale kubernetes resources to zero and wait until this has been accomplished, if relevant. If not relevant to scale, then do no action.
# Delete all kube resources for the app
# This function is called for apps managed by the test script.
__SDNC_kube_delete_all() {
- __kube_delete_all_resources $KUBE_SNDC_NAMESPACE autotest SDNC
+ __kube_delete_all_resources $KUBE_SDNC_NAMESPACE autotest SDNC
}
# Store docker logs
# args: <log-dir> <file-prexix>
__SDNC_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- kubectl logs -l "autotest=SDNC" -n $KUBE_SNDC_NAMESPACE --tail=-1 > $1$2_SDNC.log 2>&1
- podname=$(kubectl get pods -n $KUBE_SNDC_NAMESPACE -l "autotest=SDNC" -o custom-columns=":metadata.name")
- kubectl exec -t -n $KUBE_SNDC_NAMESPACE $podname -- cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
+ kubectl logs -l "autotest=SDNC" -n $KUBE_SDNC_NAMESPACE --tail=-1 > $1$2_SDNC.log 2>&1
+ podname=$(kubectl get pods -n $KUBE_SDNC_NAMESPACE -l "autotest=SDNC" -o custom-columns=":metadata.name")
+ kubectl exec -t -n $KUBE_SDNC_NAMESPACE $podname -- cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
else
docker exec -t $SDNC_APP_NAME cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
fi
use_sdnc_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers.
+# For docker, the namespace shall be excluded (docker containers are not namespaced).
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+# stdout: "<app-short-name> <app-name> [<namespace>]" - consumed when runtime statistics collection is enabled (--gen-stats)
+__SDNC_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "SDNC $SDNC_APP_NAME $KUBE_SDNC_NAMESPACE"
+	else
+		echo "SDNC $SDNC_APP_NAME"
+	fi
+}
+
#######################################################
# Set http as the protocol to use for all communication to SDNC
SDNC_SERVICE_PATH=$1"://"$SDNC_APP_NAME":"$2 # docker access, container->container and script->container via proxy
SDNC_SERVICE_API_PATH=$1"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_APP_NAME":"$1$SDNC_API_URL
if [ $RUNMODE == "KUBE" ]; then
- SDNC_SERVICE_PATH=$1"://"$SDNC_APP_NAME.$KUBE_SNDC_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
- SDNC_SERVICE_API_PATH=$1"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_APP_NAME.KUBE_SNDC_NAMESPACE":"$1$SDNC_API_URL
+ SDNC_SERVICE_PATH=$1"://"$SDNC_APP_NAME.$KUBE_SDNC_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
+ SDNC_SERVICE_API_PATH=$1"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_APP_NAME.KUBE_SDNC_NAMESPACE":"$1$SDNC_API_URL
fi
echo ""
# Export env vars for config files, docker compose and kube resources
# args:
__sdnc_export_vars() {
- export KUBE_SNDC_NAMESPACE
+ export KUBE_SDNC_NAMESPACE
export DOCKER_SIM_NWNAME
export SDNC_APP_NAME
if [ $retcode_p -eq 0 ]; then
echo -e " Using existing $SDNC_APP_NAME deployment and service"
echo " Setting SDNC replicas=1"
- __kube_scale deployment $SDNC_APP_NAME $KUBE_SNDC_NAMESPACE 1
+ __kube_scale deployment $SDNC_APP_NAME $KUBE_SDNC_NAMESPACE 1
fi
# Check if app shall be fully managed by the test script
echo -e " Creating $SDNC_APP_NAME app and expose service"
#Check if namespace exists, if not create it
- __kube_create_namespace $KUBE_SNDC_NAMESPACE
+ __kube_create_namespace $KUBE_SDNC_NAMESPACE
__sdnc_export_vars
use_cr_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers.
+# For docker, the namespace shall be excluded (docker containers are not namespaced).
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+# stdout: "<app-short-name> <app-name> [<namespace>]" - consumed when runtime statistics collection is enabled (--gen-stats)
+__CR_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "CR $CR_APP_NAME $KUBE_SIM_NAMESPACE"
+	else
+		echo "CR $CR_APP_NAME"
+	fi
+}
+
#######################################################
################
fi
# Service paths are used in test script to provide callbacck urls to app
CR_SERVICE_MR_PATH=$CR_SERVICE_PATH$CR_APP_CALLBACK_MR #Only for messages from dmaap adapter/mediator
+ CR_SERVICE_TEXT_PATH=$CR_SERVICE_PATH$CR_APP_CALLBACK_TEXT #Callbacks for text payload
CR_SERVICE_APP_PATH=$CR_SERVICE_PATH$CR_APP_CALLBACK #For general callbacks from apps
# CR_ADAPTER used for switching between REST and DMAAP (only REST supported currently)
body=${res:0:${#res}-3}
targetJson=$3
+ if [ $targetJson == "EMPTY" ] && [ ${#body} -ne 0 ]; then
+ __log_test_fail_body
+ return 1
+ fi
echo " TARGET JSON: $targetJson" >> $HTTPLOG
res=$(python3 ../common/compare_json.py "$targetJson" "$body")
return 1
fi
+ __log_test_pass
+ return 0
+}
+
+# CR API: Check a single (oldest) json in md5 format (or none if empty) for path.
+# Note that if a json message is given, it shall be compact, no ws except inside string.
+# The MD5 will generate different hash if ws is present or not in otherwise equivalent json.
+# NOTE: the 'genric' spelling in the function name is kept as-is - existing test scripts call this name.
+# arg: <response-code> <topic-url> (EMPTY | <data-msg> )
+# (Function for test scripts)
+cr_api_check_single_genric_event_md5() {
+	__log_test_start $@
+
+	if [ $# -ne 3 ]; then
+		__print_err "<response-code> <topic-url> (EMPTY | <data-msg> )" $@
+		return 1
+	fi
+
+	query="/get-event/"$2
+	res="$(__do_curl_to_api CR GET $query)"
+	# __do_curl_to_api appends the 3-digit http status to the body; split them apart
+	status=${res:${#res}-3}
+
+	if [ $status -ne $1 ]; then
+		__log_test_fail_status_code $1 $status
+		return 1
+	fi
+	body=${res:0:${#res}-3}
+	# EMPTY means: expect that no event is stored for the topic-url
+	if [ $3 == "EMPTY" ]; then
+		if [ ${#body} -ne 0 ]; then
+			__log_test_fail_body
+			return 1
+		else
+			__log_test_pass
+			return 0
+		fi
+	fi
+	# Compute the expected hash locally - use whichever md5 tool the host provides
+	command -v md5 > /dev/null # Mac
+	if [ $? -eq 0 ]; then
+		targetMd5=$(echo -n "$3" | md5)
+	else
+		command -v md5sum > /dev/null # Linux
+		if [ $? -eq 0 ]; then
+			targetMd5=$(echo -n "$3" | md5sum | cut -d' ' -f 1)  # Need to cut additional info printed by cmd
+		else
+			__log_test_fail_general "Command md5 nor md5sum is available"
+			return 1
+		fi
+	fi
+	targetMd5="\""$targetMd5"\"" # Quotes needed - the CR returns the stored hash as a quoted (json) string
+
+	echo " TARGET MD5 hash: $targetMd5" >> $HTTPLOG
+
+	if [ "$body" != "$targetMd5" ]; then
+		__log_test_fail_body
+		return 1
+	fi
+
+	__log_test_pass
+	return 0
+}
+
+# CR API: Check a single (oldest) event in md5 format (or none if empty) for path.
+# Note that if a file with json message is given, the json shall be compact, no ws except inside string and no newlines.
+# The MD5 will generate different hash if ws/newlines is present or not in otherwise equivalent json.
+# NOTE: the 'genric' spelling in the function name is kept as-is - existing test scripts call this name.
+# arg: <response-code> <topic-url> (EMPTY | <data-file> )
+# (Function for test scripts)
+cr_api_check_single_genric_event_md5_file() {
+	__log_test_start $@
+
+	if [ $# -ne 3 ]; then
+		__print_err "<response-code> <topic-url> (EMPTY | <data-file> )" $@
+		return 1
+	fi
+
+	query="/get-event/"$2
+	res="$(__do_curl_to_api CR GET $query)"
+	# __do_curl_to_api appends the 3-digit http status to the body; split them apart
+	status=${res:${#res}-3}
+
+	if [ $status -ne $1 ]; then
+		__log_test_fail_status_code $1 $status
+		return 1
+	fi
+	body=${res:0:${#res}-3}
+	# EMPTY means: expect that no event is stored for the topic-url
+	if [ $3 == "EMPTY" ]; then
+		if [ ${#body} -ne 0 ]; then
+			__log_test_fail_body
+			return 1
+		else
+			__log_test_pass
+			return 0
+		fi
+	fi
+
+	if [ ! -f $3 ]; then
+		__log_test_fail_general "File $3 does not exist"
+		return 1
+	fi
+
+	# Command substitution strips trailing newline(s) - the hash is computed over the stripped content
+	filedata=$(cat $3)
+
+	# Compute the expected hash locally - use whichever md5 tool the host provides
+	command -v md5 > /dev/null # Mac
+	if [ $? -eq 0 ]; then
+		targetMd5=$(echo -n "$filedata" | md5)
+	else
+		command -v md5sum > /dev/null # Linux
+		if [ $? -eq 0 ]; then
+			targetMd5=$(echo -n "$filedata" | md5sum | cut -d' ' -f 1)  # Need to cut additional info printed by cmd
+		else
+			__log_test_fail_general "Command md5 nor md5sum is available"
+			return 1
+		fi
+	fi
+	targetMd5="\""$targetMd5"\"" # Quotes needed - the CR returns the stored hash as a quoted (json) string
+
+	echo " TARGET MD5 hash: $targetMd5" >> $HTTPLOG
+
+	if [ "$body" != "$targetMd5" ]; then
+		__log_test_fail_body
+		return 1
+	fi
+
	__log_test_pass
	return 0
}
\ No newline at end of file
use_dmaapadp_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers.
+# For docker, the namespace shall be excluded (docker containers are not namespaced).
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+# stdout: "<app-short-name> <app-name> [<namespace>]" - consumed when runtime statistics collection is enabled (--gen-stats)
+__DMAAPADP_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "DMAAPADP $DMAAP_ADP_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+	else
+		echo "DMAAPADP $DMAAP_ADP_APP_NAME"
+	fi
+}
+
#######################################################
# Set http as the protocol to use for all communication to the Dmaap adapter
use_dmaapmed_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers.
+# For docker, the namespace shall be excluded (docker containers are not namespaced).
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+# stdout: "<app-short-name> <app-name> [<namespace>]" - consumed when runtime statistics collection is enabled (--gen-stats)
+__DMAAPMED_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "DMAAPMED $DMAAP_MED_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+	else
+		echo "DMAAPMED $DMAAP_MED_APP_NAME"
+	fi
+}
+
#######################################################
# Set http as the protocol to use for all communication to the Dmaap mediator
use_ecs_rest_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers.
+# For docker, the namespace shall be excluded (docker containers are not namespaced).
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+# stdout: "<app-short-name> <app-name> [<namespace>]" - consumed when runtime statistics collection is enabled (--gen-stats)
+__ECS_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "ECS $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+	else
+		echo "ECS $ECS_APP_NAME"
+	fi
+}
+
#######################################################
use_gateway_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__NGW_statisics_setup() {
+    # RUNMODE is quoted so the test does not error out if the var is unset/empty
+    if [ "$RUNMODE" == "KUBE" ]; then
+        echo "NGW $NRT_GATEWAY_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+    else
+        echo "NGW $NRT_GATEWAY_APP_NAME"
+    fi
+}
+
#######################################################
--- /dev/null
+#!/bin/bash
+
+# ============LICENSE_START===============================================
+# Copyright (C) 2020 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+# This script collects container statistics to a file. Data is separated with semicolon.
+# Works for both docker containers and kubernetes pods.
+# Relies on 'docker stats' so will not work for other container runtimes.
+# Used by the test env.
+# NOTE(review): writes scratch data to tmp/.tmp_stat_out.txt - assumes the CWD has a tmp/ dir; verify against caller.
+
+# args: DOCKER <start-time-seconds> <log-file> <app-short-name> <app-name> [ <app-short-name> <app-name> ]*
+# or
+# args: KUBE <start-time-seconds> <log-file> <app-short-name> <app-name> <namespace> [ <app-short-name> <app-name> <namespace> ]*
+
+print_usage() {
+  echo "Usage: genstat.sh DOCKER <start-time-seconds> <log-file> <app-short-name> <app-name> [ <app-short-name> <app-name> ]*"
+  echo "or"
+  echo "Usage: genstat.sh KUBE <start-time-seconds> <log-file> <app-short-name> <app-name> <namespace> [ <app-short-name> <app-name> <namespace> ]*"
+}
+
+STARTTIME=-1
+
+if [ $# -lt 4 ]; then
+  print_usage
+  exit 1
+fi
+if [ "$1" == "DOCKER" ]; then
+  STAT_TYPE=$1
+  shift
+  STARTTIME=$1
+  shift
+  LOGFILE=$1
+  shift
+  # Remaining args must come in pairs: <app-short-name> <app-name>
+  if [ $(($#%2)) -ne 0 ]; then
+    print_usage
+    exit 1
+  fi
+elif [ "$1" == "KUBE" ]; then
+  STAT_TYPE=$1
+  shift
+  STARTTIME=$1
+  shift
+  LOGFILE=$1
+  shift
+  # Remaining args must come in triples: <app-short-name> <app-name> <namespace>
+  if [ $(($#%3)) -ne 0 ]; then
+    print_usage
+    exit 1
+  fi
+else
+  print_usage
+  exit 1
+fi
+
+
+echo "Time;Name;PIDS;CPU perc;Mem perc" > "$LOGFILE"
+
+# If a start time was given, offset so that logged sample times continue from that value
+if [ "$STARTTIME" -ne -1 ]; then
+  STARTTIME=$(($SECONDS-$STARTTIME))
+fi
+
+# Poll forever, one sample per second - the caller is expected to kill this script
+while true; do
+  docker stats --no-stream --format "table {{.Name}};{{.PIDs}};{{.CPUPerc}};{{.MemPerc}}" > tmp/.tmp_stat_out.txt
+  if [ "$STARTTIME" -eq -1 ]; then
+    STARTTIME=$SECONDS
+  fi
+  CTIME=$(($SECONDS-$STARTTIME))
+
+  TMP_APPS=""
+
+  while read -r line; do
+    APP_LIST=(${@})
+    if [ "$STAT_TYPE" == "DOCKER" ]; then
+      for ((i=0; i<$#; i=i+2)); do
+        SAPP=${APP_LIST[$i]}
+        APP=${APP_LIST[$i+1]}
+        # Plain docker containers - skip any kubernetes-managed ("k8s") containers
+        d=$(echo "$line" | grep -v "k8s" | grep "$APP")
+        if [ ! -z "$d" ]; then
+          # Drop the container name column, strip '%' and use ',' as decimal separator
+          # (',' is presumably for spreadsheet import - TODO confirm)
+          d=$(echo "$d" | cut -d';' -f 2- | sed -e 's/%//g' | sed 's/\./,/g')
+          echo "$SAPP;$CTIME;$d" >> "$LOGFILE"
+          TMP_APPS=$TMP_APPS" $SAPP "
+        fi
+      done
+    else
+      for ((i=0; i<$#; i=i+3)); do
+        SAPP=${APP_LIST[$i]}
+        APP=${APP_LIST[$i+1]}
+        NS=${APP_LIST[$i+2]}
+        # Kubernetes pods - skip the pause ("k8s_POD") containers, match on app and namespace
+        d=$(echo "$line" | grep -v "k8s_POD" | grep "k8s" | grep "$APP" | grep "$NS")
+        if [ ! -z "$d" ]; then
+          d=$(echo "$d" | cut -d';' -f 2- | sed -e 's/%//g' | sed 's/\./,/g')
+          data="$SAPP-$NS;$CTIME;$d"
+          echo "$data" >> "$LOGFILE"
+          TMP_APPS=$TMP_APPS" $SAPP-$NS "
+        fi
+      done
+    fi
+  done < tmp/.tmp_stat_out.txt
+
+  # Log an all-zero sample for each requested app not present in the stats output
+  APP_LIST=(${@})
+  if [ "$STAT_TYPE" == "DOCKER" ]; then
+    for ((i=0; i<$#; i=i+2)); do
+      SAPP=${APP_LIST[$i]}
+      APP=${APP_LIST[$i+1]}
+      if [[ $TMP_APPS != *" $SAPP "* ]]; then
+        data="$SAPP;$CTIME;0;0,00;0,00"
+        echo "$data" >> "$LOGFILE"
+      fi
+    done
+  else
+    for ((i=0; i<$#; i=i+3)); do
+      SAPP=${APP_LIST[$i]}
+      APP=${APP_LIST[$i+1]}
+      NS=${APP_LIST[$i+2]}
+      if [[ $TMP_APPS != *" $SAPP-$NS "* ]]; then
+        data="$SAPP-$NS;$CTIME;0;0,00;0,00"
+        echo "$data" >> "$LOGFILE"
+      fi
+    done
+  fi
+  sleep 1
+done
:
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__HTTPPROXY_statisics_setup() {
+    # RUNMODE is quoted so the test does not error out if the var is unset/empty
+    if [ "$RUNMODE" == "KUBE" ]; then
+        echo "HTTPPROXY $HTTP_PROXY_APP_NAME $KUBE_SIM_NAMESPACE"
+    else
+        echo "HTTPPROXY $HTTP_PROXY_APP_NAME"
+    fi
+}
+
#######################################################
use_kube_proxy_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__KUBEPROXY_statisics_setup() {
+    # NOTE(review): short-name "KUBEPROXXY" (double X) looks like a typo, but it is kept as-is
+    # since downstream stat-log processing may match this exact label - confirm before renaming.
+    # RUNMODE is quoted so the test does not error out if the var is unset/empty.
+    if [ "$RUNMODE" == "KUBE" ]; then
+        echo "KUBEPROXXY $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE"
+    else
+        echo "KUBEPROXXY $KUBE_PROXY_APP_NAME"
+    fi
+}
+
#######################################################
## Access to Kube http proxy
: # handle by __MR_initial_setup
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__MR_statisics_setup() {
+    # RUNMODE is quoted so the test does not error out if the var is unset/empty
+    if [ "$RUNMODE" == "KUBE" ]; then
+        echo "MR $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE"
+    else
+        echo "MR $MR_STUB_APP_NAME"
+    fi
+}
+
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__DMAAPMR_statisics_setup() {
+    # Intentionally empty in both modes: no runtime statistics are collected for DMAAPMR
+    # (RUNMODE quoted so the test does not error out if the var is unset/empty)
+    if [ "$RUNMODE" == "KUBE" ]; then
+        echo ""
+    else
+        echo ""
+    fi
+}
#######################################################
+# Description of port mappings when running MR-STUB only or MR-STUB + MESSAGE-ROUTER
+#
+# 'MR-STUB only' is started when only 'MR' is included in the test script. Both the test scripts and app will then use MR-STUB as a message-router simulator.
+#
+# 'MR-STUB + MESSAGE-ROUTER' is started when 'MR' and 'DMAAPMR' are included in the test scripts. DMAAPMR is the real message router, including kafka and zookeeper.
+# In this configuration, MR-STUB is used by the test script as a frontend to the message-router while apps are using the real message-router.
+#
+# DOCKER KUBE
+# ---------------------------------------------------------------------------------------------------------------------------------------------------
+
+# MR-STUB MR-STUB
+# +++++++ +++++++
+# localhost container service pod
+# ==============================================================================================================================================
+# 10 MR_STUB_LOCALHOST_PORT -> 13 MR_INTERNAL_PORT 15 MR_EXTERNAL_PORT -> 17 MR_INTERNAL_PORT
+# 12 MR_STUB_LOCALHOST_SECURE_PORT -> 14 MR_INTERNAL_SECURE_PORT 16 MR_EXTERNAL_SECURE_PORT -> 18 MR_INTERNAL_SECURE_PORT
+
+
+
+# MESSAGE-ROUTER MESSAGE-ROUTER
+# ++++++++++++++ ++++++++++++++
+# localhost container service pod
+# ===================================================================================================================================================
+# 20 MR_DMAAP_LOCALHOST_PORT -> 23 MR_INTERNAL_PORT 25 MR_EXTERNAL_PORT -> 27 MR_INTERNAL_PORT
+# 22 MR_DMAAP_LOCALHOST_SECURE_PORT -> 24 MR_INTERNAL_SECURE_PORT 26 MR_EXTERNAL_SECURE_PORT -> 28 MR_INTERNAL_SECURE_PORT
+
+
+# Running only the MR-STUB - apps using MR-STUB
+# DOCKER KUBE
+# localhost: 10 and 12 -
+# via proxy (script): 13 and 14 via proxy (script): 15 and 16
+# apps: 13 and 14 apps: 15 and 16
+
+# Running MR-STUB (as frontend for test script) and MESSAGE-ROUTER - apps using MESSAGE-ROUTER
+# DOCKER KUBE
+# localhost: 10 and 12 -
+# via proxy (script): 13 and 14 via proxy (script): 15 and 16
+# apps: 23 and 24 apps: 25 and 26
+#
+
+
+
use_mr_http() {
- __mr_set_protocoll "http" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXT_SECURE_PORT
+ __mr_set_protocoll "http" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXTERNAL_SECURE_PORT
}
use_mr_https() {
- __mr_set_protocoll "https" $MR_INTERNAL_SECURE_PORT $MR_EXTERNAL_SECURE_PORT
+ __mr_set_protocoll "https" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXTERNAL_SECURE_PORT
}
# Setup paths to svc/container for internal and external access
-# args: <protocol> <internal-port> <external-port> <mr-stub-internal-port> <mr-stub-external-port> <mr-stub-internal-secure-port> <mr-stub-external-secure-port>
+# args: <protocol> <internal-port> <external-port> <internal-secure-port> <external-secure-port>
__mr_set_protocoll() {
echo -e $BOLD"$MR_STUB_DISPLAY_NAME and $MR_DMAAP_DISPLAY_NAME protocol setting"$EBOLD
echo -e " Using $BOLD http $EBOLD towards $MR_STUB_DISPLAY_NAME and $MR_DMAAP_DISPLAY_NAME"
MR_HTTPX=$1
+ if [ $MR_HTTPX == "http" ]; then
+ INT_PORT=$2
+ EXT_PORT=$3
+ else
+ INT_PORT=$4
+ EXT_PORT=$5
+ fi
+
# Access via test script
- MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME":"$2 # access from script via proxy, docker
- MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME":"$2 # access from script via proxy, docker
+ MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME":"$INT_PORT # access from script via proxy, docker
+ MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME":"$INT_PORT # access from script via proxy, docker
+	MR_DMAAP_ADAPTER_HTTP="" # Access to dmaap mr via proxy - set only if app is included
MR_SERVICE_PATH=$MR_STUB_PATH # access container->container, docker - access pod->svc, kube
+ MR_KAFKA_SERVICE_PATH=""
__check_included_image "DMAAPMR"
if [ $? -eq 0 ]; then
MR_SERVICE_PATH=$MR_DMAAP_PATH # access container->container, docker - access pod->svc, kube
+ MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+
+ MR_KAFKA_SERVICE_PATH=$MR_KAFKA_APP_NAME":"$MR_KAFKA_PORT
fi
# For directing calls from script to e.g.PMS via message rounter
- # Theses case shall always go though the mr-stub
- MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$4
- MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$6
+ # These cases shall always go though the mr-stub
+ MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$2
+ MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$4
+
+ MR_DMAAP_ADAPTER_TYPE="REST"
+
+
if [ $RUNMODE == "KUBE" ]; then
- MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 # access from script via proxy, kube
- MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 # access from script via proxy, kube
+ MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$EXT_PORT # access from script via proxy, kube
+ MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE":"$EXT_PORT # access from script via proxy, kube
MR_SERVICE_PATH=$MR_STUB_PATH
__check_included_image "DMAAPMR"
if [ $? -eq 0 ]; then
MR_SERVICE_PATH=$MR_DMAAP_PATH
+ MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+ MR_KAFKA_SERVICE_PATH=$MR_KAFKA_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_KAFKA_PORT
fi
__check_prestarted_image "DMAAPMR"
if [ $? -eq 0 ]; then
MR_SERVICE_PATH=$MR_DMAAP_PATH
+ MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+ MR_KAFKA_SERVICE_PATH=$MR_KAFKA_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_KAFKA_PORT
fi
# For directing calls from script to e.g.PMS, via message rounter
# These calls shall always go though the mr-stub
- MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$5
- MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$7
+ MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3
+ MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$5
fi
# For calls from script to the mr-stub
MR_STUB_ADAPTER_TYPE="REST"
echo ""
+
}
+
+# use_mr_http() { 2 3 4 5 6 7
+# __mr_set_protocoll "http" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXT_SECURE_PORT
+# }
+
+# use_mr_https() {
+# __mr_set_protocoll "https" $MR_INTERNAL_SECURE_PORT $MR_EXTERNAL_SECURE_PORT
+# }
+
+# # Setup paths to svc/container for internal and external access
+# # args: <protocol> <internal-port> <external-port> <mr-stub-internal-port> <mr-stub-external-port> <mr-stub-internal-secure-port> <mr-stub-external-secure-port>
+# __mr_set_protocoll() {
+# echo -e $BOLD"$MR_STUB_DISPLAY_NAME and $MR_DMAAP_DISPLAY_NAME protocol setting"$EBOLD
+# echo -e " Using $BOLD http $EBOLD towards $MR_STUB_DISPLAY_NAME and $MR_DMAAP_DISPLAY_NAME"
+
+# ## Access to Dmaap mediator
+
+# MR_HTTPX=$1
+
+# # Access via test script
+# MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME":"$2 # access from script via proxy, docker
+# MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME":"$2 # access from script via proxy, docker
+# MR_DMAAP_ADAPTER_HTTP="" # Access to dmaap mr via proyx - set only if app is included
+
+# MR_SERVICE_PATH=$MR_STUB_PATH # access container->container, docker - access pod->svc, kube
+# __check_included_image "DMAAPMR"
+# if [ $? -eq 0 ]; then
+# MR_SERVICE_PATH=$MR_DMAAP_PATH # access container->container, docker - access pod->svc, kube
+# MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+# fi
+
+# # For directing calls from script to e.g.PMS via message rounter
+# # These cases shall always go though the mr-stub
+# MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$4
+# MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$6
+
+# MR_DMAAP_ADAPTER_TYPE="REST"
+
+# if [ $RUNMODE == "KUBE" ]; then
+# MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 # access from script via proxy, kube
+# MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 # access from script via proxy, kube
+
+# MR_SERVICE_PATH=$MR_STUB_PATH
+# __check_included_image "DMAAPMR"
+# if [ $? -eq 0 ]; then
+# MR_SERVICE_PATH=$MR_DMAAP_PATH
+# MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+# fi
+# __check_prestarted_image "DMAAPMR"
+# if [ $? -eq 0 ]; then
+# MR_SERVICE_PATH=$MR_DMAAP_PATH
+# MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+# fi
+
+# # For directing calls from script to e.g.PMS, via message rounter
+# # These calls shall always go though the mr-stub
+# MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$5
+# MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$7
+# fi
+
+# # For calls from script to the mr-stub
+# MR_STUB_ADAPTER=$MR_STUB_PATH
+# MR_STUB_ADAPTER_TYPE="REST"
+
+# echo ""
+
+# }
+
# Export env vars for config files, docker compose and kube resources
# args: -
__dmaapmr_export_vars() {
export MR_DMAAP_LOCALHOST_SECURE_PORT
export MR_INTERNAL_SECURE_PORT
export MR_DMAAP_HOST_MNT_DIR
+
+ export KUBE_ONAP_NAMESPACE
+ export MR_EXTERNAL_PORT
+ export MR_EXTERNAL_SECURE_PORT
+ export MR_KAFKA_PORT
+ export MR_ZOOKEEPER_PORT
+
+ export MR_KAFKA_SERVICE_PATH
}
# Export env vars for config files, docker compose and kube resources
export MRSTUB_IMAGE
export MR_INTERNAL_PORT
export MR_INTERNAL_SECURE_PORT
+ export MR_EXTERNAL_PORT
+ export MR_EXTERNAL_SECURE_PORT
export MR_STUB_LOCALHOST_PORT
export MR_STUB_LOCALHOST_SECURE_PORT
export MR_STUB_CERT_MOUNT_DIR
export MR_STUB_DISPLAY_NAME
+
+ export KUBE_ONAP_NAMESPACE
+ export MR_EXTERNAL_PORT
+
+ export MR_KAFKA_SERVICE_PATH
}
__dmaapmr_export_vars
- #export MR_DMAAP_APP_NAME
- export MR_DMAAP_KUBE_APP_NAME=message-router
- MR_DMAAP_APP_NAME=$MR_DMAAP_KUBE_APP_NAME
- export KUBE_ONAP_NAMESPACE
- export MR_EXTERNAL_PORT
- export MR_INTERNAL_PORT
- export MR_EXTERNAL_SECURE_PORT
- export MR_INTERNAL_SECURE_PORT
- export ONAP_DMAAPMR_IMAGE
-
- export MR_KAFKA_BWDS_NAME=akfak-bwds
- export MR_KAFKA_BWDS_NAME=kaka
- export KUBE_ONAP_NAMESPACE
-
- export MR_ZOOKEEPER_APP_NAME
- export ONAP_ZOOKEEPER_IMAGE
-
#Check if onap namespace exists, if not create it
__kube_create_namespace $KUBE_ONAP_NAMESPACE
- # TODO - Fix domain name substitution in the prop file
- # Create config maps - dmaapmr app
- configfile=$PWD/tmp/MsgRtrApi.properties
- cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/mr/KUBE-MsgRtrApi.properties $configfile
+ # copy config files
+ MR_MNT_CONFIG_BASEPATH=$SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR$MR_DMAAP_HOST_MNT_DIR
+ cp -r $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR$MR_DMAAP_HOST_CONFIG_DIR/* $MR_MNT_CONFIG_BASEPATH
+ # Create config maps - dmaapmr app
+ configfile=$MR_MNT_CONFIG_BASEPATH/mr/MsgRtrApi.properties
output_yaml=$PWD/tmp/dmaapmr_msgrtrapi_cfc.yaml
__kube_create_configmap dmaapmr-msgrtrapi.properties $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
- configfile=$PWD/tmp/logback.xml
- cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/mr/logback.xml $configfile
+ configfile=$MR_MNT_CONFIG_BASEPATH/mr/logback.xml
output_yaml=$PWD/tmp/dmaapmr_logback_cfc.yaml
__kube_create_configmap dmaapmr-logback.xml $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
- configfile=$PWD/tmp/cadi.properties
- cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/mr/cadi.properties $configfile
+ configfile=$MR_MNT_CONFIG_BASEPATH/mr/cadi.properties
output_yaml=$PWD/tmp/dmaapmr_cadi_cfc.yaml
__kube_create_configmap dmaapmr-cadi.properties $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
# Create config maps - kafka app
- configfile=$PWD/tmp/zk_client_jaas.conf
- cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/kafka/zk_client_jaas.conf $configfile
+ configfile=$MR_MNT_CONFIG_BASEPATH/kafka/zk_client_jaas.conf
output_yaml=$PWD/tmp/dmaapmr_zk_client_cfc.yaml
__kube_create_configmap dmaapmr-zk-client-jaas.conf $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
# Create config maps - zookeeper app
- configfile=$PWD/tmp/zk_server_jaas.conf
- cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/zk/zk_server_jaas.conf $configfile
+ configfile=$MR_MNT_CONFIG_BASEPATH/zk/zk_server_jaas.conf
output_yaml=$PWD/tmp/dmaapmr_zk_server_cfc.yaml
__kube_create_configmap dmaapmr-zk-server-jaas.conf $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
__kube_create_instance app $MR_DMAAP_APP_NAME $input_yaml $output_yaml
- echo " Retrieving host and ports for service..."
- MR_DMAAP_HOST_NAME=$(__kube_get_service_host $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE)
+ # echo " Retrieving host and ports for service..."
+ # MR_DMAAP_HOST_NAME=$(__kube_get_service_host $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE)
- MR_EXT_PORT=$(__kube_get_service_port $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE "http")
- MR_EXT_SECURE_PORT=$(__kube_get_service_port $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE "https")
+ # MR_EXT_PORT=$(__kube_get_service_port $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE "http")
+ # MR_EXT_SECURE_PORT=$(__kube_get_service_port $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE "https")
- echo " Host IP, http port, https port: $MR_DMAAP_APP_NAME $MR_EXT_PORT $MR_EXT_SECURE_PORT"
- MR_SERVICE_PATH=""
- if [ $MR_HTTPX == "http" ]; then
- MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_EXT_PORT
- MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_PORT
- else
- MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_EXT_SECURE_PORT
- MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_SECURE_PORT
+ # echo " Host IP, http port, https port: $MR_DMAAP_APP_NAME $MR_EXT_PORT $MR_EXT_SECURE_PORT"
+ # MR_SERVICE_PATH=""
+ # if [ $MR_HTTPX == "http" ]; then
+ # MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_EXT_PORT
+ # MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_PORT
+ # else
+ # MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_EXT_SECURE_PORT
+ # MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_SECURE_PORT
+ # fi
+
+ __check_service_start $MR_DMAAP_APP_NAME $MR_DMAAP_PATH$MR_DMAAP_ALIVE_URL
+
+ # Cannot create topics, returns 400 forever.....topics will be created during pipeclean below
+ #__create_topic $MR_READ_TOPIC "Topic for reading policy messages"
+
+ #__create_topic $MR_WRITE_TOPIC "Topic for writing policy messages"
+
+# __dmaap_pipeclean $MR_READ_TOPIC "/events/$MR_READ_TOPIC" "/events/$MR_READ_TOPIC/users/policy-agent?timeout=1000&limit=100"
+#
+# __dmaap_pipeclean $MR_WRITE_TOPIC "/events/$MR_WRITE_TOPIC" "/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=1000&limit=100"
+
+
+ #__dmaap_pipeclean "unauthenticated.dmaapmed.json" "/events/unauthenticated.dmaapmed.json" "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages?timeout=1000&limit=100"
+ #__dmaap_pipeclean "unauthenticated.dmaapadp.json" "/events/unauthenticated.dmaapadp.json" "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs?timeout=1000&limit=100"
+
+ if [ $# -gt 0 ]; then
+ if [ $(($#%3)) -eq 0 ]; then
+ while [ $# -gt 0 ]; do
+ __dmaap_pipeclean "$1" "$2/$1" "$2/$1/$3?timeout=1000&limit=100"
+ shift; shift; shift;
+ done
+ else
+ echo -e $RED" args: start_mr [<topic-name> <base-url> <group-and-user-url>]*"$ERED
+ echo -e $RED" Got: $@"$ERED
+ exit 1
+ fi
fi
- __check_service_start $MR_DMAAP_APP_NAME $MR_DMAAP_PATH$MR_DMAAP_ALIVE_URL
+ echo " Current topics:"
+ curlString="$MR_DMAAP_PATH/topics"
+ result=$(__do_curl "$curlString")
+ echo $result | indent2
fi
if [ $retcode_included_mr -eq 0 ]; then
- #exporting needed var for deployment
- export MR_STUB_APP_NAME
- export KUBE_ONAP_NAMESPACE
- export MRSTUB_IMAGE
- export MR_INTERNAL_PORT
- export MR_INTERNAL_SECURE_PORT
- export MR_EXTERNAL_PORT
- export MR_EXTERNAL_SECURE_PORT
+
+ __mr_export_vars
if [ $retcode_prestarted_dmaapmr -eq 0 ] || [ $retcode_included_dmaapmr -eq 0 ]; then # Set topics for dmaap
export TOPIC_READ="http://$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE:$MR_INTERNAL_PORT/events/$MR_READ_TOPIC"
export TOPIC_WRITE="http://$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE:$MR_INTERNAL_PORT/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=15000&limit=100"
+ export GENERIC_TOPICS_UPLOAD_BASEURL="http://$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE:$MR_INTERNAL_PORT"
else
export TOPIC_READ=""
export TOPIC_WRITE=""
+ export GENERIC_TOPICS_UPLOAD_BASEURL=""
fi
#Check if onap namespace exists, if not create it
fi
-
- echo " Retrieving host and ports for service..."
- MR_STUB_HOST_NAME=$(__kube_get_service_host $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE)
-
- MR_EXT_PORT=$(__kube_get_service_port $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE "http")
- MR_EXT_SECURE_PORT=$(__kube_get_service_port $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE "https")
-
- echo " Host IP, http port, https port: $MR_STUB_APP_NAME $MR_EXT_PORT $MR_EXT_SECURE_PORT"
- if [ $MR_HTTPX == "http" ]; then
- MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT
- if [ -z "$MR_SERVICE_PATH" ]; then
- MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_PORT
- fi
- else
- MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT
- if [ -z "$MR_SERVICE_PATH" ]; then
- MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_SECURE_PORT
- fi
- fi
- MR_ADAPTER_HTTP="http://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT
- MR_ADAPTER_HTTPS="https://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT
-
- MR_STUB_ADAPTER=$MR_STUB_PATH
- MR_STUB_ADAPTER_TYPE="REST"
+ # echo " Retrieving host and ports for service..."
+ # MR_STUB_HOST_NAME=$(__kube_get_service_host $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE)
+
+ # MR_EXT_PORT=$(__kube_get_service_port $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE "http")
+ # MR_EXT_SECURE_PORT=$(__kube_get_service_port $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE "https")
+
+ # echo " Host IP, http port, https port: $MR_STUB_APP_NAME $MR_EXT_PORT $MR_EXT_SECURE_PORT"
+ # if [ $MR_HTTPX == "http" ]; then
+ # MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT
+ # if [ -z "$MR_SERVICE_PATH" ]; then
+ # MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_PORT
+ # fi
+ # else
+ # MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT
+ # if [ -z "$MR_SERVICE_PATH" ]; then
+ # MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_SECURE_PORT
+ # fi
+ # fi
+ # MR_ADAPTER_HTTP="http://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT
+ # MR_ADAPTER_HTTPS="https://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT
+
+ # MR_STUB_ADAPTER=$MR_STUB_PATH
+ # MR_STUB_ADAPTER_TYPE="REST"
__check_service_start $MR_STUB_APP_NAME $MR_STUB_PATH$MR_STUB_ALIVE_URL
export TOPIC_READ=""
export TOPIC_WRITE=""
+ export GENERIC_TOPICS_UPLOAD_BASEURL=""
if [ $retcode_dmaapmr -eq 0 ]; then # Set topics for dmaap
export TOPIC_READ="http://$MR_DMAAP_APP_NAME:$MR_INTERNAL_PORT/events/$MR_READ_TOPIC"
export TOPIC_WRITE="http://$MR_DMAAP_APP_NAME:$MR_INTERNAL_PORT/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=15000&limit=100"
+ export GENERIC_TOPICS_UPLOAD_BASEURL="http://$MR_DMAAP_APP_NAME:$MR_INTERNAL_PORT"
fi
__dmaapmr_export_vars
if [ $retcode_dmaapmr -eq 0 ]; then
+
+ # copy config files
+ MR_MNT_CONFIG_BASEPATH=$SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR$MR_DMAAP_HOST_MNT_DIR
+ cp -r $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR$MR_DMAAP_HOST_CONFIG_DIR/* $MR_MNT_CONFIG_BASEPATH
+
+ # substitute vars
+ configfile=$MR_MNT_CONFIG_BASEPATH/mr/MsgRtrApi.properties
+ cp $configfile $configfile"_tmp"
+ envsubst < $configfile"_tmp" > $configfile
+
__start_container $MR_DMAAP_COMPOSE_DIR "" NODOCKERARGS 1 $MR_DMAAP_APP_NAME
__check_service_start $MR_DMAAP_APP_NAME $MR_DMAAP_PATH$MR_DMAAP_ALIVE_URL
- __create_topic $MR_READ_TOPIC "Topic for reading policy messages"
+ # Cannot create topics, returns 400 forever.....topics will be created during pipeclean below
+ #__create_topic $MR_READ_TOPIC "Topic for reading policy messages"
+
+ #__create_topic $MR_WRITE_TOPIC "Topic for writing policy messages"
+
+ #__dmaap_pipeclean $MR_READ_TOPIC "/events/$MR_READ_TOPIC" "/events/$MR_READ_TOPIC/users/policy-agent?timeout=1000&limit=100"
- __create_topic $MR_WRITE_TOPIC "Topic for writing policy messages"
+ #__dmaap_pipeclean $MR_WRITE_TOPIC "/events/$MR_WRITE_TOPIC" "/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=1000&limit=100"
- __dmaap_pipeclean $MR_READ_TOPIC "/events/$MR_READ_TOPIC" "/events/$MR_READ_TOPIC/users/policy-agent?timeout=1000&limit=100"
+ if [ $# -gt 0 ]; then
+ if [ $(($#%3)) -eq 0 ]; then
+ while [ $# -gt 0 ]; do
+ __dmaap_pipeclean "$1" "$2/$1" "$2/$1/$3?timeout=1000&limit=100"
+ shift; shift; shift;
+ done
+ else
+ echo -e $RED" args: start_mr [<topic-name> <base-url> <group-and-user-url>]*"$ERED
+ echo -e $RED" Got: $@"$ERED
+ exit 1
+ fi
+ fi
- __dmaap_pipeclean $MR_WRITE_TOPIC "/events/$MR_WRITE_TOPIC" "/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=1000&limit=100"
+ #__dmaap_pipeclean "unauthenticated.dmaapmed.json" "/events/unauthenticated.dmaapmed.json" "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages?timeout=1000&limit=100"
+ #__dmaap_pipeclean "unauthenticated.dmaapadp.json" "/events/unauthenticated.dmaapadp.json" "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs?timeout=1000&limit=100"
echo " Current topics:"
curlString="$MR_DMAAP_PATH/topics"
# Create a dmaap mr topic
# args: <topic name> <topic-description>
__create_topic() {
- echo -ne " Creating read topic: $1"$SAMELINE
+ echo -ne " Creating topic: $1"$SAMELINE
json_topic="{\"topicName\":\"$1\",\"partitionCount\":\"2\", \"replicationCount\":\"3\", \"transactionEnabled\":\"false\",\"topicDescription\":\"$2\"}"
- echo $json_topic > ./tmp/$1.json
+ fname="./tmp/$1.json"
+ echo $json_topic > $fname
- curlString="$MR_DMAAP_PATH/topics/create -X POST -H Content-Type:application/json -d@./tmp/$1.json"
- topic_retries=5
+ query="/topics/create"
+ topic_retries=10
while [ $topic_retries -gt 0 ]; do
let topic_retries=topic_retries-1
- result=$(__do_curl "$curlString")
- if [ $? -eq 0 ]; then
+ res="$(__do_curl_to_api DMAAPMR POST $query $fname)"
+ status=${res:${#res}-3}
+
+ if [[ $status == "2"* ]]; then
topic_retries=0
- echo -e " Creating read topic: $1 $GREEN OK $EGREEN"
- fi
- if [ $? -ne 0 ]; then
+ echo -e " Creating topic: $1 $GREEN OK $EGREEN"
+ else
if [ $topic_retries -eq 0 ]; then
- echo -e " Creating read topic: $1 $RED Failed $ERED"
+ echo -e " Creating topic: $1 $RED Failed $ERED"
((RES_CONF_FAIL++))
return 1
else
fi
fi
done
+ echo
return 0
}
# Do a pipeclean of a topic - to overcome dmaap mr bug...
-# args: <topic> <post-url> <read-url>
+# args: <topic> <post-url> <read-url> [<num-retries>]
__dmaap_pipeclean() {
pipeclean_retries=50
+ if [ $# -eq 4 ]; then
+ pipeclean_retries=$4
+ fi
echo -ne " Doing dmaap-mr pipe cleaning on topic: $1"$SAMELINE
while [ $pipeclean_retries -gt 0 ]; do
- echo "{\"pipeclean-$1\":$pipeclean_retries}" > ./tmp/pipeclean.json
+ if [[ $1 == *".text" ]]; then
+ echo "pipeclean-$1:$pipeclean_retries" > ./tmp/__dmaap_pipeclean.txt
+ curlString="$MR_DMAAP_PATH$2 -X POST -H Content-Type:text/plain -d@./tmp/__dmaap_pipeclean.txt"
+ else
+ echo "{\"pipeclean-$1\":$pipeclean_retries}" > ./tmp/__dmaap_pipeclean.json
+ curlString="$MR_DMAAP_PATH$2 -X POST -H Content-Type:application/json -d@./tmp/__dmaap_pipeclean.json"
+ fi
let pipeclean_retries=pipeclean_retries-1
- curlString="$MR_DMAAP_PATH$2 -X POST -H Content-Type:application/json -d@./tmp/pipeclean.json"
result=$(__do_curl "$curlString")
if [ $? -ne 0 ]; then
sleep 1
# arg: <topic-url> <json-msg>
# (Function for test scripts)
mr_api_send_json() {
- __log_test_start $@
+ __log_conf_start $@
if [ $# -ne 2 ]; then
__print_err "<topic-url> <json-msg>" $@
return 1
status=${res:${#res}-3}
if [ $status -ne 200 ]; then
- __log_test_fail_status_code 200 $status
+ __log_conf_fail_status_code 200 $status
+ return 1
+ fi
+
+ __log_conf_ok
+ return 0
+}
+
+# Send text to topic in mr-stub.
+# arg: <topic-url> <text-msg>
+# (Function for test scripts)
+mr_api_send_text() {
+	__log_conf_start $@
+	if [ $# -ne 2 ]; then
+		__print_err "<topic-url> <text-msg>" $@
+		return 1
+	fi
+	query=$1
+	fname=$PWD/tmp/text_payload_to_mr.txt
+	# Quote the payload so embedded whitespace in the text message is preserved verbatim
+	echo "$2" > $fname
+	res="$(__do_curl_to_api MRSTUB POST $query $fname text/plain)"
+
+	status=${res:${#res}-3}
+	if [ $status -ne 200 ]; then
+		__log_conf_fail_status_code 200 $status
+		return 1
+	fi
+
+	__log_conf_ok
+	return 0
+}
+
+# Send json file to topic in mr-stub.
+# arg: <topic-url> <json-file>
+# (Function for test scripts)
+mr_api_send_json_file() {
+	__log_conf_start $@
+	if [ $# -ne 2 ]; then
+		__print_err "<topic-url> <json-file>" $@
+		return 1
+	fi
+	query=$1
+	if [ ! -f $2 ]; then
+		# Conf-level fail logger - matches __log_conf_start/__log_conf_ok used in this function
+		__log_conf_fail_general "File $2 does not exist"
+		return 1
+	fi
+	#Create json array for mr
+	datafile="tmp/mr_api_send_json_file.json"
+	{ echo -n "[" ; cat $2 ; echo -n "]" ;} > $datafile
+
+	res="$(__do_curl_to_api MRSTUB POST $query $datafile)"
+
+	status=${res:${#res}-3}
+	if [ $status -ne 200 ]; then
+		__log_conf_fail_status_code 200 $status
+		return 1
+	fi
+
+	__log_conf_ok
+	return 0
+}
+
+# Send text file to topic in mr-stub.
+# arg: <topic-url> <text-file>
+# (Function for test scripts)
+mr_api_send_text_file() {
+	__log_conf_start $@
+	if [ $# -ne 2 ]; then
+		__print_err "<topic-url> <text-file>" $@
+		return 1
+	fi
+	query=$1
+	if [ ! -f $2 ]; then
+		# Conf-level fail logger - matches __log_conf_start/__log_conf_ok used in this function
+		__log_conf_fail_general "File $2 does not exist"
+		return 1
+	fi
+
+	res="$(__do_curl_to_api MRSTUB POST $query $2 text/plain)"
+
+	status=${res:${#res}-3}
+	if [ $status -ne 200 ]; then
+		__log_conf_fail_status_code 200 $status
		return 1
	fi
-	__log_test_pass
+	__log_conf_ok
+	return 0
+}
+
+# Create json file for payload
+# arg: <size-in-kb> <filename>
+mr_api_generate_json_payload_file() {
+	__log_conf_start $@
+	if [ $# -ne 2 ]; then
+		__print_err "<size-in-kb> <filename>" $@
+		return 1
+	fi
+	if [ $1 -lt 1 ] || [ $1 -gt 10000 ]; then
+		__log_conf_fail_general "Only size between 1k and 10000k supported"
+		return 1
+	fi
+	echo -n "{\"a\":[" > $2
+	LEN=$(($1*150))
+	echo -n "\"a0\"" >> $2
+	for ((idx=1; idx<$LEN; idx++))
+	do
+		echo -n ",\"a$idx\"" >> $2
+	done
+	echo -n "]}" >> $2
+
+	__log_conf_ok
+	return 0
+}
+
+# Create text file for payload
+# arg: <size-in-kb> <filename>
+mr_api_generate_text_payload_file() {
+	__log_conf_start $@
+	if [ $# -ne 2 ]; then
+		__print_err "<size-in-kb> <filename>" $@
+		return 1
+	fi
+	if [ $1 -lt 1 ] || [ $1 -gt 10000 ]; then
+		__log_conf_fail_general "Only size between 1k and 10000k supported"
+		return 1
+	fi
+	echo -n "" > $2
+	LEN=$(($1*100))
+	for ((idx=0; idx<$LEN; idx++))
+	do
+		echo -n "ABCDEFGHIJ" >> $2
+	done
+
+	__log_conf_ok
	return 0
}
use_prod_stub_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# NOTE(review): "statisics" is a typo for "statistics" but the name must be kept - it is
+# resolved dynamically as __<app>_statisics_setup by the common env and must match all apps.
+# args: -
+__PRODSTUB_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "PRODSTUB $PROD_STUB_APP_NAME $KUBE_SIM_NAMESPACE"
+	else
+		echo "PRODSTUB $PROD_STUB_APP_NAME"
+	fi
+}
+
#######################################################
# Set http as the protocol to use for all communication to the Prod stub sim
:
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# Returns an empty string: no runtime statistics are collected for this system app.
+# args: -
+__PVCCLEANER_statisics_setup() {
+	echo ""
+}
+
#######################################################
# This is a system app, all usage in testcase_common.sh
\ No newline at end of file
use_rapp_catalogue_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__RC_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "RC $RAPP_CAT_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+	else
+		echo "RC $RAPP_CAT_APP_NAME"
+	fi
+}
+
#######################################################
# Set http as the protocol to use for all communication to the Rapp catalogue
use_simulator_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# NOTE(review): both branches intentionally emit an empty string (no stats for RICSIM);
+# the if/else scaffolding mirrors the other __<app>_statisics_setup functions.
+# args: -
+__RICSIM_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo ""
+	else
+		echo ""
+	fi
+}
+
#######################################################
KUBE_NONRTRIC_NAMESPACE="nonrtric" # Namespace for all nonrtric components
KUBE_SIM_NAMESPACE="nonrtric-ft" # Namespace for simulators (except MR and RICSIM)
-KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
KUBE_ONAP_NAMESPACE="onap" # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap" # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap" # Namespace for sdnc
POLICY_AGENT_EXTERNAL_PORT=8081 # Policy Agent container external port (host -> container)
POLICY_AGENT_INTERNAL_PORT=8081 # Policy Agent container internal port (container -> container)
POLICY_AGENT_DATA_FILE="application_configuration.json" # Container data file name
POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
-MR_DMAAP_APP_NAME="dmaap-mr" # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router" # Name for the Dmaap MR
MR_STUB_APP_NAME="mr-stub" # Name of the MR stub
MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
MR_STUB_DISPLAY_NAME="Message Router stub"
MR_DMAAP_ALIVE_URL="/topics" # Base path for dmaap-mr alive check
MR_DMAAP_COMPOSE_DIR="dmaapmr" # Dir in simulator_group for dmaap mr for - docker-compose
MR_STUB_COMPOSE_DIR="mrstub" # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka" # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka" # Kafka app name, if just named "kafka" the image will not start...
+MR_KAFKA_PORT=9092 # Kafka port number
MR_ZOOKEEPER_APP_NAME="zookeeper" # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt" # Config files dir on localhost
-
+MR_ZOOKEEPER_PORT="2181" # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt" # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs" # Config files dir on localhost
CR_APP_NAME="callback-receiver" # Name for the Callback receiver
CR_DISPLAY_NAME="Callback Reciever"
CR_EXTERNAL_SECURE_PORT=8091 # Callback receiver container external secure port (host -> container)
CR_INTERNAL_SECURE_PORT=8091 # Callback receiver container internal secure port (container -> container)
CR_APP_CALLBACK="/callbacks" # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr" # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text" # Url for callbacks (data containing text data)
CR_ALIVE_URL="/" # Base path for alive check
CR_COMPOSE_DIR="cr" # Dir in simulator_group for docker-compose
KUBE_PROXY_WEB_INTERNAL_PORT=8081 # Kube Http Proxy container internal port (container -> container)
KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783 # Kube Proxy container external secure port (host -> container)
KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784 # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785 # Kube Proxy container external secure port, docker (host -> container)
+
KUBE_PROXY_PATH="" # Proxy url path, will be set if proxy is started
KUBE_PROXY_ALIVE_URL="/" # Base path for alive check
KUBE_PROXY_COMPOSE_DIR="kubeproxy" # Dir in simulator_group for docker-compose
KUBE_NONRTRIC_NAMESPACE="nonrtric" # Namespace for all nonrtric components
KUBE_SIM_NAMESPACE="nonrtric-ft" # Namespace for simulators (except MR and RICSIM)
-KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
KUBE_ONAP_NAMESPACE="onap" # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap" # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap" # Namespace for sdnc
POLICY_AGENT_EXTERNAL_PORT=8081 # Policy Agent container external port (host -> container)
POLICY_AGENT_INTERNAL_PORT=8081 # Policy Agent container internal port (container -> container)
ECS_VERSION="V1-2" # Version where the types are added in the producer registration
ECS_FEATURE_LEVEL="" # Space separated list of features
-MR_DMAAP_APP_NAME="dmaap-mr" # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router" # Name for the Dmaap MR
MR_STUB_APP_NAME="mr-stub" # Name of the MR stub
MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
MR_STUB_DISPLAY_NAME="Message Router stub"
MR_DMAAP_ALIVE_URL="/topics" # Base path for dmaap-mr alive check
MR_DMAAP_COMPOSE_DIR="dmaapmr" # Dir in simulator_group for dmaap mr for - docker-compose
MR_STUB_COMPOSE_DIR="mrstub" # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka" # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka" # Kafka app name, if just named "kafka" the image will not start...
+MR_KAFKA_PORT=9092 # Kafka port number
MR_ZOOKEEPER_APP_NAME="zookeeper" # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt" # Config files dir on localhost
+MR_ZOOKEEPER_PORT="2181" # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt" # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs" # Config files dir on localhost
CR_APP_NAME="callback-receiver" # Name for the Callback receiver
CR_DISPLAY_NAME="Callback Reciever"
CR_INTERNAL_SECURE_PORT=8091 # Callback receiver container internal secure port (container -> container)
CR_APP_NAME="callback-receiver" # Name for the Callback receiver
CR_APP_CALLBACK="/callbacks" # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr" # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text" # Url for callbacks (data containing text data)
CR_ALIVE_URL="/" # Base path for alive check
CR_COMPOSE_DIR="cr" # Dir in simulator_group for docker-compose
KUBE_PROXY_WEB_INTERNAL_PORT=8081 # Kube Http Proxy container internal port (container -> container)
KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783 # Kube Proxy container external secure port (host -> container)
KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784 # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785 # Kube Proxy container external secure port, docker (host -> container)
+
KUBE_PROXY_PATH="" # Proxy url path, will be set if proxy is started
KUBE_PROXY_ALIVE_URL="/" # Base path for alive check
KUBE_PROXY_COMPOSE_DIR="kubeproxy" # Dir in simulator_group for docker-compose
# Policy Agent image and tags
POLICY_AGENT_IMAGE_BASE="onap/ccsdk-oran-a1policymanagementservice"
-POLICY_AGENT_IMAGE_TAG_LOCAL="1.3.0-SNAPSHOT"
-POLICY_AGENT_IMAGE_TAG_REMOTE_SNAPSHOT="1.3.0-SNAPSHOT"
-POLICY_AGENT_IMAGE_TAG_REMOTE="1.3.0-STAGING-latest" #Will use snapshot repo
-POLICY_AGENT_IMAGE_TAG_REMOTE_RELEASE="1.3.0"
+POLICY_AGENT_IMAGE_TAG_LOCAL="1.2.4-SNAPSHOT"
+POLICY_AGENT_IMAGE_TAG_REMOTE_SNAPSHOT="1.2.4-SNAPSHOT"
+POLICY_AGENT_IMAGE_TAG_REMOTE="1.2.4-STAGING-latest" #Will use snapshot repo
+POLICY_AGENT_IMAGE_TAG_REMOTE_RELEASE="1.2.3"
# SDNC A1 Controller remote image and tag
SDNC_A1_CONTROLLER_IMAGE_BASE="onap/sdnc-image"
#ONAP Zookeeper remote image and tag
ONAP_ZOOKEEPER_IMAGE_BASE="onap/dmaap/zookeeper"
-ONAP_ZOOKEEPER_IMAGE_TAG_REMOTE_RELEASE_ONAP="6.0.3"
+ONAP_ZOOKEEPER_IMAGE_TAG_REMOTE_RELEASE_ONAP="6.1.0"
#No local image for ONAP Zookeeper, remote image always used
#ONAP Kafka remote image and tag
ONAP_KAFKA_IMAGE_BASE="onap/dmaap/kafka111"
-ONAP_KAFKA_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.0.4"
+ONAP_KAFKA_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.1.1"
#No local image for ONAP Kafka, remote image always used
#ONAP DMAAP-MR remote image and tag
ONAP_DMAAPMR_IMAGE_BASE="onap/dmaap/dmaap-mr"
-ONAP_DMAAPMR_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.1.18"
+ONAP_DMAAPMR_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.3.0"
#No local image for ONAP DMAAP-MR, remote image always used
#Kube proxy remote image and tag
KUBE_NONRTRIC_NAMESPACE="nonrtric" # Namespace for all nonrtric components
KUBE_SIM_NAMESPACE="nonrtric-ft" # Namespace for simulators (except MR and RICSIM)
-KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
KUBE_ONAP_NAMESPACE="onap" # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap" # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap" # Namespace for sdnc
POLICY_AGENT_EXTERNAL_PORT=8081 # Policy Agent container external port (host -> container)
POLICY_AGENT_INTERNAL_PORT=8081 # Policy Agent container internal port (container -> container)
ECS_VERSION="V1-2" # Version where the types are added in the producer registration
ECS_FEATURE_LEVEL="INFO-TYPES" # Space separated list of features
-MR_DMAAP_APP_NAME="dmaap-mr" # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router" # Name for the Dmaap MR
MR_STUB_APP_NAME="mr-stub" # Name of the MR stub
MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
MR_STUB_DISPLAY_NAME="Message Router stub"
MR_DMAAP_ALIVE_URL="/topics" # Base path for dmaap-mr alive check
MR_DMAAP_COMPOSE_DIR="dmaapmr" # Dir in simulator_group for dmaap mr for - docker-compose
MR_STUB_COMPOSE_DIR="mrstub" # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka" # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka" # Kafka app name, if just named "kafka" the image will not start...
+MR_KAFKA_PORT=9092 # Kafka port number
MR_ZOOKEEPER_APP_NAME="zookeeper" # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt2" # Config files dir on localhost
+MR_ZOOKEEPER_PORT="2181" # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt" # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs" # Config files dir on localhost
CR_APP_NAME="callback-receiver" # Name for the Callback receiver
CR_DISPLAY_NAME="Callback Reciever"
CR_INTERNAL_SECURE_PORT=8091 # Callback receiver container internal secure port (container -> container)
CR_APP_NAME="callback-receiver" # Name for the Callback receiver
CR_APP_CALLBACK="/callbacks" # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr" # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text" # Url for callbacks (data containing text data)
CR_ALIVE_URL="/" # Base path for alive check
CR_COMPOSE_DIR="cr" # Dir in simulator_group for docker-compose
KUBE_PROXY_WEB_INTERNAL_PORT=8081 # Kube Http Proxy container internal port (container -> container)
KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783 # Kube Proxy container external secure port (host -> container)
KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784 # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785 # Kube Proxy container external secure port, docker (host -> container)
+
KUBE_PROXY_PATH="" # Proxy url path, will be set if proxy is started
KUBE_PROXY_ALIVE_URL="/" # Base path for alive check
KUBE_PROXY_COMPOSE_DIR="kubeproxy" # Dir in simulator_group for docker-compose
KUBE_NONRTRIC_NAMESPACE="nonrtric" # Namespace for all nonrtric components
KUBE_SIM_NAMESPACE="nonrtric-ft" # Namespace for simulators (except MR and RICSIM)
-KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
KUBE_ONAP_NAMESPACE="onap" # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap" # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap" # Namespace for sdnc
POLICY_AGENT_EXTERNAL_PORT=8081 # Policy Agent container external port (host -> container)
POLICY_AGENT_INTERNAL_PORT=8081 # Policy Agent container internal port (container -> container)
ECS_VERSION="V1-2" # Version where the types are added in the producer registration
ECS_FEATURE_LEVEL="" # Space separated list of features
-MR_DMAAP_APP_NAME="dmaap-mr" # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router" # Name for the Dmaap MR
MR_STUB_APP_NAME="mr-stub" # Name of the MR stub
MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
MR_STUB_DISPLAY_NAME="Message Router stub"
MR_DMAAP_ALIVE_URL="/topics" # Base path for dmaap-mr alive check
MR_DMAAP_COMPOSE_DIR="dmaapmr" # Dir in simulator_group for dmaap mr for - docker-compose
MR_STUB_COMPOSE_DIR="mrstub" # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka" # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka" # Kafka app name, if just named "kafka" the image will not start...
+MR_KAFKA_PORT=9092 # Kafka port number
MR_ZOOKEEPER_APP_NAME="zookeeper" # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt" # Config files dir on localhost
-
+MR_ZOOKEEPER_PORT="2181" # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt" # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs" # Config files dir on localhost
CR_APP_NAME="callback-receiver" # Name for the Callback receiver
CR_DISPLAY_NAME="Callback Reciever"
CR_EXTERNAL_SECURE_PORT=8091 # Callback receiver container external secure port (host -> container)
CR_INTERNAL_SECURE_PORT=8091 # Callback receiver container internal secure port (container -> container)
CR_APP_CALLBACK="/callbacks" # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr" # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text" # Url for callbacks (data containing text data)
CR_ALIVE_URL="/" # Base path for alive check
CR_COMPOSE_DIR="cr" # Dir in simulator_group for docker-compose
KUBE_PROXY_WEB_INTERNAL_PORT=8081 # Kube Http Proxy container internal port (container -> container)
KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783 # Kube Proxy container external secure port (host -> container)
KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784 # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785 # Kube Proxy container external secure port, docker (host -> container)
+
KUBE_PROXY_PATH="" # Proxy url path, will be set if proxy is started
KUBE_PROXY_ALIVE_URL="/" # Base path for alive check
KUBE_PROXY_COMPOSE_DIR="kubeproxy" # Dir in simulator_group for docker-compose
KUBE_NONRTRIC_NAMESPACE="nonrtric" # Namespace for all nonrtric components
KUBE_SIM_NAMESPACE="nonrtric-ft" # Namespace for simulators (except MR and RICSIM)
-KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
KUBE_ONAP_NAMESPACE="onap" # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap" # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap" # Namespace for sdnc
POLICY_AGENT_EXTERNAL_PORT=8081 # Policy Agent container external port (host -> container)
POLICY_AGENT_INTERNAL_PORT=8081 # Policy Agent container internal port (container -> container)
ECS_VERSION="V1-2" # Version where the types are decoupled from the producer registration
ECS_FEATURE_LEVEL="INFO-TYPES" # Space separated list of features
-MR_DMAAP_APP_NAME="dmaap-mr" # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router" # Name for the Dmaap MR
MR_STUB_APP_NAME="mr-stub" # Name of the MR stub
MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
MR_STUB_DISPLAY_NAME="Message Router stub"
MR_DMAAP_ALIVE_URL="/topics" # Base path for dmaap-mr alive check
MR_DMAAP_COMPOSE_DIR="dmaapmr" # Dir in simulator_group for dmaap mr for - docker-compose
MR_STUB_COMPOSE_DIR="mrstub" # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka" # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka" # Kafka app name, if just named "kafka" the image will not start...
+MR_KAFKA_PORT=9092 # Kafka port number
MR_ZOOKEEPER_APP_NAME="zookeeper" # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt" # Config files dir on localhost
-
+MR_ZOOKEEPER_PORT="2181" # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt" # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs" # Config files dir on localhost
CR_APP_NAME="callback-receiver" # Name for the Callback receiver
CR_DISPLAY_NAME="Callback receiver"
CR_EXTERNAL_SECURE_PORT=8091 # Callback receiver container external secure port (host -> container)
CR_INTERNAL_SECURE_PORT=8091 # Callback receiver container internal secure port (container -> container)
CR_APP_CALLBACK="/callbacks" # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr" # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text" # Url for callbacks (data containing text data)
CR_ALIVE_URL="/" # Base path for alive check
CR_COMPOSE_DIR="cr" # Dir in simulator_group for docker-compose
KUBE_PROXY_WEB_INTERNAL_PORT=8081 # Kube Http Proxy container internal port (container -> container)
KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783 # Kube Proxy container external secure port (host -> container)
KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784 # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785 # Kube Proxy container external secure port, docker (host -> container)
+
KUBE_PROXY_PATH="" # Proxy url path, will be set if proxy is started
KUBE_PROXY_ALIVE_URL="/" # Base path for alive check
KUBE_PROXY_COMPOSE_DIR="kubeproxy" # Dir in simulator_group for docker-compose
KUBE_SIM_NAMESPACE="nonrtric-ft" # Namespace for simulators (except MR and RICSIM)
KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
KUBE_ONAP_NAMESPACE="onap" # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap" # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap" # Namespace for sdnc
POLICY_AGENT_EXTERNAL_PORT=8081 # Policy Agent container external port (host -> container)
POLICY_AGENT_INTERNAL_PORT=8081 # Policy Agent container internal port (container -> container)
ECS_VERSION="V1-2" # Version where the types are decoupled from the producer registration
ECS_FEATURE_LEVEL="INFO-TYPES TYPE-SUBSCRIPTIONS INFO-TYPE-INFO" # Space separated list of features
-MR_DMAAP_APP_NAME="dmaap-mr" # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router" # Name for the Dmaap MR
MR_STUB_APP_NAME="mr-stub" # Name of the MR stub
MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
MR_STUB_DISPLAY_NAME="Message Router stub"
MR_DMAAP_ALIVE_URL="/topics" # Base path for dmaap-mr alive check
MR_DMAAP_COMPOSE_DIR="dmaapmr" # Dir in simulator_group for dmaap mr for - docker-compose
MR_STUB_COMPOSE_DIR="mrstub" # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka" # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka" # Kafka app name, if just named "kafka" the image will not start...
+MR_KAFKA_PORT=9092 # Kafka port number
MR_ZOOKEEPER_APP_NAME="zookeeper" # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt2" # Config files dir on localhost
-
+MR_ZOOKEEPER_PORT="2181" # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt" # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs" # Config files dir on localhost
CR_APP_NAME="callback-receiver" # Name for the Callback receiver
CR_DISPLAY_NAME="Callback receiver"
CR_INTERNAL_SECURE_PORT=8091 # Callback receiver container internal secure port (container -> container)
CR_APP_CALLBACK="/callbacks" # Url for callbacks
CR_APP_CALLBACK_MR="/callbacks-mr" # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text" # Url for callbacks (data containing text data)
CR_ALIVE_URL="/" # Base path for alive check
CR_COMPOSE_DIR="cr" # Dir in simulator_group for docker-compose
KUBE_PROXY_ALIVE_URL="/" # Base path for alive check
KUBE_PROXY_COMPOSE_DIR="kubeproxy" # Dir in simulator_group for docker-compose
+PVC_CLEANER_APP_NAME="pvc-cleaner" # Name for Persistent Volume Cleaner container
+PVC_CLEANER_DISPLAY_NAME="Persistent Volume Cleaner" # Display name for Persistent Volume Cleaner
+PVC_CLEANER_COMPOSE_DIR="pvc-cleaner" # Dir in simulator_group for yamls
+
DMAAP_ADP_APP_NAME="dmaapadapterservice" # Name for Dmaap Adapter container
DMAAP_ADP_DISPLAY_NAME="Dmaap Adapter Service" # Display name for Dmaap Adapter container
DMAAP_ADP_EXTERNAL_PORT=9087 # Dmaap Adapter container external port (host -> container)
#DMAAP_MED_CERT_MOUNT_DIR="./cert"
DMAAP_MED_ALIVE_URL="/status" # Base path for alive check
DMAAP_MED_COMPOSE_DIR="dmaapmed" # Dir in simulator_group for docker-compose
-#MAAP_MED_CONFIG_MOUNT_PATH="/app" # Internal container path for configuration
-DMAAP_MED_DATA_MOUNT_PATH="/configs" # Path in container for data file
-DMAAP_MED_DATA_FILE="type_config.json" # Container data file name
-#DMAAP_MED_CONFIG_FILE=application.yaml # Config file name
-
-PVC_CLEANER_APP_NAME="pvc-cleaner" # Name for Persistent Volume Cleaner container
-PVC_CLEANER_DISPLAY_NAME="Persistent Volume Cleaner" # Display name for Persistent Volume Cleaner
-PVC_CLEANER_COMPOSE_DIR="pvc-cleaner" # Dir in simulator_group for yamls
+#MAAP_MED_CONFIG_MOUNT_PATH="/app" # Internal container path for configuration
+DMAAP_MED_DATA_MOUNT_PATH="/configs" # Path in container for data file
+DMAAP_MED_DATA_FILE="type_config.json" # Container data file name
########################################
# Setting for common curl-base function
########################################
-UUID="" # UUID used as prefix to the policy id to simulate a real UUID
- # Testscript need to set the UUID otherwise this empty prefix is used
+UUID="" # UUID used as prefix to the policy id to simulate a real UUID
+ # Testscript need to set the UUID otherwise this empty prefix is used
echo " [--ricsim-prefix <prefix> ] [--use-local-image <app-nam>+] [--use-snapshot-image <app-nam>+]"
echo " [--use-staging-image <app-nam>+] [--use-release-image <app-nam>+] [--image-repo <repo-address]"
echo " [--repo-policy local|remote] [--cluster-timeout <timeout-in seconds>] [--print-stats]"
- echo " [--override <override-environment-filename> --pre-clean]"
+ echo " [--override <override-environment-filename> --pre-clean --gen-stats]"
}
if [ $# -eq 1 ] && [ "$1" == "help" ]; then
echo "--print-stats - Print current test stats after each test."
echo "--override <file> - Override setting from the file supplied by --env-file"
echo "--pre-clean - Will clean kube resouces when running docker and vice versa"
+ echo "--gen-stats - Collect container/pod runtime statistics"
echo ""
echo "List of app short names supported: "$APP_SHORT_NAMES
#Var to control if current stats shall be printed
PRINT_CURRENT_STATS=0
+#Var to control if container/pod runtime statistics shall be collected
+COLLECT_RUNTIME_STATS=0
+
#File to keep deviation messages
DEVIATION_FILE=".tmp_deviations"
rm $DEVIATION_FILE &> /dev/null
}
trap trap_fnc ERR
+# Trap to kill subprocesses
+trap "kill 0" EXIT
+
# Counter for tests
TEST_SEQUENCE_NR=1
foundparm=0
fi
fi
+ if [ $paramerror -eq 0 ]; then
+ if [ "$1" == "--gen-stats" ]; then
+ COLLECT_RUNTIME_STATS=1
+ echo "Option set - Collect runtime statistics"
+ shift;
+ foundparm=0
+ fi
+ fi
+
done
echo ""
fi
fi
if [ $RUNMODE == "DOCKER" ]; then
- tmp=$(docker-compose version | grep -i 'Docker Compose version')
+ tmp=$(docker-compose version | grep -i 'docker' | grep -i 'compose' | grep -i 'version')
if [[ "$tmp" == *'v2'* ]]; then
echo -e $RED"docker-compose is using docker-compose version 2"$ERED
echo -e $RED"The test environment only support version 1"$ERED
echo -e $BOLD"======================================================="$EBOLD
echo ""
+ LOG_STAT_ARGS=""
+
for imagename in $APP_SHORT_NAMES; do
__check_included_image $imagename
retcode_i=$?
function_pointer="__"$imagename"_initial_setup"
$function_pointer
+
+ function_pointer="__"$imagename"_statisics_setup"
+ LOG_STAT_ARGS=$LOG_STAT_ARGS" "$($function_pointer)
fi
done
+ if [ $COLLECT_RUNTIME_STATS -eq 1 ]; then
+ ../common/genstat.sh $RUNMODE $SECONDS $TESTLOGS/$ATC/stat_data.csv $LOG_STAT_ARGS &
+ fi
+
}
# Function to print the test result, shall be the last cmd in a test script
echo "Timer measurement in the test script"
echo "===================================="
column -t -s $'\t' $TIMER_MEASUREMENTS
+ if [ $RES_PASS != $RES_TEST ]; then
+ echo -e $RED"Measurement may not be reliable when there are failed test - script timeouts may cause long measurement values"$ERED
+ fi
echo ""
+ if [ $COLLECT_RUNTIME_STATS -eq 1 ]; then
+ echo "Runtime statistics collected in file: "$TESTLOGS/$ATC/stat_data.csv
+ echo ""
+ fi
+
total=$((RES_PASS+RES_FAIL))
if [ $RES_TEST -eq 0 ]; then
echo -e "\033[1mNo tests seem to have been executed. Check the script....\033[0m"
return 0
}
-# Function to create a configmap in kubernetes
-# args: <configmap-name> <namespace> <labelname> <labelid> <path-to-data-file> <path-to-output-yaml>
-# (Not for test scripts)
-__kube_create_configmapXXXXXXXXXXXXX() {
- echo -ne " Creating configmap $1 "$SAMELINE
- #envsubst < $5 > $5"_tmp"
- #cp $5"_tmp" $5 #Need to copy back to orig file name since create configmap neeed the original file name
- kubectl create configmap $1 -n $2 --from-file=$5 --dry-run=client -o yaml > $6
- if [ $? -ne 0 ]; then
- echo -e " Creating configmap $1 $RED Failed $ERED"
- ((RES_CONF_FAIL++))
- return 1
- fi
-
- kubectl apply -f $6 1> /dev/null 2> ./tmp/kubeerr
- if [ $? -ne 0 ]; then
- echo -e " Creating configmap $1 $RED Apply failed $ERED"
- echo " Message: $(<./tmp/kubeerr)"
- ((RES_CONF_FAIL++))
- return 1
- fi
- kubectl label configmap $1 -n $2 $3"="$4 --overwrite 1> /dev/null 2> ./tmp/kubeerr
- if [ $? -ne 0 ]; then
- echo -e " Creating configmap $1 $RED Labeling failed $ERED"
- echo " Message: $(<./tmp/kubeerr)"
- ((RES_CONF_FAIL++))
- return 1
- fi
- # Log the resulting map
- kubectl get configmap $1 -n $2 -o yaml > $6
-
- echo -e " Creating configmap $1 $GREEN OK $EGREEN"
- return 0
-}
-
# This function runs a kubectl cmd where a single output value is expected, for example get ip with jsonpath filter.
# The function retries up to the timeout given in the cmd flag '--cluster-timeout'
# args: <full kubectl cmd with parameters>
if [ $PRE_CLEAN -eq 1 ]; then
echo " Clean docker resouces to free up resources, may take time..."
../common/clean_docker.sh 2&>1 /dev/null
+ echo ""
fi
else
__clean_containers
if [ $PRE_CLEAN -eq 1 ]; then
- echo " Clean kubernetes resouces to free up resources, may take time..."
+ echo " Cleaning kubernetes resouces to free up resources, may take time..."
../common/clean_kube.sh 2&>1 /dev/null
+ echo ""
fi
fi
}
import logging
import socket
from threading import RLock
+from hashlib import md5
# Disable all logging of GET on reading counters and db
class AjaxFilter(logging.Filter):
# Request and response constants
CALLBACK_URL="/callbacks/<string:id>"
CALLBACK_MR_URL="/callbacks-mr/<string:id>" #Json list with string encoded items
+CALLBACK_TEXT_URL="/callbacks-text/<string:id>" # Callback for string of text
APP_READ_URL="/get-event/<string:id>"
APP_READ_ALL_URL="/get-all-events/<string:id>"
DUMP_ALL_URL="/db"
cntr_callbacks[id][1]+=1
msg=msg_callbacks[id][0]
print("Fetching msg for id: "+id+", msg="+str(msg))
- del msg[TIME_STAMP]
+
+ if (isinstance(msg,dict)):
+ del msg[TIME_STAMP]
+ if ("md5" in msg.keys()):
+ print("EXTRACTED MD5")
+ msg=msg["md5"]
+ print("MD5: "+str(msg))
+
del msg_callbacks[id][0]
return json.dumps(msg),200
print("No messages for id: "+id)
msg=msg_callbacks[id]
print("Fetching all msgs for id: "+id+", msg="+str(msg))
for sub_msg in msg:
- del sub_msg[TIME_STAMP]
+ if (isinstance(sub_msg, dict)):
+ del sub_msg[TIME_STAMP]
del msg_callbacks[id]
return json.dumps(msg),200
print("No messages for id: "+id)
with lock:
cntr_msg_callbacks += 1
- msg[TIME_STAMP]=str(datetime.now())
+ if (isinstance(msg, dict)):
+ msg[TIME_STAMP]=str(datetime.now())
if (id in msg_callbacks.keys()):
msg_callbacks[id].append(msg)
else:
return 'OK',200
-# Receive a json callback message with payload fromatted accoirding to output frm the message router
-# URI and payload, (PUT or POST): /callbacks/<id> <json messages>
+# Receive a json callback message with payload formatted according to output from the message router
+# Array of stringified json objects
+# URI and payload, (PUT or POST): /callbacks-mr/<id> <json messages>
# json is a list of string encoded json items
# response: OK 200 or 500 for other errors
@app.route(CALLBACK_MR_URL,
global msg_callbacks
global cntr_msg_callbacks
+    storeas=request.args.get('storeas') #If set, store payload as an md5 hash and don't log the payload
+    #Large payloads will otherwise overload the server
try:
print("Received callback (mr) for id: "+id +", content-type="+request.content_type)
- remote_host_logging(request)
print("raw data: str(request.data): "+str(request.data))
+ if (storeas is None):
+ print("raw data: str(request.data): "+str(request.data))
do_delay()
try:
#if (request.content_type == MIME_JSON):
if (MIME_JSON in request.content_type):
data = request.data
msg_list = json.loads(data)
- print("Payload(json): "+str(msg_list))
+ if (storeas is None):
+ print("Payload(json): "+str(msg_list))
else:
msg_list=[]
print("Payload(content-type="+request.content_type+"). Setting empty json as payload")
with lock:
remote_host_logging(request)
for msg in msg_list:
- print("msg (str): "+str(msg))
- msg=json.loads(msg)
- print("msg (json): "+str(msg))
+ if (storeas is None):
+ msg=json.loads(msg)
+ else:
+                    #Convert to compact json without whitespace between parameter and value...
+                    #It seems that whitespace is added somewhere along the way to this server
+ msg=json.loads(msg)
+ msg=json.dumps(msg, separators=(',', ':'))
+
+ md5msg={}
+ md5msg["md5"]=md5(msg.encode('utf-8')).hexdigest()
+ msg=md5msg
+ print("msg (json converted to md5 hash): "+str(msg["md5"]))
cntr_msg_callbacks += 1
- msg[TIME_STAMP]=str(datetime.now())
+ if (isinstance(msg, dict)):
+ msg[TIME_STAMP]=str(datetime.now())
if (id in msg_callbacks.keys()):
msg_callbacks[id].append(msg)
else:
return 'OK',200
+# Receive a callback message of a single text message (content type ignored)
+# or a json array of strings (content type json)
+# URI and payload, (PUT or POST): /callbacks-text/<id> <text message>
+# response: OK 200 or 500 for other errors
+@app.route(CALLBACK_TEXT_URL,
+    methods=['PUT','POST'])
+def events_write_text(id):
+    global msg_callbacks
+    global cntr_msg_callbacks
+
+    storeas=request.args.get('storeas') #If set, store payload as an md5 hash and don't log the payload
+    #Large payloads will otherwise overload the server
+    try:
+        print("Received callback for id: "+id +", content-type="+request.content_type)
+        remote_host_logging(request)
+        if (storeas is None):
+            print("raw data: str(request.data): "+str(request.data))
+        do_delay()
+
+        try:
+            msg_list=None
+            if (MIME_JSON in request.content_type): #Json array of strings
+                msg_list=json.loads(request.data)
+            else:
+                data=request.data.decode("utf-8") #Assuming string
+                msg_list=[]
+                msg_list.append(data)
+
+            for msg in msg_list:
+                if (storeas == "md5"):
+                    md5msg={}
+                    print("msg: "+str(msg))
+                    print("msg (endcode str): "+str(msg.encode('utf-8')))
+                    md5msg["md5"]=md5(msg.encode('utf-8')).hexdigest()
+                    msg=md5msg
+                    print("msg (data converted to md5 hash): "+str(msg["md5"]))
+
+                # Only dict messages can carry a timestamp; plain text strings are stored as-is
+                if (isinstance(msg, dict)):
+                    msg[TIME_STAMP]=str(datetime.now())
+
+                with lock:
+                    cntr_msg_callbacks += 1
+                    if (id in msg_callbacks.keys()):
+                        msg_callbacks[id].append(msg)
+                    else:
+                        msg_callbacks[id]=[]
+                        msg_callbacks[id].append(msg)
+
+                    # cntr_callbacks[id] = [received-count, fetched-count]
+                    if (id in cntr_callbacks.keys()):
+                        cntr_callbacks[id][0] += 1
+                    else:
+                        cntr_callbacks[id]=[]
+                        cntr_callbacks[id].append(1)
+                        cntr_callbacks[id].append(0)
+        except Exception as e:
+            print(CAUGHT_EXCEPTION+str(e))
+            traceback.print_exc()
+            return 'NOTOK',500
+
+
+    except Exception as e:
+        print(CAUGHT_EXCEPTION+str(e))
+        traceback.print_exc()
+        return 'NOTOK',500
+
+    return 'OK',200
+
### Functions for test ###
# Dump the whole db of current callbacks
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass http://localhost:2222;
+
+ client_max_body_size 0;
}
+
}
##
# SSL Settings
topic_write=""
topic_read=""
+generic_topics_upload_baseurl=""
uploader_thread=None
downloader_thread=None
+generic_uploader_thread=None
-# Function to download messages from dmaap
+# Function to upload PMS messages to dmaap
def dmaap_uploader():
global msg_requests
global cntr_msg_requests_fetched
sleep(0.01)
-# Function to upload messages to dmaap
+# Function to download PMS messages from dmaap
def dmaap_downloader():
global msg_responses
global cntr_msg_responses_submitted
except Exception as e:
sleep(1)
+# Function to upload generic messages to dmaap
+def dmaap_generic_uploader():
+ global msg_requests
+ global cntr_msg_requests_fetched
+
+ print("Starting generic uploader")
+
+ headers_json = {'Content-type': 'application/json', 'Accept': '*/*'}
+ headers_text = {'Content-type': 'text/plain', 'Accept': '*/*'}
+
+ while True:
+ if (len(generic_messages)):
+ for topicname in generic_messages.keys(): #topicname contains the path of the topics, eg. "/event/<topic>"
+ topic_queue=generic_messages[topicname]
+ if (len(topic_queue)>0):
+ if (topicname.endswith(".text")):
+ msg=topic_queue[0]
+ headers=headers_text
+ else:
+ msg=topic_queue[0]
+ msg=json.dumps(msg)
+ headers=headers_json
+ url=generic_topics_upload_baseurl+topicname
+ print("Sending to dmaap : "+ url)
+ print("Sending to dmaap : "+ msg)
+ print("Sending to dmaap : "+ str(headers))
+ try:
+ resp=requests.post(url, data=msg, headers=headers, timeout=10)
+ if (resp.status_code<199 & resp.status_code > 299):
+ print("Failed, response code: " + str(resp.status_code))
+ sleep(1)
+ else:
+ print("Dmaap response code: " + str(resp.status_code))
+ print("Dmaap response text: " + str(resp.text))
+ with lock:
+ topic_queue.pop(0)
+ cntr_msg_requests_fetched += 1
+ except Exception as e:
+ print("Failed, exception: "+ str(e))
+ sleep(1)
+ sleep(0.01)
+
#I'm alive function
@app.route('/',
methods=['GET'])
return 'OK', 200
-# Helper function to create a Dmaap request message
+# Helper function to create a Dmaap PMS request message
# args : <GET|PUT|DELETE> <correlation-id> <json-string-payload - may be None> <url>
# response: json formatted string of a complete Dmaap message
def create_message(operation, correlation_id, payload, url):
### MR-stub interface, for MR control
-# Send a message to MR
+# Send a PMS message to MR
# URI and parameters (PUT or POST): /send-request?operation=<GET|PUT|POST|DELETE>&url=<url>
# response: <correlation-id> (http 200) o4 400 for parameter error or 500 for other errors
@app.route(APP_WRITE_URL,
print(APP_WRITE_URL+"-"+CAUGHT_EXCEPTION+" "+str(e) + " "+traceback.format_exc())
return Response(SERVER_ERROR+" "+str(e), status=500, mimetype=MIME_TEXT)
-# Receive a message response for MR for the included correlation id
+# Receive a PMS message response for MR for the included correlation id
# URI and parameter, (GET): /receive-response?correlationid=<correlation-id>
# response: <json-array of 1 response> 200 or empty 204 or other errors 500
@app.route(APP_READ_URL,
### Dmaap interface ###
-# Read messages stream. URI according to agent configuration.
+# Read PMS messages stream. URI according to agent configuration.
# URI, (GET): /events/A1-POLICY-AGENT-READ/users/policy-agent
# response: 200 <json array of request messages>, or 500 for other errors
@app.route(AGENT_READ_URL,
print("timeout: "+str(timeout)+", start_time: "+str(start_time)+", current_time: "+str(current_time))
return Response("[]", status=200, mimetype=MIME_JSON)
-# Write messages stream. URI according to agent configuration.
+# Write PMS messages stream. URI according to agent configuration.
# URI and payload, (PUT or POST): /events/A1-POLICY-AGENT-WRITE <json array of response messages>
# response: OK 200 or 400 for missing json parameters, 500 for other errors
@app.route(AGENT_WRITE_URL,
return Response(json.dumps(res), status=200, mimetype=MIME_JSON)
return Response("[]", status=200, mimetype=MIME_JSON)
-# Generic POST/PUT catching all urls starting with /events/<topic>.
+# Generic POST catching all urls starting with /events/<topic>.
# Writes the message in a que for that topic
@app.route("/events/<path>",
- methods=['PUT','POST'])
+ methods=['POST'])
def generic_write(path):
global generic_messages
global cntr_msg_responses_submitted
write_method=str(request.method)
with lock:
try:
- payload=request.json
- print(write_method+" on "+urlkey+" json=" + json.dumps(payload))
+ if (urlkey.endswith(".text")):
+ payload=str(request.data.decode('UTF-8'))
+ print(write_method+" on "+urlkey+" text=" + payload)
+ else:
+ payload=request.json
+ print(write_method+" on "+urlkey+" json=" + json.dumps(payload))
topicmsgs=[]
if (urlkey in generic_messages.keys()):
topicmsgs=generic_messages[urlkey]
global generic_messages
global cntr_msg_requests_fetched
+ if generic_topics_upload_baseurl:
+ return Response('Url not available when running as mrstub frontend', status=404, mimetype=MIME_TEXT)
+
urlpath="/events/"+str(path)
urlkey="/events/"+str(path).split("/")[0] #Extract topic
print("GET on topic"+urlkey)
uploader_thread=Thread(target=dmaap_uploader)
uploader_thread.start()
-else:
+if os.environ['GENERIC_TOPICS_UPLOAD_BASEURL'] is not None:
+ print("GENERIC_TOPICS_UPLOAD_BASEURL:"+os.environ['GENERIC_TOPICS_UPLOAD_BASEURL'])
+ generic_topics_upload_baseurl=os.environ['GENERIC_TOPICS_UPLOAD_BASEURL']
+ if generic_topics_upload_baseurl and generic_uploader_thread is None:
+ generic_uploader_thread=Thread(target=dmaap_generic_uploader)
+ generic_uploader_thread.start()
+
+if os.getenv("TOPIC_READ") is None or os.environ['GENERIC_TOPICS_UPLOAD_BASEURL'] is None:
print("No env variables - OK")
if __name__ == "__main__":
# serve dynamic requests
location / {
- proxy_pass http://localhost:2222;
+ proxy_pass http://localhost:2222;
+ client_max_body_size 0;
}
}
##
configuration-filepath: /opt/app/dmaap-adaptor-service/data/application_configuration.json
dmaap-base-url: $MR_SERVICE_PATH
# The url used to adress this component. This is used as a callback url sent to other components.
- dmaap-adapter-base-url: $DMAAP_ADP_SERVICE_PATH
\ No newline at end of file
+ dmaap-adapter-base-url: $DMAAP_ADP_SERVICE_PATH
+  # Kafka bootstrap server. This is only needed if there are Information Types that use a kafkaInputTopic
+ kafka:
+ bootstrap-servers: $MR_KAFKA_SERVICE_PATH
"types": [
{
"id": "ExampleInformationType",
- "dmaapTopicUrl": "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs",
+ "dmaapTopicUrl": "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs?timeout=15000&limit=100",
"useHttpProxy": ${DMMAAP_ADP_PROXY_FLAG}
- }
+ },
+ {
+ "id": "ExampleInformationTypeKafka",
+ "kafkaInputTopic": "unauthenticated.dmaapadp_kafka.text",
+ "useHttpProxy": ${DMMAAP_ADP_PROXY_FLAG}
+ }
]
}
\ No newline at end of file
--- /dev/null
+################################################################################
+# Copyright (c) 2021 Nordix Foundation. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+*
+!.gitignore
- name: DMAAP_MR_ADDR
value: "$MR_SERVICE_PATH"
- name: LOG_LEVEL
- value: "Debug"
+ value: Debug
volumes:
- configMap:
defaultMode: 420
- INFO_PRODUCER_PORT=${DMAAP_MED_CONF_SELF_PORT}
- INFO_COORD_ADDR=${ECS_SERVICE_PATH}
- DMAAP_MR_ADDR=${MR_SERVICE_PATH}
- - LOG_LEVEL="Debug"
+ - LOG_LEVEL=Debug
volumes:
- ${DMAAP_MED_HOST_MNT_DIR}/$DMAAP_MED_DATA_FILE:${DMAAP_MED_DATA_MOUNT_PATH}/$DMAAP_MED_DATA_FILE
labels:
--- /dev/null
+################################################################################
+# Copyright (c) 2021 Nordix Foundation. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+*
+!.gitignore
\ No newline at end of file
[
{
"id": "STD_Fault_Messages",
- "dmaapTopicUrl": "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages"
+ "dmaapTopicUrl": "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages?timeout=15000&limit=100"
}
]
}
\ No newline at end of file
apiVersion: apps/v1
kind: Deployment
metadata:
- name: $MR_DMAAP_KUBE_APP_NAME
+ name: $MR_DMAAP_APP_NAME
namespace: $KUBE_ONAP_NAMESPACE
labels:
- run: $MR_DMAAP_KUBE_APP_NAME
+ run: $MR_DMAAP_APP_NAME
autotest: DMAAPMR
spec:
replicas: 1
selector:
matchLabels:
- run: $MR_DMAAP_KUBE_APP_NAME
+ run: $MR_DMAAP_APP_NAME
template:
metadata:
labels:
- run: $MR_DMAAP_KUBE_APP_NAME
+ run: $MR_DMAAP_APP_NAME
autotest: DMAAPMR
spec:
containers:
- - name: $MR_DMAAP_KUBE_APP_NAME
+ - name: $MR_DMAAP_APP_NAME
image: $ONAP_DMAAPMR_IMAGE
imagePullPolicy: $KUBE_IMAGE_PULL_POLICY
ports:
- mountPath: /appl/dmaapMR1/bundleconfig/etc/appprops/MsgRtrApi.properties
subPath: MsgRtrApi.properties
name: dmaapmr-msg-rtr-api
- volumeMounts:
- mountPath: /appl/dmaapMR1/bundleconfig/etc/logback.xml
subPath: logback.xml
name: dmaapmr-log-back
- volumeMounts:
- mountPath: /appl/dmaapMR1/etc/cadi.properties
subPath: cadi.properties
name: dmaapmr-cadi
apiVersion: apps/v1
kind: Deployment
metadata:
- name: $MR_KAFKA_BWDS_NAME
+ name: $MR_KAFKA_APP_NAME
namespace: $KUBE_ONAP_NAMESPACE
labels:
- run: $MR_KAFKA_BWDS_NAME
+ run: $MR_KAFKA_APP_NAME
autotest: DMAAPMR
spec:
replicas: 1
selector:
matchLabels:
- run: $MR_KAFKA_BWDS_NAME
+ run: $MR_KAFKA_APP_NAME
template:
metadata:
labels:
- run: $MR_KAFKA_BWDS_NAME
+ run: $MR_KAFKA_APP_NAME
autotest: DMAAPMR
spec:
containers:
- - name: $MR_KAFKA_BWDS_NAME
+ - name: $MR_KAFKA_APP_NAME
image: $ONAP_KAFKA_IMAGE
imagePullPolicy: $KUBE_IMAGE_PULL_POLICY
ports:
- name: http
- containerPort: 9095
+ containerPort: $MR_KAFKA_PORT
env:
- name: enableCadi
value: 'false'
- name: KAFKA_ZOOKEEPER_CONNECT
- value: 'zookeeper.onap:2181'
+ value: '$MR_ZOOKEEPER_APP_NAME:$MR_ZOOKEEPER_PORT'
- name: KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS
value: '40000'
- name: KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS
- name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP
value: 'INTERNAL_PLAINTEXT:PLAINTEXT,EXTERNAL_PLAINTEXT:PLAINTEXT'
- name: KAFKA_ADVERTISED_LISTENERS
- value: 'INTERNAL_PLAINTEXT://kaka:9092'
-# - name: KAFKA_ADVERTISED_LISTENERS
-# value: 'INTERNAL_PLAINTEXT://localhost:9092'
+ value: 'INTERNAL_PLAINTEXT://$MR_KAFKA_APP_NAME:$MR_KAFKA_PORT'
- name: KAFKA_LISTENERS
- value: 'EXTERNAL_PLAINTEXT://0.0.0.0:9095,INTERNAL_PLAINTEXT://0.0.0.0:9092'
+ value: 'INTERNAL_PLAINTEXT://0.0.0.0:$MR_KAFKA_PORT'
+ # - name: KAFKA_LISTENERS
+ # value: 'EXTERNAL_PLAINTEXT://0.0.0.0:9091,INTERNAL_PLAINTEXT://0.0.0.0:$MR_KAFKA_PORT'
- name: KAFKA_INTER_BROKER_LISTENER_NAME
value: INTERNAL_PLAINTEXT
- name: KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE
- name: KAFKA_OPTS
value: '-Djava.security.auth.login.config=/etc/kafka/secrets/jaas/zk_client_jaas.conf'
- name: KAFKA_ZOOKEEPER_SET_ACL
- value: 'true'
+ value: 'false'
- name: KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR
value: '1'
- name: KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS
value: '1'
-
volumeMounts:
- mountPath: /etc/kafka/secrets/jaas/zk_client_jaas.conf
subPath: zk_client_jaas.conf
imagePullPolicy: $KUBE_IMAGE_PULL_POLICY
ports:
- name: http
- containerPort: 2181
+ containerPort: $MR_ZOOKEEPER_PORT
env:
- name: ZOOKEEPER_REPLICAS
value: '1'
- name: ZOOKEEPER_AUTOPURGE_PURGE_INTERVAL
value: '24'
- name: ZOOKEEPER_CLIENT_PORT
- value: '2181'
+ value: '$MR_ZOOKEEPER_PORT'
- name: KAFKA_OPTS
value: '-Djava.security.auth.login.config=/etc/zookeeper/secrets/jaas/zk_server_jaas.conf -Dzookeeper.kerberos.removeHostFromPrincipal=true -Dzookeeper.kerberos.removeRealmFromPrincipal=true -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider -Dzookeeper.requireClientAuthScheme=sasl'
- name: ZOOKEEPER_SERVER_ID
# LICENSE_START=======================================================
# org.onap.dmaap
# ================================================================================
+# Copyright © 2021 Nordix Foundation. All rights reserved.
# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
##
## Both Cambria and Kafka make use of Zookeeper.
##
-config.zk.servers=zookeeper:2181
+config.zk.servers=$MR_ZOOKEEPER_APP_NAME:$MR_ZOOKEEPER_PORT
###############################################################################
##
## if you want to change request.required.acks it can take this one value
#kafka.metadata.broker.list=localhost:9092,localhost:9093
#kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
-kafka.metadata.broker.list=kafka:9092
+kafka.metadata.broker.list=$MR_KAFKA_APP_NAME:$MR_KAFKA_PORT
##kafka.request.required.acks=-1
#kafka.client.zookeeper=${config.zk.servers}
consumer.timeout.ms=100
cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
consumer.timeout=17
default.partitions=3
-default.replicas=3
+default.replicas=1
##############################################################################
#100mb
maxcontentlength=10000
--- /dev/null
+# ============LICENSE_START===============================================
+# Copyright (C) 2021 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+#Removed to be disable aaf in test env
+#aaf_locate_url=https://aaf-onap-test.osaaf.org:8095\
+aaf_url=https://AAF_LOCATE_URL/onap.org.osaaf.aaf.service:2.1
+aaf_env=DEV
+aaf_lur=org.onap.aaf.cadi.aaf.v2_0.AAFLurPerm
+
+#Removed to be disable aaf in test env
+# cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks
+# cadi_truststore_password=8FyfX+ar;0$uZQ0h9*oXchNX
+
+cadi_keyfile=/appl/dmaapMR1/etc/org.onap.dmaap.mr.keyfile
+
+cadi_alias=dmaapmr@mr.dmaap.onap.org
+cadi_keystore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.p12
+cadi_keystore_password=GDQttV7)BlOvWMf6F7tz&cjy
+cadi_x509_issuers=CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_7, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US
+
+cadi_loglevel=INFO
+cadi_protocols=TLSv1.1,TLSv1.2
+cadi_latitude=37.78187
+cadi_longitude=-122.26147
\ No newline at end of file
<!--
============LICENSE_START=======================================================
+ Copyright © 2021 Nordix Foundation. All rights reserved.
Copyright © 2019 AT&T Intellectual Property. All rights reserved.
================================================================================
Licensed under the Apache License, Version 2.0 (the "License");
image: $ONAP_ZOOKEEPER_IMAGE
container_name: $MR_ZOOKEEPER_APP_NAME
ports:
- - "2181:2181"
+ - "$MR_ZOOKEEPER_PORT:$MR_ZOOKEEPER_PORT"
environment:
ZOOKEEPER_REPLICAS: 1
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_MAX_CLIENT_CNXNS: 200
ZOOKEEPER_AUTOPURGE_SNAP_RETAIN_COUNT: 3
ZOOKEEPER_AUTOPURGE_PURGE_INTERVAL: 24
- ZOOKEEPER_CLIENT_PORT: 2181
+ ZOOKEEPER_CLIENT_PORT: $MR_ZOOKEEPER_PORT
KAFKA_OPTS: -Djava.security.auth.login.config=/etc/zookeeper/secrets/jaas/zk_server_jaas.conf -Dzookeeper.kerberos.removeHostFromPrincipal=true -Dzookeeper.kerberos.removeRealmFromPrincipal=true -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider -Dzookeeper.requireClientAuthScheme=sasl -Dzookeeper.4lw.commands.whitelist=*
ZOOKEEPER_SERVER_ID: 1
volumes:
image: $ONAP_KAFKA_IMAGE
container_name: $MR_KAFKA_APP_NAME
ports:
- - "9092:9092"
+ - "$MR_KAFKA_PORT:$MR_KAFKA_PORT"
environment:
enableCadi: 'false'
- KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+ KAFKA_ZOOKEEPER_CONNECT: $MR_ZOOKEEPER_APP_NAME:$MR_ZOOKEEPER_PORT
KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 40000
KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: 40000
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL_PLAINTEXT:PLAINTEXT,EXTERNAL_PLAINTEXT:PLAINTEXT
- KAFKA_ADVERTISED_LISTENERS: INTERNAL_PLAINTEXT://kafka:9092
- KAFKA_LISTENERS: INTERNAL_PLAINTEXT://0.0.0.0:9092
+ KAFKA_ADVERTISED_LISTENERS: INTERNAL_PLAINTEXT://$MR_KAFKA_APP_NAME:$MR_KAFKA_PORT
+ KAFKA_LISTENERS: INTERNAL_PLAINTEXT://0.0.0.0:$MR_KAFKA_PORT
KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL_PLAINTEXT
KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE: 'false'
KAFKA_OPTS: -Djava.security.auth.login.config=/etc/kafka/secrets/jaas/zk_client_jaas.conf
--- /dev/null
+################################################################################
+# Copyright (c) 2021 Nordix Foundation. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+*
+!.gitignore
\ No newline at end of file
Client {
- org.apache.zookeeper.server.auth.DigestLoginModule required
- username="kafka"
- password="kafka_secret";
- };
-
+ org.apache.zookeeper.server.auth.DigestLoginModule required
+ username="kafka"
+ password="kafka_secret";
+ };
\ No newline at end of file
+++ /dev/null
-# LICENSE_START=======================================================
-# org.onap.dmaap
-# ================================================================================
-# Copyright © 2020 Nordix Foundation. All rights reserved.
-# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============LICENSE_END=========================================================
-#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
-#
-###############################################################################
-###############################################################################
-##
-## Cambria API Server config
-##
-## Default values are shown as commented settings.
-##
-###############################################################################
-##
-## HTTP service
-##
-## 3904 is standard as of 7/29/14.
-#
-## Zookeeper Connection
-##
-## Both Cambria and Kafka make use of Zookeeper.
-##
-#config.zk.servers=172.18.1.1
-#config.zk.servers={{.Values.zookeeper.name}}:{{.Values.zookeeper.port}}
-config.zk.servers=zookeeper.onap:2181
-
-#config.zk.root=/fe3c/cambria/config
-
-
-###############################################################################
-##
-## Kafka Connection
-##
-## Items below are passed through to Kafka's producer and consumer
-## configurations (after removing "kafka.")
-## if you want to change request.required.acks it can take this one value
-#kafka.metadata.broker.list=localhost:9092,localhost:9093
-#kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
-kafka.metadata.broker.list=akfak-bwds.onap:9092
-##kafka.request.required.acks=-1
-#kafka.client.zookeeper=${config.zk.servers}
-consumer.timeout.ms=100
-zookeeper.connection.timeout.ms=6000
-zookeeper.session.timeout.ms=20000
-zookeeper.sync.time.ms=2000
-auto.commit.interval.ms=1000
-fetch.message.max.bytes =1000000
-auto.commit.enable=false
-
-#(backoff*retries > zksessiontimeout)
-kafka.rebalance.backoff.ms=10000
-kafka.rebalance.max.retries=6
-
-
-###############################################################################
-##
-## Secured Config
-##
-## Some data stored in the config system is sensitive -- API keys and secrets,
-## for example. to protect it, we use an encryption layer for this section
-## of the config.
-##
-## The key is a base64 encode AES key. This must be created/configured for
-## each installation.
-#cambria.secureConfig.key=
-##
-## The initialization vector is a 16 byte value specific to the secured store.
-## This must be created/configured for each installation.
-#cambria.secureConfig.iv=
-
-## Southfield Sandbox
-cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
-cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
-authentication.adminSecret=fe3cCompound
-#cambria.secureConfig.key[pc569h]=YT3XPyxEmKCTLI2NK+Sjbw==
-#cambria.secureConfig.iv[pc569h]=rMm2jhR3yVnU+u2V9Ugu3Q==
-
-
-###############################################################################
-##
-## Consumer Caching
-##
-## Kafka expects live connections from the consumer to the broker, which
-## obviously doesn't work over connectionless HTTP requests. The Cambria
-## server proxies HTTP requests into Kafka consumer sessions that are kept
-## around for later re-use. Not doing so is costly for setup per request,
-## which would substantially impact a high volume consumer's performance.
-##
-## This complicates Cambria server failover, because we often need server
-## A to close its connection before server B brings up the replacement.
-##
-
-## The consumer cache is normally enabled.
-#cambria.consumer.cache.enabled=true
-
-## Cached consumers are cleaned up after a period of disuse. The server inspects
-## consumers every sweepFreqSeconds and will clean up any connections that are
-## dormant for touchFreqMs.
-#cambria.consumer.cache.sweepFreqSeconds=15
-cambria.consumer.cache.touchFreqMs=120000
-##stickforallconsumerrequests=false
-## The cache is managed through ZK. The default value for the ZK connection
-## string is the same as config.zk.servers.
-#cambria.consumer.cache.zkConnect=${config.zk.servers}
-
-##
-## Shared cache information is associated with this node's name. The default
-## name is the hostname plus the HTTP service port this host runs on. (The
-## hostname is determined via InetAddress.getLocalHost ().getCanonicalHostName(),
-## which is not always adequate.) You can set this value explicitly here.
-##
-#cambria.api.node.identifier=<use-something-unique-to-this-instance>
-
-#cambria.rateLimit.maxEmptyPollsPerMinute=30
-#cambria.rateLimitActual.delay.ms=10
-
-###############################################################################
-##
-## Metrics Reporting
-##
-## This server can report its metrics periodically on a topic.
-##
-#metrics.send.cambria.enabled=true
-#metrics.send.cambria.topic=cambria.apinode.metrics #msgrtr.apinode.metrics.dmaap
-#metrics.send.cambria.sendEverySeconds=60
-
-cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
-consumer.timeout=17
-default.partitions=3
-default.replicas=3
-##############################################################################
-#100mb
-maxcontentlength=10000
-
-
-##############################################################################
-#AAF Properties
-msgRtr.namespace.aaf=org.onap.dmaap.mr.topic
-msgRtr.topicfactory.aaf=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
-enforced.topic.name.AAF=org.onap.dmaap.mr
-forceAAF=false
-transidUEBtopicreqd=false
-defaultNSforUEB=org.onap.dmaap.mr
-##############################################################################
-#Mirror Maker Agent
-
-msgRtr.mirrormakeradmin.aaf=org.onap.dmaap.mr.mirrormaker|*|admin
-msgRtr.mirrormakeruser.aaf=org.onap.dmaap.mr.mirrormaker|*|user
-msgRtr.mirrormakeruser.aaf.create=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
-msgRtr.mirrormaker.timeout=15000
-msgRtr.mirrormaker.topic=org.onap.dmaap.mr.mirrormakeragent
-msgRtr.mirrormaker.consumergroup=mmagentserver
-msgRtr.mirrormaker.consumerid=1
-
-kafka.max.poll.interval.ms=300000
-kafka.heartbeat.interval.ms=60000
-kafka.session.timeout.ms=240000
-kafka.max.poll.records=1000
-
# LICENSE_START=======================================================
# org.onap.dmaap
# ================================================================================
-# Copyright © 2020 Nordix Foundation. All rights reserved.
-# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
##
## Both Cambria and Kafka make use of Zookeeper.
##
-#config.zk.servers=172.18.1.1
-#config.zk.servers={{.Values.zookeeper.name}}:{{.Values.zookeeper.port}}
config.zk.servers=zookeeper:2181
-#config.zk.root=/fe3c/cambria/config
-
-
###############################################################################
##
## Kafka Connection
## if you want to change request.required.acks it can take this one value
#kafka.metadata.broker.list=localhost:9092,localhost:9093
#kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
-kafka.metadata.broker.list=kafka:9092
+kafka.metadata.broker.list=message-router-kafka:9092
##kafka.request.required.acks=-1
#kafka.client.zookeeper=${config.zk.servers}
consumer.timeout.ms=100
cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
authentication.adminSecret=fe3cCompound
-#cambria.secureConfig.key[pc569h]=YT3XPyxEmKCTLI2NK+Sjbw==
-#cambria.secureConfig.iv[pc569h]=rMm2jhR3yVnU+u2V9Ugu3Q==
###############################################################################
## This server can report its metrics periodically on a topic.
##
#metrics.send.cambria.enabled=true
-#metrics.send.cambria.topic=cambria.apinode.metrics #msgrtr.apinode.metrics.dmaap
+#metrics.send.cambria.topic=cambria.apinode.metrics
+#msgrtr.apinode.metrics.dmaap
#metrics.send.cambria.sendEverySeconds=60
cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
consumer.timeout=17
default.partitions=3
-default.replicas=3
+default.replicas=1
##############################################################################
#100mb
maxcontentlength=10000
kafka.max.poll.interval.ms=300000
kafka.heartbeat.interval.ms=60000
kafka.session.timeout.ms=240000
-kafka.max.poll.records=1000
-
+kafka.max.poll.records=1000
\ No newline at end of file
-aaf_locate_url=https://aaf-locate.{{ include "common.namespace" . }}:8095
+#Removed to disable aaf in test env
+#aaf_locate_url=https://aaf-onap-test.osaaf.org:8095\
aaf_url=https://AAF_LOCATE_URL/onap.org.osaaf.aaf.service:2.1
aaf_env=DEV
aaf_lur=org.onap.aaf.cadi.aaf.v2_0.AAFLurPerm
-cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks
-cadi_truststore_password=enc:mN6GiIzFQxKGDzAXDOs7b4j8DdIX02QrZ9QOWNRpxV3rD6whPCfizSMZkJwxi_FJ
+#Removed to disable aaf in test env
+# cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks
+# cadi_truststore_password=8FyfX+ar;0$uZQ0h9*oXchNX
cadi_keyfile=/appl/dmaapMR1/etc/org.onap.dmaap.mr.keyfile
cadi_alias=dmaapmr@mr.dmaap.onap.org
cadi_keystore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.p12
-cadi_keystore_password=enc:_JJT2gAEkRzXla5xfDIHal8pIoIB5iIos3USvZQT6sL-l14LpI5fRFR_QIGUCh5W
+cadi_keystore_password=GDQttV7)BlOvWMf6F7tz&cjy
cadi_x509_issuers=CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_7, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US
cadi_loglevel=INFO
cadi_protocols=TLSv1.1,TLSv1.2
cadi_latitude=37.78187
-cadi_longitude=-122.26147
-
+cadi_longitude=-122.26147
\ No newline at end of file
<!--
============LICENSE_START=======================================================
- Copyright © 2020 Nordix Foundation. All rights reserved.
- Copyright © 2019 AT&T Intellectual Property. All rights reserved.
+ Copyright © 2019 AT&T Intellectual Property. All rights reserved.
================================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
</root>
</configuration>
-
Server {
org.apache.zookeeper.server.auth.DigestLoginModule required
- user_kafka=kafka_secret;
-};
-
+ user_kafka="kafka_secret";
+};
\ No newline at end of file
+++ /dev/null
-# LICENSE_START=======================================================
-# org.onap.dmaap
-# ================================================================================
-# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============LICENSE_END=========================================================
-#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
-#
-###############################################################################
-###############################################################################
-##
-## Cambria API Server config
-##
-## Default values are shown as commented settings.
-##
-###############################################################################
-##
-## HTTP service
-##
-## 3904 is standard as of 7/29/14.
-#
-## Zookeeper Connection
-##
-## Both Cambria and Kafka make use of Zookeeper.
-##
-config.zk.servers=zookeeper:2181
-
-###############################################################################
-##
-## Kafka Connection
-##
-## Items below are passed through to Kafka's producer and consumer
-## configurations (after removing "kafka.")
-## if you want to change request.required.acks it can take this one value
-#kafka.metadata.broker.list=localhost:9092,localhost:9093
-#kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
-kafka.metadata.broker.list=kaka:9092
-##kafka.request.required.acks=-1
-#kafka.client.zookeeper=${config.zk.servers}
-consumer.timeout.ms=100
-zookeeper.connection.timeout.ms=6000
-zookeeper.session.timeout.ms=20000
-zookeeper.sync.time.ms=2000
-auto.commit.interval.ms=1000
-fetch.message.max.bytes =1000000
-auto.commit.enable=false
-
-#(backoff*retries > zksessiontimeout)
-kafka.rebalance.backoff.ms=10000
-kafka.rebalance.max.retries=6
-
-
-###############################################################################
-##
-## Secured Config
-##
-## Some data stored in the config system is sensitive -- API keys and secrets,
-## for example. to protect it, we use an encryption layer for this section
-## of the config.
-##
-## The key is a base64 encode AES key. This must be created/configured for
-## each installation.
-#cambria.secureConfig.key=
-##
-## The initialization vector is a 16 byte value specific to the secured store.
-## This must be created/configured for each installation.
-#cambria.secureConfig.iv=
-
-## Southfield Sandbox
-cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
-cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
-authentication.adminSecret=fe3cCompound
-
-
-###############################################################################
-##
-## Consumer Caching
-##
-## Kafka expects live connections from the consumer to the broker, which
-## obviously doesn't work over connectionless HTTP requests. The Cambria
-## server proxies HTTP requests into Kafka consumer sessions that are kept
-## around for later re-use. Not doing so is costly for setup per request,
-## which would substantially impact a high volume consumer's performance.
-##
-## This complicates Cambria server failover, because we often need server
-## A to close its connection before server B brings up the replacement.
-##
-
-## The consumer cache is normally enabled.
-#cambria.consumer.cache.enabled=true
-
-## Cached consumers are cleaned up after a period of disuse. The server inspects
-## consumers every sweepFreqSeconds and will clean up any connections that are
-## dormant for touchFreqMs.
-#cambria.consumer.cache.sweepFreqSeconds=15
-cambria.consumer.cache.touchFreqMs=120000
-##stickforallconsumerrequests=false
-## The cache is managed through ZK. The default value for the ZK connection
-## string is the same as config.zk.servers.
-#cambria.consumer.cache.zkConnect=${config.zk.servers}
-
-##
-## Shared cache information is associated with this node's name. The default
-## name is the hostname plus the HTTP service port this host runs on. (The
-## hostname is determined via InetAddress.getLocalHost ().getCanonicalHostName(),
-## which is not always adequate.) You can set this value explicitly here.
-##
-#cambria.api.node.identifier=<use-something-unique-to-this-instance>
-
-#cambria.rateLimit.maxEmptyPollsPerMinute=30
-#cambria.rateLimitActual.delay.ms=10
-
-###############################################################################
-##
-## Metrics Reporting
-##
-## This server can report its metrics periodically on a topic.
-##
-#metrics.send.cambria.enabled=true
-#metrics.send.cambria.topic=cambria.apinode.metrics
-#msgrtr.apinode.metrics.dmaap
-#metrics.send.cambria.sendEverySeconds=60
-
-cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
-consumer.timeout=17
-default.partitions=3
-default.replicas=3
-##############################################################################
-#100mb
-maxcontentlength=10000
-
-
-##############################################################################
-#AAF Properties
-msgRtr.namespace.aaf=org.onap.dmaap.mr.topic
-msgRtr.topicfactory.aaf=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
-enforced.topic.name.AAF=org.onap.dmaap.mr
-forceAAF=false
-transidUEBtopicreqd=false
-defaultNSforUEB=org.onap.dmaap.mr
-##############################################################################
-#Mirror Maker Agent
-
-msgRtr.mirrormakeradmin.aaf=org.onap.dmaap.mr.mirrormaker|*|admin
-msgRtr.mirrormakeruser.aaf=org.onap.dmaap.mr.mirrormaker|*|user
-msgRtr.mirrormakeruser.aaf.create=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
-msgRtr.mirrormaker.timeout=15000
-msgRtr.mirrormaker.topic=org.onap.dmaap.mr.mirrormakeragent
-msgRtr.mirrormaker.consumergroup=mmagentserver
-msgRtr.mirrormaker.consumerid=1
-
-kafka.max.poll.interval.ms=300000
-kafka.heartbeat.interval.ms=60000
-kafka.session.timeout.ms=240000
-kafka.max.poll.records=1000
\ No newline at end of file
+++ /dev/null
-#aaf_locate_url=https://aaf-onap-test.osaaf.org:8095\
-aaf_url=https://AAF_LOCATE_URL/onap.org.osaaf.aaf.service:2.1
-aaf_env=DEV
-aaf_lur=org.onap.aaf.cadi.aaf.v2_0.AAFLurPerm
-
-cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks
-cadi_truststore_password=8FyfX+ar;0$uZQ0h9*oXchNX
-
-cadi_keyfile=/appl/dmaapMR1/etc/org.onap.dmaap.mr.keyfile
-
-cadi_alias=dmaapmr@mr.dmaap.onap.org
-cadi_keystore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.p12
-cadi_keystore_password=GDQttV7)BlOvWMf6F7tz&cjy
-cadi_x509_issuers=CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_7, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US
-
-cadi_loglevel=INFO
-cadi_protocols=TLSv1.1,TLSv1.2
-cadi_latitude=37.78187
-cadi_longitude=-122.26147
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
- name: $MR_DMAAP_KUBE_APP_NAME
+ name: $MR_DMAAP_APP_NAME
namespace: $KUBE_ONAP_NAMESPACE
labels:
- run: $MR_DMAAP_KUBE_APP_NAME
+ run: $MR_DMAAP_APP_NAME
autotest: DMAAPMR
spec:
type: ClusterIP
protocol: TCP
name: https
selector:
- run: $MR_DMAAP_KUBE_APP_NAME
+ run: $MR_DMAAP_APP_NAME
---
apiVersion: v1
kind: Service
metadata:
- name: $MR_KAFKA_BWDS_NAME
+ name: $MR_KAFKA_APP_NAME
namespace: $KUBE_ONAP_NAMESPACE
labels:
- run: $MR_KAFKA_BWDS_NAME
+ run: $MR_KAFKA_APP_NAME
autotest: DMAAPMR
spec:
type: ClusterIP
ports:
- - port: 9092
- targetPort: 9095
+ - port: $MR_KAFKA_PORT
+ targetPort: $MR_KAFKA_PORT
protocol: TCP
name: http
selector:
- run: $MR_KAFKA_BWDS_NAME
+ run: $MR_KAFKA_APP_NAME
---
apiVersion: v1
kind: Service
spec:
type: ClusterIP
ports:
- - port: 2181
- targetPort: 2181
+ - port: $MR_ZOOKEEPER_PORT
+ targetPort: $MR_ZOOKEEPER_PORT
protocol: TCP
name: http
selector:
run: $MR_ZOOKEEPER_APP_NAME
-
-
-# ---
-# apiVersion: v1
-# kind: Service
-# metadata:
-# name: dmaap-mr
-# namespace: $KUBE_ONAP_NAMESPACE
-# labels:
-# run: $MR_DMAAP_KUBE_APP_NAME
-# autotest: DMAAPMR
-# spec:
-# type: ClusterIP
-# ports:
-# - port: $MR_EXTERNAL_PORT
-# targetPort: $MR_INTERNAL_PORT
-# protocol: TCP
-# name: http
-# - port: $MR_EXTERNAL_SECURE_PORT
-# targetPort: $MR_INTERNAL_SECURE_PORT
-# protocol: TCP
-# name: https
-# selector:
-# run: $MR_DMAAP_KUBE_APP_NAME
-# ---
-# apiVersion: v1
-# kind: Service
-# metadata:
-# name: dmaap-kafka
-# namespace: $KUBE_ONAP_NAMESPACE
-# labels:
-# run: $MR_KAFKA_BWDS_NAME
-# autotest: DMAAPMR
-# spec:
-# type: ClusterIP
-# ports:
-# - port: 9092
-# targetPort: 9092
-# protocol: TCP
-# name: http
-# selector:
-# run: $MR_KAFKA_BWDS_NAME
-# ---
-# apiVersion: v1
-# kind: Service
-# metadata:
-# name: kafka
-# namespace: $KUBE_ONAP_NAMESPACE
-# labels:
-# run: $MR_KAFKA_BWDS_NAME
-# autotest: DMAAPMR
-# spec:
-# type: ClusterIP
-# ports:
-# - port: 9092
-# targetPort: 9092
-# protocol: TCP
-# name: http
-# selector:
-# run: $MR_KAFKA_BWDS_NAME
-# ---
-# apiVersion: v1
-# kind: Service
-# metadata:
-# name: dmaap-zookeeper
-# namespace: $KUBE_ONAP_NAMESPACE
-# labels:
-# run: $MR_ZOOKEEPER_APP_NAME
-# autotest: DMAAPMR
-# spec:
-# type: ClusterIP
-# ports:
-# - port: 2181
-# targetPort: 2181
-# protocol: TCP
-# name: http
-# selector:
- run: $MR_ZOOKEEPER_APP_NAME
\ No newline at end of file
- name: TOPIC_READ
value: $TOPIC_READ
- name: TOPIC_WRITE
- value: $TOPIC_WRITE
\ No newline at end of file
+ value: $TOPIC_WRITE
+ - name: GENERIC_TOPICS_UPLOAD_BASEURL
+ value: $GENERIC_TOPICS_UPLOAD_BASEURL
\ No newline at end of file
environment:
- TOPIC_READ=${TOPIC_READ}
- TOPIC_WRITE=${TOPIC_WRITE}
+ - GENERIC_TOPICS_UPLOAD_BASEURL=${GENERIC_TOPICS_UPLOAD_BASEURL}
labels:
- "nrttest_app=MR"
- "nrttest_dp=${MR_STUB_DISPLAY_NAME}"
kind: Deployment
metadata:
name: $SDNC_APP_NAME
- namespace: $KUBE_SNDC_NAMESPACE
+ namespace: $KUBE_SDNC_NAMESPACE
labels:
run: $SDNC_APP_NAME
autotest: SDNC
kind: Deployment
metadata:
name: $SDNC_DB_APP_NAME
- namespace: $KUBE_SNDC_NAMESPACE
+ namespace: $KUBE_SDNC_NAMESPACE
labels:
run: $SDNC_DB_APP_NAME
autotest: SDNC
kind: Deployment
metadata:
name: $SDNC_APP_NAME
- namespace: $KUBE_SNDC_NAMESPACE
+ namespace: $KUBE_SDNC_NAMESPACE
labels:
run: $SDNC_APP_NAME
autotest: SDNC
kind: Deployment
metadata:
name: $SDNC_DB_APP_NAME
- namespace: $KUBE_SNDC_NAMESPACE
+ namespace: $KUBE_SDNC_NAMESPACE
labels:
run: $SDNC_DB_APP_NAME
autotest: SDNC
kind: Service
metadata:
name: $SDNC_APP_NAME
- namespace: $KUBE_SNDC_NAMESPACE
+ namespace: $KUBE_SDNC_NAMESPACE
labels:
run: $SDNC_APP_NAME
autotest: SDNC
kind: Service
metadata:
name: dbhost
- namespace: $KUBE_SNDC_NAMESPACE
+ namespace: $KUBE_SDNC_NAMESPACE
labels:
run: $SDNC_DB_APP_NAME
autotest: SDNC
kind: Service
metadata:
name: sdnctldb01
- namespace: $KUBE_SNDC_NAMESPACE
+ namespace: $KUBE_SDNC_NAMESPACE
labels:
run: $SDNC_DB_APP_NAME
autotest: SDNC