Updates of function test for f-release 75/7475/1
author BjornMagnussonXA <bjorn.magnusson@est.tech>
Mon, 27 Dec 2021 12:38:01 +0000 (13:38 +0100)
committer BjornMagnussonXA <bjorn.magnusson@est.tech>
Mon, 27 Dec 2021 13:08:04 +0000 (14:08 +0100)
New test profile for f-release
New test profile for onap jakarta release
Updated applicable testcases for new profiles
Testcase for helm manager
Integration of helm, helm manager and chartmuseum
Updated README files

Issue-ID: NONRTRIC-690

Signed-off-by: BjornMagnussonXA <bjorn.magnusson@est.tech>
Change-Id: Iaf177231bbfb3411595d2665149bc1e773e0f911

62 files changed:
test/auto-test/FTC1.sh
test/auto-test/FTC10.sh
test/auto-test/FTC100.sh
test/auto-test/FTC110.sh
test/auto-test/FTC1100.sh
test/auto-test/FTC150.sh
test/auto-test/FTC1800.sh
test/auto-test/FTC2001.sh
test/auto-test/FTC2002.sh
test/auto-test/FTC2003.sh
test/auto-test/FTC300.sh
test/auto-test/FTC3000.sh
test/auto-test/FTC310.sh
test/auto-test/FTC350.sh
test/auto-test/FTC4000.sh [new file with mode: 0755]
test/auto-test/FTC800.sh
test/auto-test/FTC805.sh
test/auto-test/FTC810.sh
test/auto-test/FTC850.sh
test/auto-test/FTC900.sh
test/auto-test/ONAP_UC.sh
test/auto-test/PM_DEMO.sh
test/auto-test/PM_EI_DEMO.sh
test/auto-test/README.md
test/auto-test/startMR.sh
test/common/README.md
test/common/chartmus_api_functions.sh [new file with mode: 0644]
test/common/clean_kube.sh
test/common/consul_api_functions.sh
test/common/cp_api_functions.sh
test/common/cr_api_functions.sh
test/common/dmaapadp_api_functions.sh
test/common/dmaapmed_api_functions.sh
test/common/helmmanager_api_functions.sh [new file with mode: 0644]
test/common/httpproxy_api_functions.sh
test/common/ics_api_functions.sh
test/common/kafkapc_api_functions.sh
test/common/kubeproxy_api_functions.sh
test/common/localhelm_api_functions.sh [new file with mode: 0644]
test/common/mr_api_functions.sh
test/common/ngw_api_functions.sh
test/common/pa_api_functions.sh
test/common/prodstub_api_functions.sh
test/common/pvccleaner_api_functions.sh
test/common/rc_api_functions.sh
test/common/ricsim_api_functions.sh
test/common/sdnc_api_functions.sh
test/common/test_env-onap-istanbul.sh
test/common/test_env-onap-jakarta.sh [new file with mode: 0644]
test/common/test_env-oran-e-release.sh
test/common/test_env-oran-f-release.sh [new file with mode: 0755]
test/common/testcase_common.sh
test/common/testengine_config.sh
test/simulator-group/chartmuseum/.gitignore [new file with mode: 0644]
test/simulator-group/chartmuseum/app.yaml [new file with mode: 0644]
test/simulator-group/chartmuseum/docker-compose.yml [new file with mode: 0644]
test/simulator-group/chartmuseum/svc.yaml [new file with mode: 0644]
test/simulator-group/helmmanager/.gitignore [new file with mode: 0644]
test/simulator-group/helmmanager/app.yaml [new file with mode: 0644]
test/simulator-group/helmmanager/docker-compose.yml [new file with mode: 0644]
test/simulator-group/helmmanager/sa.yaml [new file with mode: 0644]
test/simulator-group/helmmanager/svc.yaml [new file with mode: 0644]

index 1e34405..fe02bf2 100755 (executable)
@@ -34,7 +34,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index a561cc6..50cf98f 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index da623ce..670ea5e 100755 (executable)
@@ -34,7 +34,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index f855f6f..5b84084 100755 (executable)
@@ -34,7 +34,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index cf172a0..265db07 100755 (executable)
@@ -34,7 +34,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index 822f835..8df6f08 100755 (executable)
@@ -28,7 +28,7 @@ KUBE_INCLUDED_IMAGES=" RICSIM SDNC KUBEPROXY"
 KUBE_PRESTARTED_IMAGES=" "
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index 0948b65..749ba82 100755 (executable)
@@ -34,7 +34,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index fa1aea1..7c1202c 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index 1b05763..37d3576 100755 (executable)
@@ -27,7 +27,7 @@ KUBE_INCLUDED_IMAGES=""
 KUBE_PRESTARTED_IMAGES=" "
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-ISTANBUL"
+SUPPORTED_PROFILES="ONAP-ISTANBUL ONAP-JAKARTA"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER"
 
index 32412b7..824ff3e 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES=""
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index 232e5a8..a53dc36 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index 5b8544a..2293919 100755 (executable)
@@ -34,7 +34,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES=""
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index 53437e8..a7360d0 100755 (executable)
@@ -24,7 +24,7 @@ TC_ONELINE_DESCR="Resync of RIC via changes in the consul config or pushed confi
 DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM NGW KUBEPROXY"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER"
 
index 31e40ab..d78be6c 100755 (executable)
@@ -28,7 +28,7 @@ KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
 KUBE_PRESTARTED_IMAGES=""
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
diff --git a/test/auto-test/FTC4000.sh b/test/auto-test/FTC4000.sh
new file mode 100755 (executable)
index 0000000..227c04f
--- /dev/null
@@ -0,0 +1,99 @@
+#!/bin/bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2021 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+TC_ONELINE_DESCR="Test of Helm Manager"
+
+#App names to include in the test when running docker, space separated list
+DOCKER_INCLUDED_IMAGES="KUBEPROXY CHARTMUS LOCALHELM HELMMANAGER"
+
+#App names to include in the test when running kubernetes, space separated list
+KUBE_INCLUDED_IMAGES="KUBEPROXY CHARTMUS LOCALHELM HELMMANAGER"
+#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
+KUBE_PRESTARTED_IMAGES=""
+
+#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
+#the image is not configured in the supplied env_file
+#Used for images not applicable to all supported profile
+CONDITIONALLY_IGNORED_IMAGES=""
+
+#Supported test environment profiles
+SUPPORTED_PROFILES="ORAN-E-RELEASE ORAN-F-RELEASE"
+#Supported run modes
+SUPPORTED_RUNMODES="DOCKER KUBE"
+
+. ../common/testcase_common.sh $@
+
+setup_testenvironment
+
+#### TEST BEGIN ####
+
+clean_environment
+
+start_kube_proxy
+
+start_chart_museum
+
+localhelm_create_test_chart dummy
+
+localhelm_package_test_chart dummy
+
+chartmus_upload_test_chart dummy
+
+clean_and_create_namespace test-ns
+
+localhelm_installed_chart_release NOTINSTALLED test-release test-ns
+
+start_helm_manager
+
+helm_manager_api_get_charts 200 EMPTY
+
+helm_manager_api_exec_add_repo cm $CHART_MUS_SERVICE_PATH
+
+helm_manager_api_post_repo 201 cm $CHART_MUS_SERVICE_HTTPX $CHART_MUS_SERVICE_HOST $CHART_MUS_SERVICE_PORT
+
+helm_manager_api_post_onboard_chart 200 cm dummy DEFAULT-VERSION test-release test-ns
+
+helm_manager_api_get_charts 200 cm dummy DEFAULT-VERSION test-release test-ns
+
+helm_manager_api_post_install_chart 201 dummy DEFAULT-VERSION
+
+localhelm_installed_chart_release INSTALLED test-release test-ns
+
+helm_manager_api_get_charts 200 cm dummy DEFAULT-VERSION test-release test-ns
+
+helm_manager_api_uninstall_chart 204 dummy DEFAULT-VERSION
+
+helm_manager_api_get_charts 200 cm dummy DEFAULT-VERSION test-release test-ns
+
+helm_manager_api_delete_chart 204 dummy DEFAULT-VERSION
+
+helm_manager_api_get_charts 200 EMPTY
+
+localhelm_installed_chart_release NOTINSTALLED test-release test-ns
+
+#### TEST COMPLETE ####
+
+store_logs          END
+
+print_result
+
+auto_clean_environment
+
+
+
index e509f6c..931610b 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index af46814..c7aecdd 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-ISTANBUL ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-ISTANBUL ONAP-JAKARTA ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index ad71f46..9fd1200 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index bd61b3a..cb29618 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index 886b664..a1ae6e4 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index 5d23034..cce6993 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL"
+SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index f3d5dd4..802f8f5 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index 4e6b87c..0eb8946 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES="NGW"
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index 793b92e..a6eab40 100644 (file)
@@ -56,6 +56,12 @@ ORAN E-RELEASE
 ORAN E-RELEASE with nonrtric deployed with helm charts for the "dep" repo
 >```../FTC_HELM_E_RELEASE.sh remote-remove  kube  release --env-file ../common/test_env-oran-e-release.sh --override override_ftc_helm_e_release.sh```
 
+ORAN F-RELEASE
+=========
+>```./PM_EI_DEMO.sh remote-remove  docker  --env-file ../common/test_env-oran-f-release.sh```
+
+>```./PM_EI_DEMO.sh remote-remove  kube  --env-file ../common/test_env-oran-f-release.sh```
+
 ONAP GUILIN
 ===========
 >```./PM_DEMO.sh remote-remove  docker release   --env-file ../common/test_env-onap-guilin.sh```
@@ -100,6 +106,8 @@ The numbering in each series corresponds to the following groupings
 
 30XX - rApp tests
 
+40XX - Helm Manager tests
+
 Suites
 
 To get an overview of the available test scripts, use the following command to print the test script description:
index 2ae6781..fcc9599 100755 (executable)
@@ -33,7 +33,7 @@ KUBE_PRESTARTED_IMAGES=""
 CONDITIONALLY_IGNORED_IMAGES=""
 
 #Supported test environment profiles
-SUPPORTED_PROFILES="ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ORAN-E-RELEASE ORAN-F-RELEASE"
 #Supported run modes
 SUPPORTED_RUNMODES="DOCKER KUBE"
 
index dc7b64d..5f806d6 100644 (file)
@@ -11,6 +11,9 @@ A common curl based function for the agent and ics apis. Also partly used for th
 `cbs_api_function.sh` \
 All functions are implemented in `consul_api_function.sh`.
 
+`chartmus_api_functions.sh` \
+Contains functions for managing a Chartmuseum instance.
+
 `clean-docker.sh` \
 Cleans all containers started by the test environment in docker.
 
@@ -62,6 +65,9 @@ A python script to extract the information from an sdnc (A1 Controller) reply js
 `genstat.sh` \
 This script collects container statistics to a file. Works both in docker and kubernetes (only for docker runtime).
 
+`helmmanager_api_functions.sh` \
+Contains functions for managing and testing of the Helm Manager.
+
 `http_proxy_api_functions.sh` \
 Contains functions for managing the Http Proxy.
 
@@ -74,6 +80,9 @@ Contains functions for managing the kafka producer/consumer. Kafka is started by
 `kube_proxy_api_functions.sh` \
 Contains functions for managing the Kube Proxy - to gain access to all services pod inside a kube cluster or all containers in a private docker network.
 
+`localhelm_api_functions.sh` \
+Contains functions for helm access on localhost.
+
 `mr_api_functions.sh` \
 Contains functions for managing the MR Stub and the Dmaap Message Router
 
@@ -134,6 +143,7 @@ This file must implement the following functions used by the test engine. Note t
 | __<app-short_name>_store_docker_logs |
 | __<app-short_name>_initial_setup |
 | __<app-short_name>_statisics_setup |
+| __<app-short_name>_test_requirements |
 
 In addition, all other functions used for testing of the application shall also be added to the file. For example functions to start the application, setting interface parameters as well as functions to send rest call towards the api of the application and validating the result.
 
@@ -283,6 +293,39 @@ Print out the overall result of the executed test cases.
 |--|
 | None |
 
+# Description of functions in chartmus_api_functions.sh #
+
+## Function: start_chart_museum ##
+
+Start the Chart Museum
+| arg list |
+|--|
+| None |
+
+## Function: chartmus_upload_test_chart ##
+
+Upload a packaged chart to chartmuseum
+| arg list |
+|--|
+| `<chart-name>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<chart-name>` | Name of the chart to upload |
+
+## Function: chartmus_delete_test_chart ##
+
+Delete a chart in chartmuseum
+| arg list |
+|--|
+| `<chart-name> [<version>]` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<chart-name>` | Name of the chart to delete |
+| `<version>` | Chart version, default is 0.1.0 |
+
+
 # Description of functions in consul_api_function.sh #
 
 ## Function: consul_config_app ##
@@ -623,6 +666,130 @@ Start the http proxy container in docker or kube depending on running mode.
 |--|
 | None |
 
+# Description of functions in helmmanager_api_functions.sh #
+
+## Function: use_helm_manager_http ##
+
+Use http for all API calls to the Helm Manager. This is the default protocol.
+| arg list |
+|--|
+| None |
+
+## Function: use_helm_manager_https ##
+
+Use https for all API calls to the Helm Manager.
+| arg list |
+|--|
+| None |
+
+## Function: start_helm_manager ##
+
+Start the Helm Manager container in docker or kube depending on running mode.
+| arg list |
+|--|
+| None |
+
+## Function: helm_manager_api_get_charts ##
+
+Get all charts and compare the expected contents.
+| arg list |
+|--|
+| `<response-code> [ EMPTY | ( <chart> <version> <namespace> <release> <repo> )+ ]` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected response code |
+| `EMPTY` | Indicator for empty list  |
+| `<chart>`| Name of the chart  |
+| `<version>`| Version of the chart  |
+| `<namespace>`| Namespace of the chart  |
+| `<release>`| Release name of the chart  |
+| `<repo>`| Repository of the chart  |
+
+## Function: helm_manager_api_post_repo ##
+
+Add repo to the helm manager.
+| arg list |
+|--|
+| `<response-code> <repo-name> <repo-protocol> <repo-address> <repo-port>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected response code |
+| `<repo-name>` | Name of the repo  |
+| `<repo-protocol>`| Protocol http or https  |
+| `<repo-address>`| Host name of the repo |
+| `<repo-port>`| Host port of the repo  |
+
+## Function: helm_manager_api_post_onboard_chart ##
+
+Onboard a chart to the helm manager.
+| arg list |
+|--|
+| `<response-code> <repo> <chart> <version> <release> <namespace>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected response code |
+| `<repo>`| Target repo of the chart  |
+| `<chart>`| Name of the chart  |
+| `<version>`| Version of the chart  |
+| `<namespace>`| Namespace of the chart  |
+| `<release>`| Release name of the chart  |
+
+## Function: helm_manager_api_post_install_chart ##
+
+Install an onboarded chart.
+| arg list |
+|--|
+| `<response-code> <chart> <version>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected response code |
+| `<chart>`| Name of the chart  |
+| `<version>`| Version of the chart  |
+
+## Function: helm_manager_api_uninstall_chart ##
+
+Uninstall a chart.
+| arg list |
+|--|
+| `<response-code> <chart> <version>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected response code |
+| `<chart>`| Name of the chart  |
+| `<version>`| Version of the chart  |
+
+## Function: helm_manager_api_delete_chart ##
+
+Delete a chart.
+| arg list |
+|--|
+| `<response-code> <chart> <version>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected response code |
+| `<chart>`| Name of the chart  |
+| `<version>`| Version of the chart  |
+
+## Function: helm_manager_api_exec_add_repo ##
+
+Add repo in helm manager by helm using exec.
+| arg list |
+|--|
+| `<repo-name> <repo-url>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<repo-name>`| Name of the repo  |
+| `<repo-url>`| Full url to the repo. Url must be accessible by the container  |
+
+
 # Description of functions in ics_api_functions.sh #
 
 ## Function: use_ics_rest_http ##
@@ -1558,6 +1725,44 @@ No proxy is started if the function is called in docker mode.
 |--|
 | None |
 
+# Description of functions in localhelm_api_functions.sh #
+
+## Function: localhelm_create_test_chart ##
+
+Create a dummy chart using helm
+| arg list |
+|--|
+| `chart-name` |
+
+| parameter | description |
+| --------- | ----------- |
+| `chart-name` | Name of the chart |
+
+## Function: localhelm_package_test_chart ##
+
+Package a dummy chart using helm
+| arg list |
+|--|
+| `chart-name` |
+
+| parameter | description |
+| --------- | ----------- |
+| `chart-name` | Name of the chart |
+
+## Function: localhelm_installed_chart_release ##
+
+Check if a chart is installed or not using helm
+| arg list |
+|--|
+| `INSTALLED|NOTINSTALLED <release-name> <name-space>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `INSTALLED` | Expecting installed chart |
+| `NOTINSTALLED` | Expecting a not installed chart |
+| `release-name` | Name of the release |
+| `name-space` | Expected namespace |
+
 # Description of functions in mr_api_functions.sh #
 
 ## Function: use_mr_http ##
diff --git a/test/common/chartmus_api_functions.sh b/test/common/chartmus_api_functions.sh
new file mode 100644 (file)
index 0000000..a9f09c0
--- /dev/null
@@ -0,0 +1,316 @@
+#!/bin/bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2021 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+# This is a script that contains container/service management functions and test functions for Chartmuseum
+
+
+################ Test engine functions ################
+
+# Create the image var used during the test
+# arg: <image-tag-suffix> (selects staging, snapshot, release etc)
+# <image-tag-suffix> is present only for images with staging, snapshot,release tags
+__CHARTMUS_imagesetup() {
+       __check_and_create_image_var CHARTMUS "CHART_MUS_IMAGE" "CHART_MUS_IMAGE_BASE" "CHART_MUS_IMAGE_TAG" REMOTE_OTHER "$CHART_MUS_DISPLAY_NAME"
+}
+
+# Pull image from remote repo or use locally built image
+# arg: <pull-policy-override> <pull-policy-original>
+# <pull-policy-override> Shall be used for images allowing overriding. For example use a local image when test is started to use released images
+# <pull-policy-original> Shall be used for images that does not allow overriding
+# Both var may contain: 'remote', 'remote-remove' or 'local'
+__CHARTMUS_imagepull() {
+       __check_and_pull_image $2 "$CHART_MUS_DISPLAY_NAME" $CHART_MUS_APP_NAME CHART_MUS_IMAGE
+}
+
+# Build image (only for simulator or interfaces stubs owned by the test environment)
+# arg: <image-tag-suffix> (selects staging, snapshot, release etc)
+# <image-tag-suffix> is present only for images with staging, snapshot,release tags
+__CHARTMUS_imagebuild() {
+       echo -e $RED" Image for app CHARTMUS shall never be built"$ERED
+}
+
+# Generate a string for each included image using the app display name and a docker images format string
+# If a custom image repo is used then also the source image from the local repo is listed
+# arg: <docker-images-format-string> <file-to-append>
+__CHARTMUS_image_data() {
+       echo -e "$CHART_MUS_DISPLAY_NAME\t$(docker images --format $1 $CHART_MUS_IMAGE)" >>   $2
+       if [ ! -z "$CHART_MUS_IMAGE_SOURCE" ]; then
+               echo -e "-- source image --\t$(docker images --format $1 $CHART_MUS_IMAGE_SOURCE)" >>   $2
+       fi
+}
+
+# Scale kubernetes resources to zero
+# All resources shall be ordered to be scaled to 0, if relevant. If not relevant to scale, then do no action.
+# This function is called for apps fully managed by the test script
+__CHARTMUS_kube_scale_zero() {
+       __kube_scale_all_resources $KUBE_SIM_NAMESPACE autotest CHARTMUS
+}
+
+# Scale kubernetes resources to zero and wait until this has been accomplished, if relevant. If not relevant to scale, then do no action.
+# This function is called for prestarted apps not managed by the test script.
+__CHARTMUS_kube_scale_zero_and_wait() {
+       echo -e $RED" CHARTMUS app is not scaled in this state"$ERED
+}
+
+# Delete all kube resouces for the app
+# This function is called for apps managed by the test script.
+__CHARTMUS_kube_delete_all() {
+       __kube_delete_all_resources $KUBE_SIM_NAMESPACE autotest CHARTMUS
+}
+
+# Store docker logs
+# This function is called for apps managed by the test script.
+# args: <log-dir> <file-prexix>
+__CHARTMUS_store_docker_logs() {
+       if [ $RUNMODE == "KUBE" ]; then
+               kubectl  logs -l "autotest=CHARTMUS" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_chartmuseum.log 2>&1
+       else
+               docker logs $CHART_MUS_APP_NAME > $1$2_chartmuseum.log 2>&1
+       fi
+}
+
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__CHARTMUS_initial_setup() {
+       use_chart_mus_http
+}
+
+# Set app short-name, app name and namespace for logging runtime statistics of kubernets pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__CHARTMUS_statisics_setup() {
+       if [ $RUNMODE == "KUBE" ]; then
+               echo "CHARTMUS $CHART_MUS_APP_NAME $KUBE_SIM_NAMESPACE"
+       else
+               echo "CHARTMUS $CHART_MUS_APP_NAME"
+       fi
+}
+
+# Check application requirements, e.g. helm, the the test needs. Exit 1 if req not satisfied
+# args: -
+__CHARTMUS_test_requirements() {
+       :
+}
+
+#######################################################
+
+# Set http as the protocol to use for all communication to the Chartmuseum
+# args: -
+# (Function for test scripts)
+use_chart_mus_http() {
+       __chart_mus_set_protocoll "http" $CHART_MUS_INTERNAL_PORT $CHART_MUS_EXTERNAL_PORT
+}
+
+# Set https as the protocol to use for all communication to the Chartmuseum
+# args: -
+# (Function for test scripts)
+use_chart_mus_https() {
+       __chart_mus_set_protocoll "https" $CHART_MUS_INTERNAL_SECURE_PORT $CHART_MUS_EXTERNAL_SECURE_PORT
+}
+
+# Setup paths to svc/container for internal and external access
+# args: <protocol> <internal-port> <external-port>
+__chart_mus_set_protocoll() {
+       echo -e $BOLD"$CHART_MUS_DISPLAY_NAME protocol setting"$EBOLD
+       echo -e " Using $BOLD $1 $EBOLD towards $CHART_MUS_DISPLAY_NAME"
+
+       ## Access to Chartmuseum
+
+       CHART_MUS_SERVICE_PATH=$1"://"$CHART_MUS_APP_NAME":"$2  # docker access, container->container and script->container via proxy
+       CHART_MUS_SERVICE_PORT=$2
+       CHART_MUS_SERVICE_HOST=$CHART_MUS_APP_NAME
+       if [ $RUNMODE == "KUBE" ]; then
+               CHART_MUS_SERVICE_PATH=$1"://"$CHART_MUS_APP_NAME.$KUBE_SIM_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
+               CHART_MUS_SERVICE_PORT=$3
+               CHART_MUS_SERVICE_HOST=$CHART_MUS_APP_NAME.$KUBE_SIM_NAMESPACE
+       fi
+       CHART_MUS_SERVICE_HTTPX=$1
+
+       echo ""
+}
+
+### Admin API functions Chartmuseum
+
+###########################
+### Chartmuseum functions
+###########################
+
+# Export env vars for config files, docker compose and kube resources
+# args:
+__chartmuseum_export_vars() {
+       export CHART_MUS_APP_NAME
+       export CHART_MUS_DISPLAY_NAME
+
+       export DOCKER_SIM_NWNAME
+       export KUBE_SIM_NAMESPACE
+
+       export CHART_MUS_IMAGE
+       export CHART_MUS_INTERNAL_PORT
+       export CHART_MUS_EXTERNAL_PORT
+
+       export CHART_MUS_CHART_CONTR_CHARTS
+
+}
+
+
+# Start the Chartmuseum in the simulator group
+# args: -
+# (Function for test scripts)
+start_chart_museum() {
+
+       echo -e $BOLD"Starting $CHART_MUS_DISPLAY_NAME"$EBOLD
+
+       if [ $RUNMODE == "KUBE" ]; then
+
+               # Check if app shall be fully managed by the test script
+               __check_included_image "CHARTMUS"
+               retcode_i=$?
+
+               # Check if app shall only be used by the testscipt
+               __check_prestarted_image "CHARTMUS"
+               retcode_p=$?
+
+               if [ $retcode_i -ne 0 ] && [ $retcode_p -ne 0 ]; then
+                       echo -e $RED"The $CHART_MUS_NAME app is not included as managed nor prestarted in this test script"$ERED
+                       echo -e $RED"The $CHART_MUS_APP_NAME will not be started"$ERED
+                       exit
+               fi
+               if [ $retcode_i -eq 0 ] && [ $retcode_p -eq 0 ]; then
+                       echo -e $RED"The $CHART_MUS_APP_NAME app is included both as managed and prestarted in this test script"$ERED
+                       echo -e $RED"The $CHART_MUS_APP_NAME will not be started"$ERED
+                       exit
+               fi
+
+               if [ $retcode_p -eq 0 ]; then
+                       echo -e " Using existing $CHART_MUS_APP_NAME deployment and service"
+                       echo " Setting RC replicas=1"
+                       __kube_scale deployment $CHART_MUS_APP_NAME $KUBE_SIM_NAMESPACE 1
+               fi
+
+               if [ $retcode_i -eq 0 ]; then
+                       echo -e " Creating $CHART_MUS_APP_NAME deployment and service"
+
+            __kube_create_namespace $KUBE_SIM_NAMESPACE
+
+                       __chartmuseum_export_vars
+
+                       # Create service
+                       input_yaml=$SIM_GROUP"/"$CHART_MUS_COMPOSE_DIR"/"svc.yaml
+                       output_yaml=$PWD/tmp/chartmus_svc.yaml
+                       __kube_create_instance service $CHART_MUS_APP_NAME $input_yaml $output_yaml
+
+                       # Create app
+                       input_yaml=$SIM_GROUP"/"$CHART_MUS_COMPOSE_DIR"/"app.yaml
+                       output_yaml=$PWD/tmp/chartmus_app.yaml
+                       __kube_create_instance app $CHART_MUS_APP_NAME $input_yaml $output_yaml
+               fi
+
+               __check_service_start $CHART_MUS_APP_NAME $CHART_MUS_SERVICE_PATH$CHART_MUS_ALIVE_URL
+       else
+
+               # Check if docker app shall be fully managed by the test script
+               __check_included_image 'CHARTMUS'
+               if [ $? -eq 1 ]; then
+                       echo -e $RED"The Chartmuseum app is not included as managed in this test script"$ERED
+                       echo -e $RED"The Chartmuseum will not be started"$ERED
+                       exit
+               fi
+
+               __chartmuseum_export_vars
+
+               __start_container $CHART_MUS_COMPOSE_DIR "" NODOCKERARGS 1 $CHART_MUS_APP_NAME
+
+        __check_service_start $CHART_MUS_APP_NAME $CHART_MUS_SERVICE_PATH$CHART_MUS_ALIVE_URL
+       fi
+    echo ""
+    return 0
+}
+
+# Excute a curl cmd towards the chartmuseum simulator and check the response code.
+# args: TEST|CONF <expected-response-code> <curl-cmd-string>
+__execute_curl_to_chartmuseum() {
+    TIMESTAMP=$(date "+%Y-%m-%d %H:%M:%S")
+    echo "(${BASH_LINENO[0]}) - ${TIMESTAMP}: ${FUNCNAME[0]}" $@ >> $HTTPLOG
+       proxyflag=""
+       if [ ! -z "$KUBE_PROXY_PATH" ]; then
+               if [ $KUBE_PROXY_HTTPX == "http" ]; then
+                       proxyflag=" --proxy $KUBE_PROXY_PATH"
+               else
+                       proxyflag=" --proxy-insecure --proxy $KUBE_PROXY_PATH"
+               fi
+       fi
+       echo " CMD: $3 -skw %{http_code} $proxyflag" >> $HTTPLOG
+       res="$($3 -skw %{http_code} $proxyflag)"
+       echo " RESP: $res" >> $HTTPLOG
+       retcode=$?
+    if [ $retcode -ne 0 ]; then
+        __log_conf_fail_general " Fatal error when executing curl, response: "$retcode
+        return 1
+    fi
+    status=${res:${#res}-3}
+    if [ $status -eq $2 ]; then
+        if [ $1 == "TEST" ]; then
+            __log_test_pass
+        else
+            __log_conf_ok
+        fi
+        return 0
+    fi
+    if [ $1 == "TEST" ]; then
+        __log_test_fail_status_code $2 $status
+        else
+        __log_conf_fail_status_code $2 $status
+    fi
+    return 1
+}
+
+# upload helmchart to chartmuseum
+# arg: <chart-name>
+# Expects the packaged chart <chart-name>-0.1.0.tgz in $TESTENV_TEMP_FILES
+chartmus_upload_test_chart() {
+	__log_conf_start $@
+    if [ $# -ne 1 ]; then
+        __print_err "<chart-name>" $@
+        return 1
+    fi
+	chart_path=$TESTENV_TEMP_FILES/$1"-0.1.0.tgz"
+	if [ ! -f "$chart_path" ]; then
+		echo -e $RED" Cannot find package chart: $chart_path"$ERED
+		__log_conf_fail_general
+		return 1
+	fi
+	# 201 Created is the expected response from the chartmuseum charts API
+	__execute_curl_to_chartmuseum CONF 201 "curl --data-binary @$chart_path $CHART_MUS_SERVICE_PATH/api/charts"
+}
+
+# delete helmchart from chartmuseum
+# arg: <chart-name> [<version>]
+# Version defaults to 0.1.0 when omitted (matching the version used by chartmus_upload_test_chart)
+chartmus_delete_test_chart() {
+	__log_conf_start $@
+    if [ $# -gt 2 ]; then
+        __print_err "<chart-name> [<version>]" $@
+        return 1
+    fi
+	if [ $# -eq 1 ]; then
+		chart_path="/$1/0.1.0"
+	else
+		chart_path="/$1/$2"
+	fi
+	__execute_curl_to_chartmuseum CONF 200 "curl -X DELETE $CHART_MUS_SERVICE_PATH/api/charts"$chart_path
+}
\ No newline at end of file
index a3a3e5b..281f8ce 100755 (executable)
@@ -84,7 +84,7 @@ __kube_wait_for_zero_count() {
 __kube_delete_all_resources() {
        echo " Delete all in namespace $1 ..."
        namespace=$1
-       resources="deployments replicaset statefulset services pods configmaps pvc "
+       resources="deployments replicaset statefulset services pods configmaps pvc serviceaccounts"
        for restype in $resources; do
                result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
                if [ $? -eq 0 ] && [ ! -z "$result" ]; then
@@ -97,8 +97,8 @@ __kube_delete_all_resources() {
 }
 
 __kube_delete_all_pv() {
-       echo " Delete pv ..."
-       resources="pv"
+       echo " Delete all non-namespaced resources ..."
+       resources="pv clusterrolebindings"
        for restype in $resources; do
                result=$(kubectl get $restype -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
                if [ $? -eq 0 ] && [ ! -z "$result" ]; then
index af85ff3..5adb114 100644 (file)
@@ -180,6 +180,18 @@ __CONSUL_statisics_setup() {
 __CBS_statisics_setup() {
        echo ""
 }
+
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if requirements not satisfied
+# args: -
+__CONSUL_test_requirements() {
+	:	# no-op: no extra host-side tools required
+}
+
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if requirements not satisfied
+# args: -
+__CBS_test_requirements() {
+	:	# no-op: no extra host-side tools required
+}
 #######################################################
 
 
index 992fd68..803184a 100644 (file)
@@ -104,6 +104,12 @@ __CP_statisics_setup() {
        fi
 }
 
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if requirements not satisfied
+# args: -
+__CP_test_requirements() {
+	:	# no-op: no extra host-side tools required
+}
+
 #######################################################
 
 
index 5116273..40ef7ea 100644 (file)
@@ -131,6 +131,12 @@ __CR_statisics_setup() {
        done
 }
 
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if requirements not satisfied
+# args: -
+__CR_test_requirements() {
+	:	# no-op: no extra host-side tools required
+}
+
 #######################################################
 
 ################
index a9605ec..9f8dc5f 100644 (file)
@@ -104,6 +104,12 @@ __DMAAPADP_statisics_setup() {
        fi
 }
 
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if requirements not satisfied
+# args: -
+__DMAAPADP_test_requirements() {
+	:	# no-op: no extra host-side tools required
+}
+
 #######################################################
 
 # Set http as the protocol to use for all communication to the Dmaap adapter
index 35280a4..8ed0169 100644 (file)
@@ -104,6 +104,12 @@ __DMAAPMED_statisics_setup() {
        fi
 }
 
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if requirements not satisfied
+# args: -
+__DMAAPMED_test_requirements() {
+	:	# no-op: no extra host-side tools required
+}
+
 #######################################################
 
 # Set http as the protocol to use for all communication to the Dmaap mediator
diff --git a/test/common/helmmanager_api_functions.sh b/test/common/helmmanager_api_functions.sh
new file mode 100644 (file)
index 0000000..455387a
--- /dev/null
@@ -0,0 +1,530 @@
+#!/bin/bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2021 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+# This is a script that contains container/service management functions and test functions for the Helm Manager
+
+################ Test engine functions ################
+
+# Create the image var used during the test
+# arg: [<image-tag-suffix>] (selects staging, snapshot, release etc)
+# <image-tag-suffix> is present only for images with staging, snapshot, release tags
+__HELMMANAGER_imagesetup() {
+	# Delegates to the test engine to set HELM_MANAGER_IMAGE from base+tag vars
+	__check_and_create_image_var HELMMANAGER "HELM_MANAGER_IMAGE" "HELM_MANAGER_IMAGE_BASE" "HELM_MANAGER_IMAGE_TAG" $1 "$HELM_MANAGER_DISPLAY_NAME"
+}
+
+# Pull image from remote repo or use locally built image
+# arg: <pull-policy-override> <pull-policy-original>
+# <pull-policy-override> Shall be used for images allowing overriding. For example use a local image when test is started to use released images
+# <pull-policy-original> Shall be used for images that does not allow overriding
+# Both arg var may contain: 'remote', 'remote-remove' or 'local'
+__HELMMANAGER_imagepull() {
+	# Only $1 (the override policy) is used - helm manager presumably allows overriding; $2 unused here
+	__check_and_pull_image $1 "$HELM_MANAGER_DISPLAY_NAME" $HELM_MANAGER_APP_NAME HELM_MANAGER_IMAGE
+}
+
+# Generate a string for each included image using the app display name and a docker images format string
+# If a custom image repo is used then also the source image from the local repo is listed
+# arg: <docker-images-format-string> <file-to-append>
+__HELMMANAGER_image_data() {
+	echo -e "$HELM_MANAGER_DISPLAY_NAME\t$(docker images --format $1 $HELM_MANAGER_IMAGE)" >>   $2
+	if [ ! -z "$HELM_MANAGER_IMAGE_SOURCE" ]; then
+		# Also list the local source image when a custom image repo is configured
+		echo -e "-- source image --\t$(docker images --format $1 $HELM_MANAGER_IMAGE_SOURCE)" >>   $2
+	fi
+}
+
+# Scale kubernetes resources to zero
+# All resources shall be ordered to be scaled to 0, if relevant. If not relevant to scale, then do no action.
+# This function is called for apps fully managed by the test script
+__HELMMANAGER_kube_scale_zero() {
+	# Scales every resource labelled autotest=HELMMANAGER in the nonrtric namespace
+	__kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest HELMMANAGER
+}
+
+# Scale kubernetes resources to zero and wait until this has been accomplished, if relevant. If not relevant to scale, then do no action.
+# This function is called for prestarted apps not managed by the test script.
+__HELMMANAGER_kube_scale_zero_and_wait() {
+	# NOTE(review): selects by label app=<namespace>-helmmanagerservice - confirm this matches the prestarted deployment's labels
+	__kube_scale_and_wait_all_resources $KUBE_NONRTRIC_NAMESPACE app "$KUBE_NONRTRIC_NAMESPACE"-helmmanagerservice
+}
+
+# Delete all kube resources for the app
+# This function is called for apps managed by the test script.
+__HELMMANAGER_kube_delete_all() {
+	# Deletes every resource labelled autotest=HELMMANAGER in the nonrtric namespace
+	__kube_delete_all_resources $KUBE_NONRTRIC_NAMESPACE autotest HELMMANAGER
+}
+
+# Store docker logs
+# This function is called for apps managed by the test script.
+# args: <log-dir> <file-prefix>
+__HELMMANAGER_store_docker_logs() {
+	if [ $RUNMODE == "KUBE" ]; then
+		# Collect logs from all pods labelled autotest=HELMMANAGER (full log, --tail=-1)
+		kubectl  logs -l "autotest=HELMMANAGER" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_helmmanager.log 2>&1
+	else
+		docker logs $HELM_MANAGER_APP_NAME > $1$2_helmmanager.log 2>&1
+	fi
+}
+
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__HELMMANAGER_initial_setup() {
+	use_helm_manager_http	# default to http; test scripts may switch with use_helm_manager_https
+}
+
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+# NOTE(review): 'statisics' is misspelled but the name is part of the test-engine callback interface - keep as is
+__HELMMANAGER_statisics_setup() {
+	if [ $RUNMODE == "KUBE" ]; then
+		echo "HELMMANAGER $HELM_MANAGER_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+	else
+		echo "HELMMANAGER $HELM_MANAGER_APP_NAME"
+	fi
+}
+
+# Check application requirements, e.g. helm, the the test needs. Exit 1 if req not satisfied
+# args: -
+__HELMMANAGER_test_requirements() {
+       tmp=$(which helm)
+       if [ $? -ne 0 ]; then
+               echo $RED" Helm3 is required for running helm manager tests. Pls install helm3"
+               exit 1
+       fi
+       tmp_version=$(helm version | grep 'v3')
+       if [ -z "$tmp_version" ]; then
+               echo $RED" Helm3 is required for running helm manager tests. Pls install helm3"
+               exit 1
+       fi
+}
+
+#######################################################
+
+# Set http as the protocol to use for all communication to the Helm Manager
+# args: -
+# (Function for test scripts)
+use_helm_manager_http() {
+	__helm_manager_set_protocoll "http" $HELM_MANAGER_INTERNAL_PORT $HELM_MANAGER_EXTERNAL_PORT
+}
+
+# Set https as the protocol to use for all communication to the Helm Manager
+# args: -
+# (Function for test scripts)
+use_helm_manager_https() {
+	__helm_manager_set_protocoll "https" $HELM_MANAGER_INTERNAL_SECURE_PORT $HELM_MANAGER_EXTERNAL_SECURE_PORT
+}
+
+# Setup paths to svc/container for internal and external access
+# args: <protocol> <internal-port> <external-port>
+# Sets the global HELMMANAGER_SERVICE_PATH; note that basic-auth credentials are embedded in the URL
+__helm_manager_set_protocoll() {
+	echo -e $BOLD"$HELM_MANAGER_DISPLAY_NAME protocol setting"$EBOLD
+	echo -e " Using $BOLD $1 $EBOLD towards $HELM_MANAGER_DISPLAY_NAME"
+
+	## Access to Helm Manager
+
+	HELMMANAGER_SERVICE_PATH=$1"://$HELM_MANAGER_USER:$HELM_MANAGER_PWD@"$HELM_MANAGER_APP_NAME":"$2  # docker access, container->container and script->container via proxy
+	if [ $RUNMODE == "KUBE" ]; then
+		HELMMANAGER_SERVICE_PATH=$1"://$HELM_MANAGER_USER:$HELM_MANAGER_PWD@"$HELM_MANAGER_APP_NAME.$KUBE_NONRTRIC_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
+	fi
+
+	echo ""
+}
+
+# Export env vars for config files, docker compose and kube resources
+# args: -
+# The exported vars are substituted into the compose/kube yaml files at deploy time
+__helm_manager_export_vars() {
+
+	export HELM_MANAGER_APP_NAME
+	export HELM_MANAGER_DISPLAY_NAME
+
+	export DOCKER_SIM_NWNAME
+	export KUBE_NONRTRIC_NAMESPACE
+
+	export HELM_MANAGER_EXTERNAL_PORT
+	export HELM_MANAGER_INTERNAL_PORT
+	export HELM_MANAGER_EXTERNAL_SECURE_PORT
+	export HELM_MANAGER_INTERNAL_SECURE_PORT
+	export HELM_MANAGER_CLUSTER_ROLE
+	export HELM_MANAGER_SA_NAME
+	export HELM_MANAGER_ALIVE_URL
+	export HELM_MANAGER_COMPOSE_DIR
+	export HELM_MANAGER_USER
+	export HELM_MANAGER_PWD
+}
+
+# Start the Helm Manager container
+# args: -
+# (Function for test scripts)
+# In KUBE mode the app may be managed (sa+svc+app created here) or prestarted (scaled up here);
+# in docker mode it must be managed. Waits for the alive URL before returning.
+start_helm_manager() {
+
+	echo -e $BOLD"Starting $HELM_MANAGER_DISPLAY_NAME"$EBOLD
+
+	if [ $RUNMODE == "KUBE" ]; then
+
+		# Check if app shall be fully managed by the test script
+		__check_included_image "HELMMANAGER"
+		retcode_i=$?
+
+		# Check if app shall only be used by the test script
+		__check_prestarted_image "HELMMANAGER"
+		retcode_p=$?
+
+		if [ $retcode_i -ne 0 ] && [ $retcode_p -ne 0 ]; then
+			echo -e $RED"The $HELM_MANAGER_APP_NAME app is not included as managed nor prestarted in this test script"$ERED
+			echo -e $RED"The $HELM_MANAGER_APP_NAME will not be started"$ERED
+			exit
+		fi
+		if [ $retcode_i -eq 0 ] && [ $retcode_p -eq 0 ]; then
+			echo -e $RED"The $HELM_MANAGER_APP_NAME app is included both as managed and prestarted in this test script"$ERED
+			echo -e $RED"The $HELM_MANAGER_APP_NAME will not be started"$ERED
+			exit
+		fi
+
+		if [ $retcode_p -eq 0 ]; then
+			echo -e " Using existing $HELM_MANAGER_APP_NAME deployment and service"
+			echo " Setting $HELM_MANAGER_APP_NAME replicas=1"
+			__kube_scale deployment $HELM_MANAGER_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
+		fi
+
+		if [ $retcode_i -eq 0 ]; then
+
+			echo -e " Creating $HELM_MANAGER_APP_NAME app and expose service"
+
+			#Check if nonrtric namespace exists, if not create it
+			__kube_create_namespace $KUBE_NONRTRIC_NAMESPACE
+
+			__helm_manager_export_vars
+
+			#Create sa (service account - needed so helm manager can deploy charts in the cluster)
+			input_yaml=$SIM_GROUP"/"$HELM_MANAGER_COMPOSE_DIR"/"sa.yaml
+			output_yaml=$PWD/tmp/helmmanager_sa_svc.yaml
+			__kube_create_instance sa $HELM_MANAGER_APP_NAME $input_yaml $output_yaml
+
+			#Create service
+			input_yaml=$SIM_GROUP"/"$HELM_MANAGER_COMPOSE_DIR"/"svc.yaml
+			output_yaml=$PWD/tmp/helmmanager_svc.yaml
+			__kube_create_instance service $HELM_MANAGER_APP_NAME $input_yaml $output_yaml
+
+			#Create app
+			input_yaml=$SIM_GROUP"/"$HELM_MANAGER_COMPOSE_DIR"/"app.yaml
+			output_yaml=$PWD/tmp/helmmanager_app.yaml
+			__kube_create_instance app $HELM_MANAGER_APP_NAME $input_yaml $output_yaml
+		fi
+
+		__check_service_start $HELM_MANAGER_APP_NAME $HELMMANAGER_SERVICE_PATH$HELM_MANAGER_ALIVE_URL
+
+	else
+		__check_included_image 'HELMMANAGER'
+		if [ $? -eq 1 ]; then
+			echo -e $RED"The Helm Manager app is not included as managed in this test script"$ERED
+			echo -e $RED"The Helm Manager will not be started"$ERED
+			exit
+		fi
+
+		__helm_manager_export_vars
+
+		__start_container $HELM_MANAGER_COMPOSE_DIR "" NODOCKERARGS 1 $HELM_MANAGER_APP_NAME
+
+		__check_service_start $HELM_MANAGER_APP_NAME $HELMMANAGER_SERVICE_PATH$HELM_MANAGER_ALIVE_URL
+	fi
+	echo ""
+}
+
+# Execute a curl cmd towards the helm manager.
+# args: GET <path>
+# args: POST <path> <file-to-post>
+# args: POST1_2 <path> <name> <file-whose-content-is-sent> <name> <file-to-attach> <name> <file-to-attach>
+# args: DELETE <path>
+# Prints the response body with the 3-digit http status appended, or "000" on curl/usage failure
+# NOTE(review): 'helmmanger' is misspelled but all callers use this name - keep as is
+__execute_curl_to_helmmanger() {
+    TIMESTAMP=$(date "+%Y-%m-%d %H:%M:%S")
+    echo "(${BASH_LINENO[0]}) - ${TIMESTAMP}: ${FUNCNAME[0]}" $@ >> $HTTPLOG
+	proxyflag=""
+	if [ ! -z "$KUBE_PROXY_PATH" ]; then
+		if [ $KUBE_PROXY_HTTPX == "http" ]; then
+			proxyflag=" --proxy $KUBE_PROXY_PATH"
+		else
+			proxyflag=" --proxy-insecure --proxy $KUBE_PROXY_PATH"
+		fi
+	fi
+	if [ $1 == "GET" ]; then
+		curlstring="curl -skw %{http_code} $proxyflag $HELMMANAGER_SERVICE_PATH$2"
+	elif [ $1 == "POST" ]; then
+		curlstring="curl -skw %{http_code} $proxyflag $HELMMANAGER_SERVICE_PATH$2 -X POST --data-binary @$3 -H Content-Type:application/json"
+	elif [ $1 == "POST1_2" ]; then
+		# Multipart post: $3=<$4 sends the file content inline, $5=@$6 and $7=@$8 attach files
+		curlstring="curl -skw %{http_code} $proxyflag $HELMMANAGER_SERVICE_PATH$2 -X POST -F $3=<$4 -F $5=@$6 -F $7=@$8 "
+	elif [ $1 == "DELETE" ]; then
+		curlstring="curl -skw %{http_code} $proxyflag $HELMMANAGER_SERVICE_PATH$2 -X DELETE"
+	else
+		echo " Unknown operation $1" >> $HTTPLOG
+		echo "000"
+		return 1
+	fi
+	echo " CMD: $curlstring" >> $HTTPLOG
+	res="$($curlstring)"
+	retcode=$?
+	echo " RESP: $res" >> $HTTPLOG
+    if [ $retcode -ne 0 ]; then
+        echo "000"
+		return 1
+    fi
+    echo $res
+	return 0
+}
+
+# API Test function: GET â€‹/helm/charts
+# args: <response-code> [ EMPTY | ( <chart> <version> <namespace> <release> <repo> )+ ]
+# (Function for test scripts)
+helm_manager_api_get_charts() {
+       __log_test_start $@
+
+       error_params=1
+       variablecount=$(($#-1))
+    if [ $# -eq 1 ]; then
+               error_params=0
+       elif [ $# -eq 2 ] && [ $2 == "EMPTY" ]; then
+               error_params=0
+       elif [ $(($variablecount%5)) -eq 0 ]; then
+               error_params=0
+       fi
+
+
+       if [ $error_params -eq 1 ]; then
+               __print_err "<response-code>" $@
+               return 1
+       fi
+
+       query="/helm/charts"
+    res="$(__execute_curl_to_helmmanger GET $query)"
+    status=${res:${#res}-3}
+
+       if [ $status -ne $1 ]; then
+               __log_test_fail_status_code $1 $status
+               return 1
+       fi
+
+       if [ $# -gt 1 ]; then
+               body=${res:0:${#res}-3}
+               shift
+               if [ $# -eq 1 ]; then
+                       targetJson='{"charts":[]}'
+               else
+                       targetJson='{"charts":['
+                       arr=(${@})
+                       for ((i=0; i<$#; i=i+5)); do
+                               if [ "$i" -gt 0 ]; then
+                                       targetJson=$targetJson","
+                               fi
+                               chart_version=${arr[$i+2]}
+                               if [ $chart_version == "DEFAULT-VERSION" ]; then
+                                       chart_version="0.1.0"
+                               fi
+                               targetJson=$targetJson'{"releaseName":"'${arr[$i+3]}'","chartId":{"name":"'${arr[$i+1]}'","version":"'0.1.0'"},"namespace":"'${arr[$i+4]}'","repository":{"repoName":"'${arr[$i+0]}'","protocol":null,"address":null,"port":null,"userName":null,"password":null},"overrideParams":null}'
+                       done
+                       targetJson=$targetJson']}'
+               fi
+               echo " TARGET JSON: $targetJson" >> $HTTPLOG
+               res=$(python3 ../common/compare_json.py "$targetJson" "$body")
+
+               if [ $res -ne 0 ]; then
+                       __log_test_fail_body
+                       return 1
+               fi
+       fi
+
+       __log_test_pass
+       return 0
+}
+
+# API Test function: POST /helm/repo - add repo
+# args: <response-code> <repo-name> <repo-protocol> <repo-address> <repo-port>
+# Writes the repo definition to a tmp json file and posts it to the helm manager
+# (Function for test scripts)
+helm_manager_api_post_repo() {
+	__log_test_start $@
+
+    if [ $# -ne 5 ]; then
+		__print_err "<response-code> <repo-name> <repo-protocol> <repo-address> <repo-port>" $@
+		return 1
+	fi
+
+	query="/helm/repo"
+	file="./tmp/cm-repo.json"
+	file_data='{"address" : "'$4'","repoName": "'$2'","protocol": "'$3'","port": "'$5'"}'
+	echo $file_data > $file
+	echo " FILE: $file_data" >> $HTTPLOG
+    res="$(__execute_curl_to_helmmanger POST $query $file)"
+    status=${res:${#res}-3}
+
+	if [ $status -ne $1 ]; then
+		__log_test_fail_status_code $1 $status
+		return 1
+	fi
+
+	__log_test_pass
+	return 0
+}
+
+# API Test function: POST /helm/onboard/chart - onboard chart
+# args: <response-code> <repo> <chart> <version> <release> <namespace>
+# (Function for test scripts)
+helm_manager_api_post_onboard_chart() {
+       __log_test_start $@
+
+    if [ $# -ne 6 ]; then
+               __print_err "<response-code> <repo> <chart> <version> <release> <namespace> " $@
+               return 1
+       fi
+
+       query="/helm/onboard/chart"
+       file="./tmp/chart.json"
+       chart_version=$4
+       if [ $chart_version == "DEFAULT-VERSION" ]; then
+               chart_version="0.1.0"
+       fi
+       file_data='{"chartId":{"name":"'$3'","version":"'$chart_version'"},"namespace":"'$6'","repository":{"repoName":"'$2'"},"releaseName":"'$5'"}'
+       echo $file_data > $file
+       echo " FILE - ($file): $file_data" >> $HTTPLOG
+       file2="./tmp/override.yaml"
+       echo "" >> $file2
+       file3="$TESTENV_TEMP_FILES/"$3"-"$chart_version".tgz"
+    res="$(__execute_curl_to_helmmanger POST1_2 $query info $file values $file2 chart $file3)"
+    status=${res:${#res}-3}
+
+       if [ $status -ne $1 ]; then
+               __log_test_fail_status_code $1 $status
+               return 1
+       fi
+
+       __log_test_pass
+       return 0
+}
+
+# API Test function: POST /helm/install - install chart
+# args: <response-code> <chart> <version>
+# <version> may be DEFAULT-VERSION which maps to 0.1.0
+# (Function for test scripts)
+helm_manager_api_post_install_chart() {
+	__log_test_start $@
+
+    if [ $# -ne 3 ]; then
+		__print_err "<response-code> <chart> <version>" $@
+		return 1
+	fi
+
+	query="/helm/install"
+	file="./tmp/app-installation.json"
+	chart_version=$3
+	if [ $chart_version == "DEFAULT-VERSION" ]; then
+		chart_version="0.1.0"
+	fi
+	file_data='{"name": "'$2'","version": "'$chart_version'"}'
+	echo $file_data > $file
+	echo " FILE - ($file): $file_data" >> $HTTPLOG
+    res="$(__execute_curl_to_helmmanger POST $query $file)"
+    status=${res:${#res}-3}
+
+	if [ $status -ne $1 ]; then
+		__log_test_fail_status_code $1 $status
+		return 1
+	fi
+
+	__log_test_pass
+	return 0
+}
+
+# API Test function: DELETE /helm/uninstall - uninstall chart
+# args: <response-code> <chart> <version>
+# <version> may be DEFAULT-VERSION which maps to 0.1.0
+# (Function for test scripts)
+helm_manager_api_uninstall_chart() {
+	__log_test_start $@
+
+    if [ $# -ne 3 ]; then
+		__print_err "<response-code> <chart> <version> " $@
+		return 1
+	fi
+
+	chart_version=$3
+	if [ $chart_version == "DEFAULT-VERSION" ]; then
+		chart_version="0.1.0"
+	fi
+	query="/helm/uninstall/$2/$chart_version"
+    res="$(__execute_curl_to_helmmanger DELETE $query)"
+    status=${res:${#res}-3}
+
+	if [ $status -ne $1 ]; then
+		__log_test_fail_status_code $1 $status
+		return 1
+	fi
+
+	__log_test_pass
+	return 0
+}
+
+# API Test function: DELETE /helm/chart - delete chart
+# args: <response-code> <chart> <version>
+# <version> may be DEFAULT-VERSION which maps to 0.1.0
+# (Function for test scripts)
+helm_manager_api_delete_chart() {
+	__log_test_start $@
+
+    if [ $# -ne 3 ]; then
+		__print_err "<response-code> <chart> <version> " $@
+		return 1
+	fi
+
+	chart_version=$3
+	if [ $chart_version == "DEFAULT-VERSION" ]; then
+		chart_version="0.1.0"
+	fi
+	query="/helm/chart/$2/$chart_version"
+    res="$(__execute_curl_to_helmmanger DELETE $query)"
+    status=${res:${#res}-3}
+
+	if [ $status -ne $1 ]; then
+		__log_test_fail_status_code $1 $status
+		return 1
+	fi
+
+	__log_test_pass
+	return 0
+}
+
+# Config function: Add repo in helm manager by helm using exec
+# args: <repo-name> <repo-url>
+# (Function for test scripts)
+helm_manager_api_exec_add_repo() {
+       __log_conf_start $@
+
+    if [ $# -ne 2 ]; then
+               __print_err "<repo-name> <repo-url>" $@
+               return 1
+       fi
+
+       if [ $RUNMODE == "DOCKER" ]; then
+               retmsg=$(docker exec -it $HELM_MANAGER_APP_NAME helm repo add $1 $2)
+               retcode=$?
+               if [ $retcode -ne 0 ]; then
+                       __log_conf_fail_general " Cannot add repo to helm, return code: $retcode, msg: $retmsg"
+                       return 1
+               fi
+       else
+               retmsg=$(kubectl exec -it $HELM_MANAGER_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE -- helm repo add $1 $2)
+               retcode=$?
+               if [ $retcode -ne 0 ]; then
+                       __log_conf_fail_general " Cannot add repo to helm, return code: $retcode, msg: $retmsg"
+                       return 1
+               fi
+       fi
+       __log_conf_ok
+       return 0
+}
+
index af11f14..c417d42 100644 (file)
@@ -118,6 +118,12 @@ __HTTPPROXY_statisics_setup() {
        fi
 }
 
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if requirements not satisfied
+# args: -
+__HTTPPROXY_test_requirements() {
+	:	# no-op: no extra host-side tools required
+}
+
 #######################################################
 
 # Set http as the protocol to use for all communication to the http proxy
index df2de4f..0e87517 100644 (file)
@@ -103,6 +103,12 @@ __ICS_statisics_setup() {
        fi
 }
 
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if requirements not satisfied
+# args: -
+__ICS_test_requirements() {
+	:	# no-op: no extra host-side tools required
+}
+
 #######################################################
 
 
index 002657c..4b15641 100644 (file)
@@ -119,6 +119,12 @@ __KAFKAPC_statisics_setup() {
        fi
 }
 
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if requirements not satisfied
+# args: -
+__KAFKAPC_test_requirements() {
+	:	# no-op: no extra host-side tools required
+}
+
 #######################################################
 
 #######################################################
index eb4600c..38aeb21 100644 (file)
@@ -119,6 +119,12 @@ __KUBEPROXY_statisics_setup() {
        fi
 }
 
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if requirements not satisfied
+# args: -
+__KUBEPROXY_test_requirements() {
+	:	# no-op: no extra host-side tools required
+}
+
 #######################################################
 
 ## Access to Kube http proxy
diff --git a/test/common/localhelm_api_functions.sh b/test/common/localhelm_api_functions.sh
new file mode 100644 (file)
index 0000000..c7a6d3d
--- /dev/null
@@ -0,0 +1,193 @@
+#!/bin/bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2021 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+# This is a script that contains functions to handle helm on localhost
+
+
+################ Test engine functions ################
+
+# Create the image var used during the test
+# arg: <image-tag-suffix> (selects staging, snapshot, release etc)
+# <image-tag-suffix> is present only for images with staging, snapshot,release tags
+__LOCALHELM_imagesetup() {
+       :
+}
+
+# Pull image from remote repo or use locally built image
+# arg: <pull-policy-override> <pull-policy-original>
+# <pull-policy-override> Shall be used for images allowing overriding. For example use a local image when test is started to use released images
+# <pull-policy-original> Shall be used for images that does not allow overriding
+# Both var may contain: 'remote', 'remote-remove' or 'local'
+__LOCALHELM_imagepull() {
+       :
+}
+
+# Build image (only for simulator or interfaces stubs owned by the test environment)
+# arg: <image-tag-suffix> (selects staging, snapshot, release etc)
+# <image-tag-suffix> is present only for images with staging, snapshot,release tags
+__LOCALHELM_imagebuild() {
+       :
+}
+
+# Generate a string for each included image using the app display name and a docker images format string
+# If a custom image repo is used then also the source image from the local repo is listed
+# arg: <docker-images-format-string> <file-to-append>
+__LOCALHELM_image_data() {
+       :
+}
+
+# Scale kubernetes resources to zero
+# All resources shall be ordered to be scaled to 0, if relevant. If not relevant to scale, then do no action.
+# This function is called for apps fully managed by the test script
+__LOCALHELM_kube_scale_zero() {
+       :
+}
+
+# Scale kubernetes resources to zero and wait until this has been accomplished, if relevant. If not relevant to scale, then do no action.
+# This function is called for prestarted apps not managed by the test script.
+__LOCALHELM_kube_scale_zero_and_wait() {
+       :
+}
+
+# Delete all kube resouces for the app
+# This function is called for apps managed by the test script.
+__LOCALHELM_kube_delete_all() {
+       :
+}
+
+# Store docker logs
+# This function is called for apps managed by the test script.
+# args: <log-dir> <file-prexix>
+__LOCALHELM_store_docker_logs() {
+       :
+}
+
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__LOCALHELM_initial_setup() {
+       :
+}
+
+# Set app short-name, app name and namespace for logging runtime statistics of kubernets pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__LOCALHELM_statisics_setup() {
+       :
+}
+
+# Check application requirements, e.g. helm, the the test needs. Exit 1 if req not satisfied
+# args: -
+__LOCALHELM_test_requirements() {
+       tmp=$(which helm)
+       if [ $? -ne 0 ]; then
+               echo $RED" Helm3 is required for running this test. Pls install helm3"
+               exit 1
+       fi
+       tmp_version=$(helm version | grep 'v3')
+       if [ -z "$tmp_version" ]; then
+               echo $RED" Helm3 is required for running this test. Pls install helm3"
+               exit 1
+       fi
+}
+
+#######################################################
+
+
+# Create a dummy helmchart
+# arg: <chart-name>
+localhelm_create_test_chart() {
+	__log_conf_start $@
+	if [ $# -ne 1 ]; then
+		__print_err "<chart-name>" $@
+		return 1
+	fi
+	if [[ "$1" == *"/"* ]]; then
+		echo -e $RED"Chart name cannot contain '/'"$ERED
+		__log_conf_fail_general
+		return 1
+	fi
+	helm create $TESTENV_TEMP_FILES/$1 | indent1
+	if [ ${PIPESTATUS[0]} -ne 0 ]; then
+		__log_conf_fail_general
+		return 1
+	fi
+	__log_conf_ok
+	return 0
+}
+
+# Package a created helmchart
+# arg: <chart-name>
+localhelm_package_test_chart() {
+	__log_conf_start $@
+	if [ $# -ne 1 ]; then
+		__print_err "<chart-name>" $@
+		return 1
+	fi
+	if [[ "$1" == *"/"* ]]; then
+		echo -e $RED"Chart name cannot contain '/'"$ERED
+		__log_conf_fail_general
+		return 1
+	fi
+	helm package -d $TESTENV_TEMP_FILES $TESTENV_TEMP_FILES/$1 | indent1
+	if [ ${PIPESTATUS[0]} -ne 0 ]; then
+		__log_conf_fail_general
+		return 1
+	fi
+	__log_conf_ok
+	return 0
+}
+
+# Check if a release is installed
+# arg: INSTALLED|NOTINSTALLED <release-name> <name-space>
+localhelm_installed_chart_release() {
+       __log_test_start $@
+    if [ $# -ne 3 ]; then
+        __print_err "INSTALLED|NOTINSTALLED <release-name> <name-space>" $@
+        return 1
+    fi
+       if [ $1 != "INSTALLED" ] && [ $1 != "NOTINSTALLED" ]; then
+        __print_err "INSTALLED|NOTINSTALLED <release-name> <name-space>" $@
+        return 1
+       fi
+
+       filter="helm ls -n $3 --filter ^$2"
+       res=$($filter -q)
+       if [ $? -ne 0 ]; then
+               __log_test_fail_general "Failed to list helm releases"
+               return 1
+       fi
+       if [ $1 == "INSTALLED" ]; then
+               if [ "$res" != $2 ]; then
+                       echo -e "$RED Release $2 does not exists $ERED"
+                       __log_test_fail_general
+                       return 1
+               fi
+       elif [ $1 == "NOTINSTALLED" ]; then
+               if [ "$res" == $2 ]; then
+                       __log_test_fail_general "Release $2 exists"
+                       return 1
+               fi
+       fi
+       echo " Currently installed releases in namespace $3"
+       helm ls -n $3  | indent2
+       __log_test_pass
+       return 0
+}
index 3e00ec3..122b412 100755 (executable)
@@ -217,6 +217,18 @@ __DMAAPMR_statisics_setup() {
        fi
 }
 
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__MR_test_requirements() {
+       :
+}
+
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__DMAAPMR_test_requirements() {
+       :
+}
+
 #######################################################
 
 # Description of port mappings when running MR-STUB only or MR-STUB + MESSAGE-ROUTER
index 9e29278..bb37799 100644 (file)
@@ -104,6 +104,12 @@ __NGW_statisics_setup() {
        fi
 }
 
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__NGW_test_requirements() {
+       :
+}
+
 #######################################################
 
 
index 7c91705..9d4d1f1 100644 (file)
@@ -103,6 +103,12 @@ __PA_statisics_setup() {
        fi
 }
 
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__PA_test_requirements() {
+       :
+}
+
 
 #######################################################
 
index f792d69..b3e3dea 100644 (file)
@@ -119,6 +119,12 @@ __PRODSTUB_statisics_setup() {
        fi
 }
 
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__PRODSTUB_test_requirements() {
+       :
+}
+
 #######################################################
 
 # Set http as the protocol to use for all communication to the Prod stub sim
@@ -192,13 +198,13 @@ start_prod_stub() {
                retcode_p=$?
 
                if [ $retcode_i -ne 0 ] && [ $retcode_p -ne 0 ]; then
-                       echo -e $RED"The $ICS_APP_NAME app is not included as managed nor prestarted in this test script"$ERED
-                       echo -e $RED"The $ICS_APP_NAME will not be started"$ERED
+                       echo -e $RED"The $PROD_STUB_APP_NAME app is not included as managed nor prestarted in this test script"$ERED
+                       echo -e $RED"The $PROD_STUB_APP_NAME will not be started"$ERED
                        exit
                fi
                if [ $retcode_i -eq 0 ] && [ $retcode_p -eq 0 ]; then
-                       echo -e $RED"The $ICS_APP_NAME app is included both as managed and prestarted in this test script"$ERED
-                       echo -e $RED"The $ICS_APP_NAME will not be started"$ERED
+                       echo -e $RED"The $PROD_STUB_APP_NAME app is included both as managed and prestarted in this test script"$ERED
+                       echo -e $RED"The $PROD_STUB_APP_NAME will not be started"$ERED
                        exit
                fi
 
index 5d37bd0..feb4440 100644 (file)
@@ -98,6 +98,12 @@ __PVCCLEANER_statisics_setup() {
        echo ""
 }
 
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__PVCCLEANER_test_requirements() {
+       :
+}
+
 #######################################################
 
 # This is a system app, all usage in testcase_common.sh
\ No newline at end of file
index 3766d19..b17b6bf 100644 (file)
@@ -34,7 +34,7 @@ __RC_imagesetup() {
 # <pull-policy-original> Shall be used for images that does not allow overriding
 # Both arg var may contain: 'remote', 'remote-remove' or 'local'
 __RC_imagepull() {
-       __check_and_pull_image $1 "$c" $RAPP_CAT_APP_NAME RAPP_CAT_IMAGE
+       __check_and_pull_image $1 "$RAPP_CAT_DISPLAY_NAME" $RAPP_CAT_APP_NAME RAPP_CAT_IMAGE
 }
 
 # Generate a string for each included image using the app display name and a docker images format string
@@ -96,6 +96,12 @@ __RC_statisics_setup() {
        fi
 }
 
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__RC_test_requirements() {
+       :
+}
+
 #######################################################
 
 # Set http as the protocol to use for all communication to the Rapp catalogue
index bd52611..2953eb0 100644 (file)
@@ -116,6 +116,12 @@ __RICSIM_statisics_setup() {
        done
 }
 
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__RICSIM_test_requirements() {
+       :
+}
+
 #######################################################
 
 
@@ -257,7 +263,6 @@ start_ric_simulators() {
                        done
                fi
        else
-
                __check_included_image 'RICSIM'
                if [ $? -eq 1 ]; then
                        echo -e $RED"The Near-RT RIC Simulator app is not included as managed in this test script"$ERED
index 3ac0a6c..68cf976 100644 (file)
@@ -120,6 +120,12 @@ __SDNC_statisics_setup() {
        fi
 }
 
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__SDNC_test_requirements() {
+       :
+}
+
 #######################################################
 
 # Set http as the protocol to use for all communication to SDNC
index 9d13a81..c0f1491 100644 (file)
@@ -69,17 +69,17 @@ NEXUS_RELEASE_REPO_ONAP=$NEXUS_RELEASE_REPO
 
 # Policy Agent image and tags
 POLICY_AGENT_IMAGE_BASE="onap/ccsdk-oran-a1policymanagementservice"
-POLICY_AGENT_IMAGE_TAG_LOCAL="1.2.4-SNAPSHOT"
-POLICY_AGENT_IMAGE_TAG_REMOTE_SNAPSHOT="1.2.4-SNAPSHOT"
-POLICY_AGENT_IMAGE_TAG_REMOTE="1.2.4-STAGING-latest" #Will use snapshot repo
-POLICY_AGENT_IMAGE_TAG_REMOTE_RELEASE="1.2.3"
+POLICY_AGENT_IMAGE_TAG_LOCAL="1.2.6-SNAPSHOT"
+POLICY_AGENT_IMAGE_TAG_REMOTE_SNAPSHOT="1.2.6-SNAPSHOT"
+POLICY_AGENT_IMAGE_TAG_REMOTE="1.2.6-STAGING-latest" #Will use snapshot repo
+POLICY_AGENT_IMAGE_TAG_REMOTE_RELEASE="1.2.5"
 
 # SDNC A1 Controller remote image and tag
 SDNC_A1_CONTROLLER_IMAGE_BASE="onap/sdnc-image"
-SDNC_A1_CONTROLLER_IMAGE_TAG_LOCAL="2.2.1-SNAPSHOT" ###CHECK THIS
-SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE_SNAPSHOT="2.2.1-STAGING-latest"
-SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE="2.2.1-STAGING-latest"  #Will use snapshot repo
-SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE_RELEASE="2.2.1"
+SDNC_A1_CONTROLLER_IMAGE_TAG_LOCAL="2.2.5-SNAPSHOT" ###CHECK THIS
+SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE_SNAPSHOT="2.2.5-STAGING-latest"
+SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE="2.2.5-STAGING-latest"  #Will use snapshot repo
+SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE_RELEASE="2.2.4"
 
 #SDNC DB remote image and tag
 #The DB is part of SDNC so handled in the same way as SDNC
diff --git a/test/common/test_env-onap-jakarta.sh b/test/common/test_env-onap-jakarta.sh
new file mode 100644 (file)
index 0000000..386f168
--- /dev/null
@@ -0,0 +1,427 @@
+#!/bin/bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2020 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+#Profile for ONAP jakarta release
+TEST_ENV_PROFILE="ONAP-JAKARTA"
+FLAVOUR="ONAP"
+
+########################################
+## Nexus repo settings
+########################################
+
+# Nexus repos for developed images
+NEXUS_PROXY_REPO="nexus3.onap.org:10001/"
+NEXUS_RELEASE_REPO="nexus3.onap.org:10002/"
+NEXUS_SNAPSHOT_REPO="nexus3.onap.org:10003/"
+NEXUS_STAGING_REPO=$NEXUS_SNAPSHOT_REPO  #staging repo not used in ONAP, using snapshot
+
+# Nexus repos for images used by test (not developed by the project)
+NEXUS_RELEASE_REPO_ORAN="nexus3.o-ran-sc.org:10002/" # Only for released ORAN images
+NEXUS_RELEASE_REPO_ONAP=$NEXUS_RELEASE_REPO
+
+########################################
+# Set up of image and tags for the test.
+########################################
+
+# NOTE: One environment variable containing the image name and tag is create by the test script
+# for each image from the env variables below.
+# The variable is created by removing the suffix "_BASE" from the base image variable name.
+# Example: POLICY_AGENT_IMAGE_BASE -> POLICY_AGENT_IMAGE
+# This var will point to the local or remote image depending on cmd line arguments.
+# In addition, the repo and the image tag version are selected from the list of image tags based on the cmd line argument.
+# For images built by the script, only tag #1 shall be specified
+# For project images, only tag #1, #2, #3 and #4 shall be specified
+# For ORAN images (non project), only tag #5 shall be specified
+# For ONAP images (non project), only tag #6 shall be specified
+# For all other images, only tag #7 shall be specified
+# 1 XXX_LOCAL: local images: <image-name>:<local-tag>
+# 2 XXX_REMOTE_SNAPSHOT: snapshot images: <snapshot-nexus-repo><image-name>:<snapshot-tag>
+# 3 XXX_REMOTE: staging images: <staging-nexus-repo><image-name>:<staging-tag>
+# 4 XXX_REMOTE_RELEASE: release images: <release-nexus-repo><image-name>:<release-tag>
+# 5 XXX_REMOTE_RELEASE_ORAN: ORAN release images: <oran-release-nexus-repo><image-name>:<release-tag>
+# 6 XXX_REMOTE_RELEASE_ONAP: ONAP release images: <onap-release-nexus-repo><image-name>:<release-tag>
+# 7 XXX_PROXY: other images, not produced by the project: <proxy-nexus-repo><mage-name>:<proxy-tag>
+
+#############################################################################
+# Note:
+# The image tags for pms and sdnc are updated AFTER the release.
+# This means that the latest staging/snapshot images for these two components have
+# version one step (0.0.1 - bug-level) higher than the
+# latest release image version.
+
+# This is only applicable for ONAP images
+#############################################################################
+
+# Policy Agent image and tags
+POLICY_AGENT_IMAGE_BASE="onap/ccsdk-oran-a1policymanagementservice"
+POLICY_AGENT_IMAGE_TAG_LOCAL="1.3.0-SNAPSHOT"
+POLICY_AGENT_IMAGE_TAG_REMOTE_SNAPSHOT="1.3.0-SNAPSHOT"
+POLICY_AGENT_IMAGE_TAG_REMOTE="1.3.0-STAGING-latest" #Will use snapshot repo
+POLICY_AGENT_IMAGE_TAG_REMOTE_RELEASE="1.3.0"
+
+# SDNC A1 Controller remote image and tag
+SDNC_A1_CONTROLLER_IMAGE_BASE="onap/sdnc-image"
+SDNC_A1_CONTROLLER_IMAGE_TAG_LOCAL="2.3.0-SNAPSHOT" ###CHECK THIS
+SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE_SNAPSHOT="2.3.0-STAGING-latest"
+SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE="2.3.0-STAGING-latest"  #Will use snapshot repo
+SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE_RELEASE="2.3.0"
+
+#SDNC DB remote image and tag
+#The DB is part of SDNC so handled in the same way as SDNC
+SDNC_DB_IMAGE_BASE="mariadb"
+SDNC_DB_IMAGE_TAG_REMOTE_PROXY="10.5"
+
+# ICS image and tag - used e release
+ICS_IMAGE_BASE="o-ran-sc/nonrtric-information-coordinator-service"
+ICS_IMAGE_TAG_REMOTE_RELEASE_ORAN="1.2.0"
+#Note: Update var ICS_FEATURE_LEVEL if image version is changed
+
+# Control Panel image and tag - used e release
+CONTROL_PANEL_IMAGE_BASE="o-ran-sc/nonrtric-controlpanel"
+CONTROL_PANEL_IMAGE_TAG_REMOTE_RELEASE_ORAN="2.3.0"
+
+# Gateway image and tags - used e release
+NRT_GATEWAY_IMAGE_BASE="o-ran-sc/nonrtric-gateway"
+NRT_GATEWAY_IMAGE_TAG_REMOTE_RELEASE_ORAN="1.0.0"
+
+# RAPP Catalogue image and tags - used e release
+RAPP_CAT_IMAGE_BASE="o-ran-sc/nonrtric-r-app-catalogue"
+RAPP_CAT_IMAGE_TAG_REMOTE_RELEASE_ORAN="1.0.1"
+
+
+# Near RT RIC Simulator image and tags - used e release
+RIC_SIM_IMAGE_BASE="o-ran-sc/a1-simulator"
+RIC_SIM_IMAGE_TAG_REMOTE_RELEASE_ORAN="2.2.0"
+
+
+#Consul remote image and tag
+CONSUL_IMAGE_BASE="consul"
+CONSUL_IMAGE_TAG_REMOTE_PROXY="1.7.2"
+#No local image for Consul, remote image always used
+
+
+#CBS remote image and tag
+CBS_IMAGE_BASE="onap/org.onap.dcaegen2.platform.configbinding.app-app"
+CBS_IMAGE_TAG_REMOTE_RELEASE_ONAP="2.3.0"
+#No local image for CBS, remote image always used
+
+
+#MR stub image and tag
+MRSTUB_IMAGE_BASE="mrstub"
+MRSTUB_IMAGE_TAG_LOCAL="latest"
+#No remote image for MR stub, local image always used
+
+
+#Callback receiver image and tag
+CR_IMAGE_BASE="callback-receiver"
+CR_IMAGE_TAG_LOCAL="latest"
+#No remote image for CR, local image always used
+
+
+#Producer stub image and tag
+PROD_STUB_IMAGE_BASE="producer-stub"
+PROD_STUB_IMAGE_TAG_LOCAL="latest"
+#No remote image for producer stub, local image always used
+
+
+#Http proxy remote image and tag
+HTTP_PROXY_IMAGE_BASE="nodejs-http-proxy"
+HTTP_PROXY_IMAGE_TAG_LOCAL="latest"
+#No local image for http proxy, remote image always used
+
+#ONAP Zookeeper remote image and tag
+ONAP_ZOOKEEPER_IMAGE_BASE="onap/dmaap/zookeeper"
+ONAP_ZOOKEEPER_IMAGE_TAG_REMOTE_RELEASE_ONAP="6.1.0"
+#No local image for ONAP Zookeeper, remote image always used
+
+#ONAP Kafka remote image and tag
+ONAP_KAFKA_IMAGE_BASE="onap/dmaap/kafka111"
+ONAP_KAFKA_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.1.1"
+#No local image for ONAP Kafka, remote image always used
+
+#ONAP DMAAP-MR remote image and tag
+ONAP_DMAAPMR_IMAGE_BASE="onap/dmaap/dmaap-mr"
+ONAP_DMAAPMR_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.3.0"
+#No local image for ONAP DMAAP-MR, remote image always used
+
+#Kube proxy remote image and tag
+KUBE_PROXY_IMAGE_BASE="nodejs-kube-proxy"
+KUBE_PROXY_IMAGE_TAG_LOCAL="latest"
+#No remote image for kube proxy, local image always used
+
+#Kube proxy remote image and tag
+PVC_CLEANER_IMAGE_BASE="ubuntu"
+PVC_CLEANER_IMAGE_TAG_REMOTE_PROXY="20.10"
+#No local image for pvc cleaner, remote image always used
+
+# List of app short names produced by the project
+PROJECT_IMAGES_APP_NAMES="PA SDNC"
+
+# List of app short names which images pulled from ORAN
+ORAN_IMAGES_APP_NAMES="CP ICS RICSIM RC NGW"
+
+# List of app short names which images pulled from ONAP
+ONAP_IMAGES_APP_NAMES=""   # Not used
+
+
+########################################
+# Detailed settings per app
+########################################
+
+
+DOCKER_SIM_NWNAME="nonrtric-docker-net"                  # Name of docker private network
+
+KUBE_NONRTRIC_NAMESPACE="nonrtric"                       # Namespace for all nonrtric components
+KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim"                            # Namespace for a1-p simulators (RICSIM)
+KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
+KUBE_SDNC_NAMESPACE="onap"                               # Namespace for sdnc
+
+POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
+POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
+POLICY_AGENT_EXTERNAL_SECURE_PORT=8433                   # Policy Agent container external secure port (host -> container)
+POLICY_AGENT_INTERNAL_SECURE_PORT=8433                   # Policy Agent container internal secure port (container -> container)
+POLICY_AGENT_APIS="V1 V2"                                # Supported northbound api versions
+PMS_VERSION="V2"                                         # Tested version of northbound API
+PMS_API_PREFIX="/a1-policy"                               # api url prefix, only for V2. Shall contain leading "/"
+
+POLICY_AGENT_APP_NAME="policymanagementservice"          # Name for Policy Agent container
+POLICY_AGENT_DISPLAY_NAME="Policy Management Service"
+POLICY_AGENT_HOST_MNT_DIR="./mnt"                        # Mounted dir, relative to compose file, on the host
+POLICY_AGENT_LOGPATH="/var/log/policy-agent/application.log" # Path the application log in the Policy Agent container
+POLICY_AGENT_APP_NAME_ALIAS="policy-agent-container"     # Alias name, name used by the control panel
+POLICY_AGENT_CONFIG_KEY="policy-agent"                   # Key for consul config
+POLICY_AGENT_PKG_NAME="org.onap.ccsdk.oran.a1policymanagementservice"  # Java base package name
+POLICY_AGENT_ACTUATOR="/actuator/loggers/$POLICY_AGENT_PKG_NAME" # Url for trace/debug
+POLICY_AGENT_ALIVE_URL="$PMS_API_PREFIX/v2/status"       # Base path for alive check
+POLICY_AGENT_COMPOSE_DIR="policy_agent"                  # Dir in simulator_group for docker-compose
+POLICY_AGENT_CONFIG_MOUNT_PATH="/opt/app/policy-agent/config" # Path in container for config file
+POLICY_AGENT_DATA_MOUNT_PATH="/opt/app/policy-agent/data" # Path in container for data file
+POLICY_AGENT_CONFIG_FILE="application.yaml"              # Container config file name
+POLICY_AGENT_DATA_FILE="application_configuration.json"  # Container data file name
+POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
+
+ICS_APP_NAME="informationservice"                        # Name for ICS container
+ICS_DISPLAY_NAME="Enrichment Coordinator Service"        # Display name for ICS container
+ICS_EXTERNAL_PORT=8083                                   # ICS container external port (host -> container)
+ICS_INTERNAL_PORT=8083                                   # ICS container internal port (container -> container)
+ICS_EXTERNAL_SECURE_PORT=8434                            # ICS container external secure port (host -> container)
+ICS_INTERNAL_SECURE_PORT=8434                            # ICS container internal secure port (container -> container)
+
+ICS_LOGPATH="/var/log/information-coordinator-service/application.log" # Path the application log in the ICS container
+ICS_APP_NAME_ALIAS="information-service-container"       # Alias name, name used by the control panel
+ICS_HOST_MNT_DIR="./mnt"                                 # Mounted dir, relative to compose file, on the host
+ICS_CONTAINER_MNT_DIR="/var/information-coordinator-service" # Mounted dir in the container
+ICS_ACTUATOR="/actuator/loggers/org.oransc.information"  # Url for trace/debug
+ICS_CERT_MOUNT_DIR="./cert"
+ICS_ALIVE_URL="/status"                                  # Base path for alive check
+ICS_COMPOSE_DIR="ics"                                    # Dir in simulator_group for docker-compose
+ICS_CONFIG_MOUNT_PATH=/opt/app/information-coordinator-service/config # Internal container path for configuration
+ICS_CONFIG_FILE=application.yaml                         # Config file name
+ICS_VERSION="V1-2"                                       # Version where the types are added in the producer registration
+ICS_FEATURE_LEVEL="INFO-TYPES"                           # Space separated list of features
+
+MR_DMAAP_APP_NAME="message-router"                       # Name for the Dmaap MR
+MR_STUB_APP_NAME="mr-stub"                               # Name of the MR stub
+MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
+MR_STUB_DISPLAY_NAME="Message Router stub"
+MR_STUB_CERT_MOUNT_DIR="./cert"
+MR_EXTERNAL_PORT=3904                                    # MR dmaap/stub container external port
+MR_INTERNAL_PORT=3904                                    # MR dmaap/stub container internal port
+MR_EXTERNAL_SECURE_PORT=3905                             # MR dmaap/stub container external secure port
+MR_INTERNAL_SECURE_PORT=3905                             # MR dmaap/stub container internal secure port
+MR_DMAAP_LOCALHOST_PORT=3904                             # MR stub container external port (host -> container)
+MR_STUB_LOCALHOST_PORT=3908                              # MR stub container external port (host -> container)
+MR_DMAAP_LOCALHOST_SECURE_PORT=3905                      # MR stub container internal port (container -> container)
+MR_STUB_LOCALHOST_SECURE_PORT=3909                       # MR stub container external secure port (host -> container)
+MR_READ_TOPIC="A1-POLICY-AGENT-READ"                     # Read topic
+MR_WRITE_TOPIC="A1-POLICY-AGENT-WRITE"                   # Write topic
+MR_READ_URL="/events/$MR_READ_TOPIC/users/policy-agent?timeout=15000&limit=100" # Path to read messages from MR
+MR_WRITE_URL="/events/$MR_WRITE_TOPIC"                   # Path to write messages to MR
+MR_STUB_ALIVE_URL="/"                                    # Base path for mr stub alive check
+MR_DMAAP_ALIVE_URL="/topics"                             # Base path for dmaap-mr alive check
+MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_group for dmaap mr for - docker-compose
+MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
+MR_KAFKA_APP_NAME="message-router-kafka"                 # Kafka app name, if just named "kafka" the image will not start...
+MR_KAFKA_PORT=9092                                       # Kafka port number
+MR_KAFKA_DOCKER_LOCALHOST_PORT=30098                     # Kafka port number for docker localhost
+MR_KAFKA_KUBE_NODE_PORT=30099                            # Kafka node port number for kube
+MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
+MR_ZOOKEEPER_PORT="2181"                                 # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt"                             # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs1"                      # Config files dir on localhost
+
+CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
+CR_DISPLAY_NAME="Callback Reciever"
+CR_EXTERNAL_PORT=8090                                    # Callback receiver container external port (host -> container)
+CR_INTERNAL_PORT=8090                                    # Callback receiver container internal port (container -> container)
+CR_EXTERNAL_SECURE_PORT=8091                             # Callback receiver container external secure port (host -> container)
+CR_INTERNAL_SECURE_PORT=8091                             # Callback receiver container internal secure port (container -> container)
+CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
+CR_APP_CALLBACK="/callbacks"                             # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr"                       # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text"                   # Url for callbacks (data containing text data)
+CR_ALIVE_URL="/reset"                                    # Base path for alive check
+CR_COMPOSE_DIR="cr"                                      # Dir in simulator_group for docker-compose
+
+PROD_STUB_APP_NAME="producer-stub"                       # Name for the Producer stub
+PROD_STUB_DISPLAY_NAME="Producer Stub"
+PROD_STUB_EXTERNAL_PORT=8092                             # Producer stub container external port (host -> container)
+PROD_STUB_INTERNAL_PORT=8092                             # Producer stub container internal port (container -> container)
+PROD_STUB_EXTERNAL_SECURE_PORT=8093                      # Producer stub container external secure port (host -> container)
+PROD_STUB_INTERNAL_SECURE_PORT=8093                      # Producer stub container internal secure port (container -> container)
+PROD_STUB_JOB_CALLBACK="/callbacks/job"                  # Callback path for job create/update/delete
+PROD_STUB_SUPERVISION_CALLBACK="/callbacks/supervision"  # Callback path for producre supervision
+PROD_STUB_ALIVE_URL="/"                                  # Base path for alive check
+PROD_STUB_COMPOSE_DIR="prodstub"                         # Dir in simulator_group for docker-compose
+
+CONSUL_HOST="consul-server"                              # Host name of consul
+CONSUL_DISPLAY_NAME="Consul"
+CONSUL_EXTERNAL_PORT=8500                                # Consul container external port (host -> container)
+CONSUL_INTERNAL_PORT=8500                                # Consul container internal port (container -> container)
+CONSUL_APP_NAME="polman-consul"                          # Name for consul container
+CONSUL_ALIVE_URL="/ui/dc1/kv"                            # Base path for alive check
+CONSUL_CBS_COMPOSE_DIR="consul_cbs"                      # Dir in simulator group for docker compose
+
+CBS_APP_NAME="polman-cbs"                                # Name for CBS container
+CBS_DISPLAY_NAME="Config Binding Service"
+CBS_EXTERNAL_PORT=10000                                  # CBS container external port (host -> container)
+CBS_INTERNAL_PORT=10000                                  # CBS container internal port (container -> container)
+CONFIG_BINDING_SERVICE="config-binding-service"          # Host name of CBS
+CBS_ALIVE_URL="/healthcheck"                             # Base path for alive check
+
+RIC_SIM_DISPLAY_NAME="Near-RT RIC A1 Simulator"
+RIC_SIM_BASE="g"                                         # Base name of the RIC Simulator container, shall be the group code
+                                                         # Note, a prefix is added to each container name by the .env file in the 'ric' dir
+RIC_SIM_PREFIX="ricsim"                                  # Prefix added to ric container name, added in the .env file in the 'ric' dir
+                                                         # This prefix can be changed from the command line
+RIC_SIM_INTERNAL_PORT=8085                               # RIC Simulator container internal port (container -> container).
+                                                         # (external ports allocated by docker)
+RIC_SIM_INTERNAL_SECURE_PORT=8185                        # RIC Simulator container internal secure port (container -> container).
+                                                         # (external ports allocated by docker)
+RIC_SIM_CERT_MOUNT_DIR="./cert"
+RIC_SIM_COMPOSE_DIR="ric"                                # Dir in simulator group for docker compose
+RIC_SIM_ALIVE_URL="/"
+RIC_SIM_COMMON_SVC_NAME=""                               # Name of svc if one common svc is used for all ric sim groups (stateful sets)
+
+SDNC_APP_NAME="a1controller"                             # Name of the SDNC A1 Controller container
+SDNC_DISPLAY_NAME="SDNC A1 Controller"
+SDNC_EXTERNAL_PORT=8282                                  # SDNC A1 Controller container external port (host -> container)
+SDNC_INTERNAL_PORT=8181                                  # SDNC A1 Controller container internal port (container -> container)
+SDNC_EXTERNAL_SECURE_PORT=8443                           # SDNC A1 Controller container external secure port (host -> container)
+SDNC_INTERNAL_SECURE_PORT=8443                           # SDNC A1 Controller container internal secure port (container -> container)
+SDNC_DB_APP_NAME="sdncdb"                                # Name of the SDNC DB container
+SDNC_A1_TRUSTSTORE_PASSWORD="a1adapter"                  # SDNC truststore password
+SDNC_USER="admin"                                        # SDNC username
+SDNC_PWD="admin"                                         # SDNC PWD (unused - overridden by the assignment below)
+SDNC_PWD="Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U"   # SDNC PWD
+#SDNC_API_URL="/rests/operations/A1-ADAPTER-API:"         # Base url path for SDNC API (for upgraded sdnc)
+SDNC_API_URL="/restconf/operations/A1-ADAPTER-API:"      # Base url path for SDNC API
+SDNC_ALIVE_URL="/apidoc/explorer/"                       # Base url path for SDNC API docs (for alive check)
+SDNC_COMPOSE_DIR="sdnc"
+SDNC_COMPOSE_FILE="docker-compose-2.yml"
+SDNC_KUBE_APP_FILE="app2.yaml"
+SDNC_KARAF_LOG="/opt/opendaylight/data/log/karaf.log"    # Path to karaf log
+#SDNC_RESPONSE_JSON_KEY="A1-ADAPTER-API:output"           # Key name for output json in replies from sdnc (for upgraded sdnc)
+SDNC_RESPONSE_JSON_KEY="output"                          # Key name for output json in replies from sdnc
+SDNC_FEATURE_LEVEL="TRANS_RESP_CODE"                     # Space separated list of features
+                                                         # TRANS_RESP_CODE: SDNC return southbound response code
+
+RAPP_CAT_APP_NAME="rappcatalogueservice"                 # Name for the RAPP Catalogue
+RAPP_CAT_DISPLAY_NAME="RAPP Catalogue Service"
+RAPP_CAT_EXTERNAL_PORT=8680                              # RAPP Catalogue container external port (host -> container)
+RAPP_CAT_INTERNAL_PORT=8680                              # RAPP Catalogue container internal port (container -> container)
+RAPP_CAT_EXTERNAL_SECURE_PORT=8633                       # RAPP Catalogue container external secure port (host -> container)
+RAPP_CAT_INTERNAL_SECURE_PORT=8633                       # RAPP Catalogue container internal secure port (container -> container)
+RAPP_CAT_ALIVE_URL="/services"                           # Base path for alive check
+RAPP_CAT_COMPOSE_DIR="rapp_catalogue"                    # Dir in simulator_group for docker-compose
+
+CONTROL_PANEL_APP_NAME="controlpanel"                    # Name of the Control Panel container
+CONTROL_PANEL_DISPLAY_NAME="Non-RT RIC Control Panel"
+CONTROL_PANEL_EXTERNAL_PORT=8080                         # Control Panel container external port (host -> container)
+CONTROL_PANEL_INTERNAL_PORT=8080                         # Control Panel container internal port (container -> container)
+CONTROL_PANEL_EXTERNAL_SECURE_PORT=8880                  # Control Panel container external port (host -> container)
+CONTROL_PANEL_INTERNAL_SECURE_PORT=8082                  # Control Panel container internal port (container -> container)
+CONTROL_PANEL_LOGPATH="/logs/nonrtric-controlpanel.log"  # Path the application log in the Control Panel container
+CONTROL_PANEL_ALIVE_URL="/"                              # Base path for alive check
+CONTROL_PANEL_COMPOSE_DIR="control_panel"                # Dir in simulator_group for docker-compose
+CONTROL_PANEL_CONFIG_MOUNT_PATH=/maven                   # Container internal path for config
+CONTROL_PANEL_CONFIG_FILE=application.properties         # Config file name
+CONTROL_PANEL_HOST_MNT_DIR="./mnt"                       # Mounted dir, relative to compose file, on the host
+
+NRT_GATEWAY_APP_NAME="nonrtricgateway"                   # Name of the Gateway container
+NRT_GATEWAY_DISPLAY_NAME="NonRT-RIC Gateway"
+NRT_GATEWAY_EXTERNAL_PORT=9090                           # Gateway container external port (host -> container)
+NRT_GATEWAY_INTERNAL_PORT=9090                           # Gateway container internal port (container -> container)
+NRT_GATEWAY_EXTERNAL_SECURE_PORT=9091                    # Gateway container external port (host -> container)
+NRT_GATEWAY_INTERNAL_SECURE_PORT=9091                    # Gateway container internal port (container -> container)
+NRT_GATEWAY_LOGPATH="/var/log/nonrtric-gateway/application.log" # Path the application log in the Gateway container
+NRT_GATEWAY_HOST_MNT_DIR="./mnt"                         # Mounted dir, relative to compose file, on the host
+NRT_GATEWAY_ALIVE_URL="/actuator/metrics"                # Base path for alive check
+NRT_GATEWAY_COMPOSE_DIR="ngw"                            # Dir in simulator_group for docker-compose
+NRT_GATEWAY_CONFIG_MOUNT_PATH=/opt/app/nonrtric-gateway/config  # Container internal path for config
+NRT_GATEWAY_CONFIG_FILE=application.yaml                 # Config file name
+NRT_GATEWAY_PKG_NAME="org.springframework.cloud.gateway" # Java base package name
+NRT_GATEWAY_ACTUATOR="/actuator/loggers/$NRT_GATEWAY_PKG_NAME" # Url for trace/debug
+
+HTTP_PROXY_APP_NAME="httpproxy"                          # Name of the Http Proxy container
+HTTP_PROXY_DISPLAY_NAME="Http Proxy"
+HTTP_PROXY_EXTERNAL_PORT=8740                            # Http Proxy container external port (host -> container)
+HTTP_PROXY_INTERNAL_PORT=8080                            # Http Proxy container internal port (container -> container)
+HTTP_PROXY_EXTERNAL_SECURE_PORT=8742                     # Http Proxy container external secure port (host -> container)
+HTTP_PROXY_INTERNAL_SECURE_PORT=8433                     # Http Proxy container internal secure port (container -> container)
+HTTP_PROXY_WEB_EXTERNAL_PORT=8741                        # Http Proxy container external port (host -> container)
+HTTP_PROXY_WEB_INTERNAL_PORT=8081                        # Http Proxy container internal port (container -> container)
+HTTP_PROXY_WEB_EXTERNAL_SECURE_PORT=8743                 # Http Proxy container external secure port (host -> container)
+HTTP_PROXY_WEB_INTERNAL_SECURE_PORT=8434                 # Http Proxy container internal secure port (container -> container)
+HTTP_PROXY_CONFIG_PORT=0                                 # Port number for proxy config, will be set if proxy is started
+HTTP_PROXY_CONFIG_HOST_NAME=""                           # Proxy host, will be set if proxy is started
+HTTP_PROXY_ALIVE_URL="/"                                 # Base path for alive check
+HTTP_PROXY_COMPOSE_DIR="httpproxy"                       # Dir in simulator_group for docker-compose
+HTTP_PROXY_BUILD_DIR="http-https-proxy"                  # Dir in simulator_group for image build - note, reuses source from kubeproxy
+
+KUBE_PROXY_APP_NAME="kubeproxy"                          # Name of the Kube Http Proxy container
+KUBE_PROXY_DISPLAY_NAME="Kube Http Proxy"
+KUBE_PROXY_EXTERNAL_PORT=8730                            # Kube Http Proxy container external port (host -> container)
+KUBE_PROXY_INTERNAL_PORT=8080                            # Kube Http Proxy container internal port (container -> container)
+KUBE_PROXY_EXTERNAL_SECURE_PORT=8782                     # Kube Proxy container external secure port (host -> container)
+KUBE_PROXY_INTERNAL_SECURE_PORT=8433                     # Kube Proxy container internal secure port (container -> container)
+KUBE_PROXY_WEB_EXTERNAL_PORT=8731                        # Kube Http Proxy container external port (host -> container)
+KUBE_PROXY_WEB_INTERNAL_PORT=8081                        # Kube Http Proxy container internal port (container -> container)
+KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783                 # Kube Proxy container external secure port (host -> container)
+KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434                 # Kube Proxy container internal secure port (container -> container)
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732                     # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784              # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733                 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785          # Kube Proxy container external secure port, docker (host -> container)
+
+KUBE_PROXY_PATH=""                                       # Proxy url path, will be set if proxy is started
+KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
+KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
+
+PVC_CLEANER_APP_NAME="pvc-cleaner"                      # Name for Persistent Volume Cleaner container
+PVC_CLEANER_DISPLAY_NAME="Persistent Volume Cleaner"    # Display name for Persistent Volume Cleaner
+PVC_CLEANER_COMPOSE_DIR="pvc-cleaner"                   # Dir in simulator_group for yamls
+
+########################################
+# Setting for common curl-base function
+########################################
+
+UUID=""                                                  # UUID used as prefix to the policy id to simulate a real UUID
+                                                         # Test script needs to set the UUID to use, otherwise this empty prefix is used
index 77b4da2..245dec8 100755 (executable)
@@ -143,6 +143,13 @@ DMAAP_ADP_IMAGE_TAG_REMOTE_SNAPSHOT="1.0.0-SNAPSHOT"
 DMAAP_ADP_IMAGE_TAG_REMOTE="1.0.0"
 DMAAP_ADP_IMAGE_TAG_REMOTE_RELEASE="1.0.0"
 
+# Helm Manager
+HELM_MANAGER_IMAGE_BASE="o-ran-sc/nonrtric-helm-manager"
+HELM_MANAGER_IMAGE_TAG_LOCAL="1.1.0-SNAPSHOT"
+HELM_MANAGER_IMAGE_TAG_REMOTE_SNAPSHOT="1.1.0-SNAPSHOT"
+HELM_MANAGER_IMAGE_TAG_REMOTE="1.1.0"
+HELM_MANAGER_IMAGE_TAG_REMOTE_RELEASE="1.1.0"
+
 #Consul remote image and tag
 CONSUL_IMAGE_BASE="consul"
 CONSUL_IMAGE_TAG_REMOTE_PROXY="1.7.2"
@@ -207,8 +214,13 @@ KAFKAPC_IMAGE_BASE="kafka-procon"
 KAFKAPC_IMAGE_TAG_LOCAL="latest"
 #No local image for pvc cleaner, remote image always used
 
+#Chart museum remote image and tag
+CHART_MUS_IMAGE_BASE="ghcr.io/helm/chartmuseum"
+CHART_MUS_IMAGE_TAG_REMOTE_OTHER="v0.13.1"
+#No local image for chart museum, remote image always used
+
 # List of app short names produced by the project
-PROJECT_IMAGES_APP_NAMES="PA ICS CP RC RICSIM NGW DMAAPADP DMAAPMED"  # Add SDNC here if oran image is used
+PROJECT_IMAGES_APP_NAMES="PA ICS CP RC RICSIM NGW DMAAPADP DMAAPMED HELMMANAGER"  # Add SDNC here if oran image is used
 
 # List of app short names which images pulled from ORAN
 ORAN_IMAGES_APP_NAMES=""  # Not used
@@ -538,6 +550,28 @@ KAFKAPC_INTERNAL_SECURE_PORT=8091                        # Kafka procon containe
 KAFKAPC_ALIVE_URL="/"                               # Base path for alive check
 KAFKAPC_COMPOSE_DIR="kafka-procon"                       # Dir in simulator_group for docker-compose
 KAFKAPC_BUILD_DIR="kafka-procon"                         # Build dir
+
+CHART_MUS_APP_NAME="chartmuseum"                         # Name for the chart museum app
+CHART_MUS_DISPLAY_NAME="Chart Museum"
+CHART_MUS_EXTERNAL_PORT=8201                             # chart museum container external port (host -> container)
+CHART_MUS_INTERNAL_PORT=8080                             # chart museum container internal port (container -> container)
+CHART_MUS_ALIVE_URL="/health"                            # Base path for alive check
+CHART_MUS_COMPOSE_DIR="chartmuseum"                      # Dir in simulator_group for docker-compose
+CHART_MUS_CHART_CONTR_CHARTS="/tmp/charts"               # Local dir container for charts
+
+HELM_MANAGER_APP_NAME="helmmanagerservice"               # Name for the helm manager app
+HELM_MANAGER_DISPLAY_NAME="Helm Manager"
+HELM_MANAGER_EXTERNAL_PORT=8211                          # helm manager container external port (host -> container)
+HELM_MANAGER_INTERNAL_PORT=8083                          # helm manager container internal port (container -> container)
+HELM_MANAGER_EXTERNAL_SECURE_PORT=8212                   # helm manager container external secure port (host -> container)
+HELM_MANAGER_INTERNAL_SECURE_PORT=8443                   # helm manager container internal secure port (container -> container)
+HELM_MANAGER_CLUSTER_ROLE=cluster-admin                  # Kubernetes cluster role for helm manager
+HELM_MANAGER_SA_NAME=helm-manager-sa                     # Service account name
+HELM_MANAGER_ALIVE_URL="/helm/charts"                    # Base path for alive check
+HELM_MANAGER_COMPOSE_DIR="helmmanager"                   # Dir in simulator_group for docker-compose
+HELM_MANAGER_USER="helmadmin"
+HELM_MANAGER_PWD="itisasecret"
+
 ########################################
 # Setting for common curl-base function
 ########################################
diff --git a/test/common/test_env-oran-f-release.sh b/test/common/test_env-oran-f-release.sh
new file mode 100755 (executable)
index 0000000..0ba821e
--- /dev/null
@@ -0,0 +1,579 @@
+#!/bin/bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2020 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+#Profile for ORAN f-release
+TEST_ENV_PROFILE="ORAN-F-RELEASE"
+FLAVOUR="ORAN"
+
+########################################
+## Nexus repo settings
+########################################
+
+# Nexus repos for developed images
+NEXUS_PROXY_REPO="nexus3.o-ran-sc.org:10001/"
+NEXUS_RELEASE_REPO="nexus3.o-ran-sc.org:10002/"
+NEXUS_SNAPSHOT_REPO="nexus3.o-ran-sc.org:10003/"
+NEXUS_STAGING_REPO="nexus3.o-ran-sc.org:10004/"
+
+# Nexus repos for images used by test (not developed by the project)
+NEXUS_RELEASE_REPO_ONAP="nexus3.onap.org:10002/"  # Only for released ONAP images
+NEXUS_RELEASE_REPO_ORAN=$NEXUS_RELEASE_REPO
+
+########################################
+# Set up of image and tags for the test.
+########################################
+
+# NOTE: One environment variable containing the image name and tag is created by the test script
+# for each image from the env variables below.
+# The variable is created by removing the suffix "_BASE" from the base image variable name.
+# Example: POLICY_AGENT_IMAGE_BASE -> POLICY_AGENT_IMAGE
+# This var will point to the local or remote image depending on cmd line arguments.
+# In addition, the repo and the image tag version are selected from the list of image tags based on the cmd line argument.
+# For images built by the script, only tag #1 shall be specified
+# For project images, only tag #1, #2, #3 and #4 shall be specified
+# For ORAN images (non project), only tag #5 shall be specified
+# For ONAP images (non project), only tag #6 shall be specified
+# For all other images, only tag #7 shall be specified
+# 1 XXX_LOCAL: local images: <image-name>:<local-tag>
+# 2 XXX_REMOTE_SNAPSHOT: snapshot images: <snapshot-nexus-repo><image-name>:<snapshot-tag>
+# 3 XXX_REMOTE: staging images: <staging-nexus-repo><image-name>:<staging-tag>
+# 4 XXX_REMOTE_RELEASE: release images: <release-nexus-repo><image-name>:<release-tag>
+# 5 XXX_REMOTE_RELEASE_ORAN: ORAN release images: <oran-release-nexus-repo><image-name>:<release-tag>
+# 6 XXX_REMOTE_RELEASE_ONAP: ONAP release images: <onap-release-nexus-repo><image-name>:<release-tag>
+# 7 XXX_PROXY: other images, not produced by the project: <proxy-nexus-repo><image-name>:<proxy-tag>
+
+
+# Policy Agent base image and tags
+POLICY_AGENT_IMAGE_BASE="o-ran-sc/nonrtric-a1-policy-management-service"
+POLICY_AGENT_IMAGE_TAG_LOCAL="2.3.0-SNAPSHOT"
+POLICY_AGENT_IMAGE_TAG_REMOTE_SNAPSHOT="2.3.0-SNAPSHOT"
+POLICY_AGENT_IMAGE_TAG_REMOTE="2.3.0"
+POLICY_AGENT_IMAGE_TAG_REMOTE_RELEASE="2.3.0"
+
+# ICS image and tags
+ICS_IMAGE_BASE="o-ran-sc/nonrtric-information-coordinator-service"
+ICS_IMAGE_TAG_LOCAL="1.2.0-SNAPSHOT"
+ICS_IMAGE_TAG_REMOTE_SNAPSHOT="1.2.0-SNAPSHOT"
+ICS_IMAGE_TAG_REMOTE="1.2.0"
+ICS_IMAGE_TAG_REMOTE_RELEASE="1.2.0"
+#Note: Update var ICS_FEATURE_LEVEL if image version is changed
+
+#Control Panel image and tags
+CONTROL_PANEL_IMAGE_BASE="o-ran-sc/nonrtric-controlpanel"
+CONTROL_PANEL_IMAGE_TAG_LOCAL="2.3.0-SNAPSHOT"
+CONTROL_PANEL_IMAGE_TAG_REMOTE_SNAPSHOT="2.3.0-SNAPSHOT"
+CONTROL_PANEL_IMAGE_TAG_REMOTE="2.3.0"
+CONTROL_PANEL_IMAGE_TAG_REMOTE_RELEASE="2.3.0"
+
+
+# Gateway image and tags
+NRT_GATEWAY_IMAGE_BASE="o-ran-sc/nonrtric-gateway"
+NRT_GATEWAY_IMAGE_TAG_LOCAL="1.0.0-SNAPSHOT"
+NRT_GATEWAY_IMAGE_TAG_REMOTE_SNAPSHOT="1.0.0-SNAPSHOT"
+NRT_GATEWAY_IMAGE_TAG_REMOTE="1.0.0"
+NRT_GATEWAY_IMAGE_TAG_REMOTE_RELEASE="1.0.0"
+
+
+# SDNC A1 Controller image and tags - Note using released honolulu ONAP image
+SDNC_A1_CONTROLLER_IMAGE_BASE="onap/sdnc-image"
+SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE_RELEASE_ONAP="2.1.6"
+#No local image for ONAP SDNC, remote release image always used
+
+# ORAN SDNC adapter kept as reference
+# SDNC A1 Controller image and tags - still using cherry version, no new version for D-Release
+#SDNC_A1_CONTROLLER_IMAGE_BASE="o-ran-sc/nonrtric-a1-controller"
+#SDNC_A1_CONTROLLER_IMAGE_TAG_LOCAL="2.0.1-SNAPSHOT"
+#SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE_SNAPSHOT="2.0.1-SNAPSHOT"
+#SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE="2.0.1"
+#SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE_RELEASE="2.0.1"
+
+#SDNC DB remote image and tag
+#The DB is part of SDNC so handled in the same way as SDNC
+SDNC_DB_IMAGE_BASE="mariadb"
+SDNC_DB_IMAGE_TAG_REMOTE_PROXY="10.5"
+
+#Older SDNC db image kept for reference
+#SDNC DB remote image and tag
+#SDNC_DB_IMAGE_BASE="mysql/mysql-server"
+#SDNC_DB_IMAGE_TAG_REMOTE_PROXY="5.6"
+#No local image for SDNC DB, remote image always used
+
+
+# RAPP Catalogue image and tags
+RAPP_CAT_IMAGE_BASE="o-ran-sc/nonrtric-r-app-catalogue"
+RAPP_CAT_IMAGE_TAG_LOCAL="1.0.1-SNAPSHOT"
+RAPP_CAT_IMAGE_TAG_REMOTE_SNAPSHOT="1.0.1-SNAPSHOT"
+RAPP_CAT_IMAGE_TAG_REMOTE="1.0.1"
+RAPP_CAT_IMAGE_TAG_REMOTE_RELEASE="1.0.1"
+
+
+# Near RT RIC Simulator image and tags - same version as cherry
+RIC_SIM_IMAGE_BASE="o-ran-sc/a1-simulator"
+RIC_SIM_IMAGE_TAG_LOCAL="latest"
+RIC_SIM_IMAGE_TAG_REMOTE_SNAPSHOT="2.2.0-SNAPSHOT"
+RIC_SIM_IMAGE_TAG_REMOTE="2.2.0"
+RIC_SIM_IMAGE_TAG_REMOTE_RELEASE="2.2.0"
+
+# DMAAP Mediator Service
+DMAAP_MED_IMAGE_BASE="o-ran-sc/nonrtric-dmaap-mediator-producer"
+DMAAP_MED_IMAGE_TAG_LOCAL="1.0.0-SNAPSHOT"
+DMAAP_MED_IMAGE_TAG_REMOTE_SNAPSHOT="1.0.0-SNAPSHOT"
+DMAAP_MED_IMAGE_TAG_REMOTE="1.0.0"
+DMAAP_MED_IMAGE_TAG_REMOTE_RELEASE="1.0.0"
+
+# DMAAP Adapter Service
+DMAAP_ADP_IMAGE_BASE="o-ran-sc/nonrtric-dmaap-adaptor"
+DMAAP_ADP_IMAGE_TAG_LOCAL="1.0.0-SNAPSHOT"
+DMAAP_ADP_IMAGE_TAG_REMOTE_SNAPSHOT="1.0.0-SNAPSHOT"
+DMAAP_ADP_IMAGE_TAG_REMOTE="1.0.0"
+DMAAP_ADP_IMAGE_TAG_REMOTE_RELEASE="1.0.0"
+
+# Helm Manager
+HELM_MANAGER_IMAGE_BASE="o-ran-sc/nonrtric-helm-manager"
+HELM_MANAGER_IMAGE_TAG_LOCAL="1.2.0-SNAPSHOT"
+HELM_MANAGER_IMAGE_TAG_REMOTE_SNAPSHOT="1.2.0-SNAPSHOT"
+HELM_MANAGER_IMAGE_TAG_REMOTE="1.2.0"
+HELM_MANAGER_IMAGE_TAG_REMOTE_RELEASE="1.2.0"
+
+#Consul remote image and tag
+CONSUL_IMAGE_BASE="consul"
+CONSUL_IMAGE_TAG_REMOTE_PROXY="1.7.2"
+#No local image for Consul, remote image always used
+
+
+#CBS remote image and tag
+CBS_IMAGE_BASE="onap/org.onap.dcaegen2.platform.configbinding.app-app"
+CBS_IMAGE_TAG_REMOTE_RELEASE_ONAP="2.3.0"
+#No local image for CBS, remote image always used
+
+
+#MR stub image and tag
+MRSTUB_IMAGE_BASE="mrstub"
+MRSTUB_IMAGE_TAG_LOCAL="latest"
+#No remote image for MR stub, local image always used
+
+
+#Callback receiver image and tag
+CR_IMAGE_BASE="callback-receiver"
+CR_IMAGE_TAG_LOCAL="latest"
+#No remote image for CR, local image always used
+
+
+#Producer stub image and tag
+PROD_STUB_IMAGE_BASE="producer-stub"
+PROD_STUB_IMAGE_TAG_LOCAL="latest"
+#No remote image for producer stub, local image always used
+
+#Http proxy image and tag
+HTTP_PROXY_IMAGE_BASE="nodejs-http-proxy"
+HTTP_PROXY_IMAGE_TAG_LOCAL="latest"
+#No remote image for http proxy, local image always used
+
+#ONAP Zookeeper remote image and tag
+ONAP_ZOOKEEPER_IMAGE_BASE="onap/dmaap/zookeeper"
+ONAP_ZOOKEEPER_IMAGE_TAG_REMOTE_RELEASE_ONAP="6.1.0"
+#No local image for ONAP Zookeeper, remote image always used
+
+#ONAP Kafka remote image and tag
+ONAP_KAFKA_IMAGE_BASE="onap/dmaap/kafka111"
+ONAP_KAFKA_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.1.1"
+#No local image for ONAP Kafka, remote image always used
+
+#ONAP DMAAP-MR remote image and tag
+ONAP_DMAAPMR_IMAGE_BASE="onap/dmaap/dmaap-mr"
+ONAP_DMAAPMR_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.3.0"
+#No local image for ONAP DMAAP-MR, remote image always used
+
+#Kube proxy image and tag
+KUBE_PROXY_IMAGE_BASE="nodejs-kube-proxy"
+KUBE_PROXY_IMAGE_TAG_LOCAL="latest"
+#No remote image for kube proxy, local image always used
+
+#PVC Cleaner remote image and tag
+PVC_CLEANER_IMAGE_BASE="ubuntu"
+PVC_CLEANER_IMAGE_TAG_REMOTE_PROXY="20.10"
+#No local image for pvc cleaner, remote image always used
+
+#Kafka Procon image and tag
+KAFKAPC_IMAGE_BASE="kafka-procon"
+KAFKAPC_IMAGE_TAG_LOCAL="latest"
+#No remote image for kafka procon, local image always used
+
+#Chart museum remote image and tag
+CHART_MUS_IMAGE_BASE="ghcr.io/helm/chartmuseum"
+CHART_MUS_IMAGE_TAG_REMOTE_OTHER="v0.13.1"
+#No local image for chart museum, remote image always used
+
+# List of app short names produced by the project
+PROJECT_IMAGES_APP_NAMES="PA ICS CP RC RICSIM NGW DMAAPADP DMAAPMED HELMMANAGER"  # Add SDNC here if oran image is used
+
+# List of app short names which images pulled from ORAN
+ORAN_IMAGES_APP_NAMES=""  # Not used
+
+# List of app short names which images pulled from ONAP
+ONAP_IMAGES_APP_NAMES="CBS DMAAPMR SDNC"   # SDNC added as ONAP image
+
+
+########################################
+# Detailed settings per app
+########################################
+
+# Port number variables
+# =====================
+# Port number vars <name>_INTERNAL_PORT and <name>_INTERNAL_SECURE_PORT are set as pod/container port in kube and container port in docker
+#
+# Port number vars <name>_EXTERNAL_PORT and <name>_EXTERNAL_SECURE_PORT are set as svc port in kube and localhost port in docker
+#
+# Some components, e.g. MR, can be represented as the MR-STUB and/or the DMAAP MR. For these components,
+# special vars named <name>_LOCALHOST_PORT and <name>_LOCALHOST_SECURE_PORT are used as localhost ports instead of
+# <name>_EXTERNAL_PORT and <name>_EXTERNAL_SECURE_PORT ports in docker in order to prevent overlapping ports on local host
+#
+# For KUBE PROXY there are special external ports for docker as the proxy exposes also the kube svc port on localhost,
+# therefore a special set of external ports is needed for docker: <name>_DOCKER_EXTERNAL_PORT and <name>_DOCKER_EXTERNAL_SECURE_PORT
+
+DOCKER_SIM_NWNAME="nonrtric-docker-net"                  # Name of docker private network
+
+KUBE_NONRTRIC_NAMESPACE="nonrtric"                       # Namespace for all nonrtric components
+KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim"                            # Namespace for a1-p simulators (RICSIM)
+KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
+KUBE_SDNC_NAMESPACE="onap"                               # Namespace for sdnc
+
+POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
+POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
+POLICY_AGENT_EXTERNAL_SECURE_PORT=8433                   # Policy Agent container external secure port (host -> container)
+POLICY_AGENT_INTERNAL_SECURE_PORT=8433                   # Policy Agent container internal secure port (container -> container)
+POLICY_AGENT_APIS="V1 V2"                                # Supported northbound api versions
+PMS_VERSION="V2"                                         # Tested version of northbound API
+PMS_API_PREFIX="/a1-policy"                              # api url prefix, only for V2
+
+POLICY_AGENT_APP_NAME="policymanagementservice"          # Name for Policy Agent container
+POLICY_AGENT_DISPLAY_NAME="Policy Management Service"
+POLICY_AGENT_HOST_MNT_DIR="./mnt"                        # Mounted dir, relative to compose file, on the host
+POLICY_AGENT_LOGPATH="/var/log/policy-agent/application.log" # Path the application log in the Policy Agent container
+POLICY_AGENT_APP_NAME_ALIAS="policy-agent-container"     # Alias name, name used by the control panel
+POLICY_AGENT_CONFIG_KEY="policy-agent"                   # Key for consul config
+POLICY_AGENT_PKG_NAME="org.onap.ccsdk.oran.a1policymanagementservice"  # Java base package name
+POLICY_AGENT_ACTUATOR="/actuator/loggers/$POLICY_AGENT_PKG_NAME" # Url for trace/debug
+POLICY_AGENT_ALIVE_URL="$PMS_API_PREFIX/v2/status"       # Base path for alive check
+POLICY_AGENT_COMPOSE_DIR="policy_agent"                  # Dir in simulator_group for docker-compose
+POLICY_AGENT_CONFIG_MOUNT_PATH="/opt/app/policy-agent/config" # Path in container for config file
+POLICY_AGENT_DATA_MOUNT_PATH="/opt/app/policy-agent/data" # Path in container for data file
+POLICY_AGENT_CONFIG_FILE="application.yaml"              # Container config file name
+POLICY_AGENT_DATA_FILE="application_configuration.json"  # Container data file name
+POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
+
+ICS_APP_NAME="informationservice"                        # Name for ICS container
+ICS_DISPLAY_NAME="Information Coordinator Service"       # Display name for ICS container
+ICS_EXTERNAL_PORT=8083                                   # ICS container external port (host -> container)
+ICS_INTERNAL_PORT=8083                                   # ICS container internal port (container -> container)
+ICS_EXTERNAL_SECURE_PORT=8434                            # ICS container external secure port (host -> container)
+ICS_INTERNAL_SECURE_PORT=8434                            # ICS container internal secure port (container -> container)
+
+ICS_LOGPATH="/var/log/information-coordinator-service/application.log" # Path the application log in the ICS container
+ICS_APP_NAME_ALIAS="information-service-container"       # Alias name, name used by the control panel
+ICS_HOST_MNT_DIR="./mnt"                                 # Mounted db dir, relative to compose file, on the host
+ICS_CONTAINER_MNT_DIR="/var/information-coordinator-service" # Mounted dir in the container
+ICS_ACTUATOR="/actuator/loggers/org.oransc.information"  # Url for trace/debug
+ICS_CERT_MOUNT_DIR="./cert"
+ICS_ALIVE_URL="/status"                                  # Base path for alive check
+ICS_COMPOSE_DIR="ics"                                    # Dir in simulator_group for docker-compose
+ICS_CONFIG_MOUNT_PATH=/opt/app/information-coordinator-service/config # Internal container path for configuration
+ICS_CONFIG_FILE=application.yaml                         # Config file name
+ICS_VERSION="V1-2"                                       # Version where the types are decoupled from the producer registration
+ICS_FEATURE_LEVEL="INFO-TYPES TYPE-SUBSCRIPTIONS INFO-TYPE-INFO RESP_CODE_CHANGE_1"  # Space separated list of features
+
+MR_DMAAP_APP_NAME="message-router"                       # Name for the Dmaap MR
+MR_STUB_APP_NAME="mr-stub"                               # Name of the MR stub
+MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
+MR_STUB_DISPLAY_NAME="Message Router stub"
+MR_STUB_CERT_MOUNT_DIR="./cert"
+MR_EXTERNAL_PORT=3904                                    # MR dmaap/stub container external port
+MR_INTERNAL_PORT=3904                                    # MR dmaap/stub container internal port
+MR_EXTERNAL_SECURE_PORT=3905                             # MR dmaap/stub container external secure port
+MR_INTERNAL_SECURE_PORT=3905                             # MR dmaap/stub container internal secure port
+MR_DMAAP_LOCALHOST_PORT=3904                             # MR stub container external port (host -> container)
+MR_STUB_LOCALHOST_PORT=3908                              # MR stub container external port (host -> container)
+MR_DMAAP_LOCALHOST_SECURE_PORT=3905                      # MR stub container internal port (container -> container)
+MR_STUB_LOCALHOST_SECURE_PORT=3909                       # MR stub container external secure port (host -> container)
+MR_READ_TOPIC="A1-POLICY-AGENT-READ"                     # Read topic
+MR_WRITE_TOPIC="A1-POLICY-AGENT-WRITE"                   # Write topic
+MR_READ_URL="/events/$MR_READ_TOPIC/users/policy-agent?timeout=15000&limit=100" # Path to read messages from MR
+MR_WRITE_URL="/events/$MR_WRITE_TOPIC"                   # Path to write messages to MR
+MR_STUB_ALIVE_URL="/"                                    # Base path for mr stub alive check
+MR_DMAAP_ALIVE_URL="/topics"                             # Base path for dmaap-mr alive check
+MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_group for dmaap mr for - docker-compose
+MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
+MR_KAFKA_APP_NAME="message-router-kafka"                 # Kafka app name, if just named "kafka" the image will not start...
+MR_KAFKA_PORT=9092                                       # Kafka port number
+MR_KAFKA_DOCKER_LOCALHOST_PORT=30098                     # Kafka port number for docker localhost
+MR_KAFKA_KUBE_NODE_PORT=30099                            # Kafka node port number for kube
+MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
+MR_ZOOKEEPER_PORT="2181"                                 # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt"                             # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs1"                      # Config files dir on localhost
+
+CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
+CR_DISPLAY_NAME="Callback receiver"
+CR_EXTERNAL_PORT=8090                                    # Callback receiver container external port (host -> container)
+CR_INTERNAL_PORT=8090                                    # Callback receiver container internal port (container -> container)
+CR_EXTERNAL_SECURE_PORT=8091                             # Callback receiver container external secure port (host -> container)
+CR_INTERNAL_SECURE_PORT=8091                             # Callback receiver container internal secure port (container -> container)
+CR_APP_CALLBACK="/callbacks"                             # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr"                       # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text"                   # Url for callbacks (data containing text data)
+CR_ALIVE_URL="/reset"                                    # Base path for alive check
+CR_COMPOSE_DIR="cr"                                      # Dir in simulator_group for docker-compose
+
+PROD_STUB_APP_NAME="producer-stub"                       # Name for the Producer stub
+PROD_STUB_DISPLAY_NAME="Producer Stub"
+PROD_STUB_EXTERNAL_PORT=8092                             # Producer stub container external port (host -> container)
+PROD_STUB_INTERNAL_PORT=8092                             # Producer stub container internal port (container -> container)
+PROD_STUB_EXTERNAL_SECURE_PORT=8093                      # Producer stub container external secure port (host -> container)
+PROD_STUB_INTERNAL_SECURE_PORT=8093                      # Producer stub container internal secure port (container -> container)
+PROD_STUB_JOB_CALLBACK="/callbacks/job"                  # Callback path for job create/update/delete
+PROD_STUB_SUPERVISION_CALLBACK="/callbacks/supervision"  # Callback path for producer supervision
+PROD_STUB_ALIVE_URL="/"                                  # Base path for alive check
+PROD_STUB_COMPOSE_DIR="prodstub"                         # Dir in simulator_group for docker-compose
+
+CONSUL_HOST="consul-server"                              # Host name of consul
+CONSUL_DISPLAY_NAME="Consul"
+CONSUL_EXTERNAL_PORT=8500                                # Consul container external port (host -> container)
+CONSUL_INTERNAL_PORT=8500                                # Consul container internal port (container -> container)
+CONSUL_APP_NAME="polman-consul"                          # Name for consul container
+CONSUL_ALIVE_URL="/ui/dc1/kv"                            # Base path for alive check
+CONSUL_CBS_COMPOSE_DIR="consul_cbs"                      # Dir in simulator group for docker compose
+
+CBS_APP_NAME="polman-cbs"                                # Name for CBS container
+CBS_DISPLAY_NAME="Config Binding Service"
+CBS_EXTERNAL_PORT=10000                                  # CBS container external port (host -> container)
+CBS_INTERNAL_PORT=10000                                  # CBS container internal port (container -> container)
+CONFIG_BINDING_SERVICE="config-binding-service"          # Host name of CBS
+CBS_ALIVE_URL="/healthcheck"                             # Base path for alive check
+
+RIC_SIM_DISPLAY_NAME="Near-RT RIC A1 Simulator"
+RIC_SIM_BASE="g"                                         # Base name of the RIC Simulator container, shall be the group code
+                                                         # Note, a prefix is added to each container name by the .env file in the 'ric' dir
+RIC_SIM_PREFIX="ricsim"                                  # Prefix added to ric container name, added in the .env file in the 'ric' dir
+                                                         # This prefix can be changed from the command line
+RIC_SIM_INTERNAL_PORT=8085                               # RIC Simulator container internal port (container -> container).
+                                                         # (external ports allocated by docker)
+RIC_SIM_INTERNAL_SECURE_PORT=8185                        # RIC Simulator container internal secure port (container -> container).
+                                                         # (external ports allocated by docker)
+RIC_SIM_CERT_MOUNT_DIR="./cert"
+
+RIC_SIM_COMPOSE_DIR="ric"                                # Dir in simulator group for docker compose
+RIC_SIM_ALIVE_URL="/"                                    # Base path for alive check
+RIC_SIM_COMMON_SVC_NAME=""                               # Name of svc if one common svc is used for all ric sim groups (stateful sets)
+# Kept as reference for oran a1 adapter
+# SDNC_APP_NAME="a1controller"                             # Name of the SNDC A1 Controller container
+# SDNC_DISPLAY_NAME="SDNC A1 Controller"
+# SDNC_EXTERNAL_PORT=8282                                  # SNDC A1 Controller container external port (host -> container)
+# SDNC_INTERNAL_PORT=8181                                  # SNDC A1 Controller container internal port (container -> container)
+# SDNC_EXTERNAL_SECURE_PORT=8443                           # SNDC A1 Controller container external securee port (host -> container)
+# SDNC_INTERNAL_SECURE_PORT=8443                           # SNDC A1 Controller container internal secure port (container -> container)
+# SDNC_DB_APP_NAME="sdncdb"                                # Name of the SDNC DB container
+# SDNC_A1_TRUSTSTORE_PASSWORD=""                           # SDNC truststore password
+# SDNC_USER="admin"                                        # SDNC username
+# SDNC_PWD="Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U"   # SNDC PWD
+# SDNC_API_URL="/restconf/operations/A1-ADAPTER-API:"      # Base url path for SNDC API
+# SDNC_ALIVE_URL="/apidoc/explorer/"                       # Base url path for SNDC API docs (for alive check)
+# SDNC_COMPOSE_DIR="sdnc"                                  # Dir in simulator_group for docker-compose
+# SDNC_COMPOSE_FILE="docker-compose.yml"
+# SDNC_KUBE_APP_FILE="app.yaml"
+# SDNC_KARAF_LOG="/opt/opendaylight/data/log/karaf.log"    # Path to karaf log
+# SDNC_RESPONSE_JSON_KEY="output"                          # Key name for output json in replies from sdnc
+
+# For ONAP SDNC
+SDNC_APP_NAME="a1controller"                             # Name of the SDNC A1 Controller container
+SDNC_DISPLAY_NAME="SDNC A1 Controller"
+SDNC_EXTERNAL_PORT=8282                                  # SDNC A1 Controller container external port (host -> container)
+SDNC_INTERNAL_PORT=8181                                  # SDNC A1 Controller container internal port (container -> container)
+SDNC_EXTERNAL_SECURE_PORT=8443                           # SDNC A1 Controller container external secure port (host -> container)
+SDNC_INTERNAL_SECURE_PORT=8443                           # SDNC A1 Controller container internal secure port (container -> container)
+SDNC_DB_APP_NAME="sdncdb"                                # Name of the SDNC DB container
+SDNC_A1_TRUSTSTORE_PASSWORD="a1adapter"                  # SDNC truststore password
+SDNC_USER="admin"                                        # SDNC username
+SDNC_PWD="admin"                                         # SDNC PWD (overridden by the next assignment)
+SDNC_PWD="Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U"   # SDNC PWD
+#SDNC_API_URL="/rests/operations/A1-ADAPTER-API:"         # Base url path for SDNC API (for upgraded sdnc)
+SDNC_API_URL="/restconf/operations/A1-ADAPTER-API:"      # Base url path for SDNC API
+SDNC_ALIVE_URL="/apidoc/explorer/"                       # Base url path for SDNC API docs (for alive check)
+SDNC_COMPOSE_DIR="sdnc"
+SDNC_COMPOSE_FILE="docker-compose-2.yml"
+SDNC_KUBE_APP_FILE="app2.yaml"
+SDNC_KARAF_LOG="/opt/opendaylight/data/log/karaf.log"    # Path to karaf log
+#SDNC_RESPONSE_JSON_KEY="A1-ADAPTER-API:output"           # Key name for output json in replies from sdnc (for upgraded sdnc)
+SDNC_RESPONSE_JSON_KEY="output"                          # Key name for output json in replies from sdnc
+SDNC_FEATURE_LEVEL=""                                    # Space separated list of features
+
+RAPP_CAT_APP_NAME="rappcatalogueservice"                 # Name for the RAPP Catalogue
+RAPP_CAT_DISPLAY_NAME="RAPP Catalogue"
+RAPP_CAT_EXTERNAL_PORT=8680                              # RAPP Catalogue container external port (host -> container)
+RAPP_CAT_INTERNAL_PORT=8680                              # RAPP Catalogue container internal port (container -> container)
+RAPP_CAT_EXTERNAL_SECURE_PORT=8633                       # RAPP Catalogue container external secure port (host -> container)
+RAPP_CAT_INTERNAL_SECURE_PORT=8633                       # RAPP Catalogue container internal secure port (container -> container)
+RAPP_CAT_ALIVE_URL="/services"                           # Base path for alive check
+RAPP_CAT_COMPOSE_DIR="rapp_catalogue"                    # Dir in simulator_group for docker-compose
+
+CONTROL_PANEL_APP_NAME="controlpanel"                    # Name of the Control Panel container
+CONTROL_PANEL_DISPLAY_NAME="Control Panel"
+CONTROL_PANEL_EXTERNAL_PORT=8080                         # Control Panel container external port (host -> container)
+CONTROL_PANEL_INTERNAL_PORT=8080                         # Control Panel container internal port (container -> container)
+CONTROL_PANEL_EXTERNAL_SECURE_PORT=8880                  # Control Panel container external port (host -> container)
+CONTROL_PANEL_INTERNAL_SECURE_PORT=8082                  # Control Panel container internal port (container -> container)
+CONTROL_PANEL_LOGPATH="/var/log/nonrtric-gateway/application.log"  # Path to the application log in the Control Panel container
+CONTROL_PANEL_ALIVE_URL="/"                              # Base path for alive check
+CONTROL_PANEL_COMPOSE_DIR="control_panel"                # Dir in simulator_group for docker-compose
+CONTROL_PANEL_CONFIG_FILE=nginx.conf                     # Config file name
+CONTROL_PANEL_HOST_MNT_DIR="./mnt"                       # Mounted dir, relative to compose file, on the host
+CONTROL_PANEL_CONFIG_MOUNT_PATH=/etc/nginx               # Container internal path for config
+CONTROL_PANEL_NGINX_KUBE_RESOLVER="kube-dns.kube-system.svc.cluster.local valid=5s"  #nginx resolver for kube
+CONTROL_PANEL_NGINX_DOCKER_RESOLVER="127.0.0.11"         # nginx resolver for docker
+CONTROL_PANEL_PATH_POLICY_PREFIX="/a1-policy/"           # Path prefix for forwarding policy calls to NGW
+CONTROL_PANEL_PATH_ICS_PREFIX="/data-producer/"          # Path prefix for forwarding ics calls to NGW
+CONTROL_PANEL_PATH_ICS_PREFIX2="/data-consumer/"         # Path prefix for forwarding ics calls to NGW
+
+NRT_GATEWAY_APP_NAME="nonrtricgateway"                   # Name of the Gateway container
+NRT_GATEWAY_DISPLAY_NAME="NonRT-RIC Gateway"
+NRT_GATEWAY_EXTERNAL_PORT=9090                           # Gateway container external port (host -> container)
+NRT_GATEWAY_INTERNAL_PORT=9090                           # Gateway container internal port (container -> container)
+NRT_GATEWAY_EXTERNAL_SECURE_PORT=9091                    # Gateway container external port (host -> container)
+NRT_GATEWAY_INTERNAL_SECURE_PORT=9091                    # Gateway container internal port (container -> container)
+NRT_GATEWAY_LOGPATH="/var/log/nonrtric-gateway/application.log" # Path to the application log in the Gateway container
+NRT_GATEWAY_HOST_MNT_DIR="./mnt"                         # Mounted dir, relative to compose file, on the host
+NRT_GATEWAY_ALIVE_URL="/actuator/metrics"                # Base path for alive check
+NRT_GATEWAY_COMPOSE_DIR="ngw"                            # Dir in simulator_group for docker-compose
+NRT_GATEWAY_CONFIG_MOUNT_PATH=/opt/app/nonrtric-gateway/config  # Container internal path for config
+NRT_GATEWAY_CONFIG_FILE=application.yaml                 # Config file name
+NRT_GATEWAY_PKG_NAME="org.springframework.cloud.gateway" # Java base package name
+NRT_GATEWAY_ACTUATOR="/actuator/loggers/$NRT_GATEWAY_PKG_NAME" # Url for trace/debug
+
+HTTP_PROXY_APP_NAME="httpproxy"                          # Name of the Http Proxy container
+HTTP_PROXY_DISPLAY_NAME="Http Proxy"
+HTTP_PROXY_EXTERNAL_PORT=8740                            # Http Proxy container external port (host -> container)
+HTTP_PROXY_INTERNAL_PORT=8080                            # Http Proxy container internal port (container -> container)
+HTTP_PROXY_EXTERNAL_SECURE_PORT=8742                     # Http Proxy container external secure port (host -> container)
+HTTP_PROXY_INTERNAL_SECURE_PORT=8433                     # Http Proxy container internal secure port (container -> container)
+HTTP_PROXY_WEB_EXTERNAL_PORT=8741                        # Http Proxy container external port (host -> container)
+HTTP_PROXY_WEB_INTERNAL_PORT=8081                        # Http Proxy container internal port (container -> container)
+HTTP_PROXY_WEB_EXTERNAL_SECURE_PORT=8743                 # Http Proxy container external secure port (host -> container)
+HTTP_PROXY_WEB_INTERNAL_SECURE_PORT=8434                 # Http Proxy container internal secure port (container -> container)
+HTTP_PROXY_CONFIG_PORT=0                                 # Port number for proxy config, will be set if proxy is started
+HTTP_PROXY_CONFIG_HOST_NAME=""                           # Proxy host, will be set if proxy is started
+HTTP_PROXY_ALIVE_URL="/"                                 # Base path for alive check
+HTTP_PROXY_COMPOSE_DIR="httpproxy"                       # Dir in simulator_group for docker-compose
+HTTP_PROXY_BUILD_DIR="http-https-proxy"                  # Dir in simulator_group for image build - note, reuses source from kubeproxy
+
+KUBE_PROXY_APP_NAME="kubeproxy"                          # Name of the Kube Http Proxy container
+KUBE_PROXY_DISPLAY_NAME="Kube Http Proxy"
+KUBE_PROXY_EXTERNAL_PORT=8730                            # Kube Http Proxy container external port (host -> container)
+KUBE_PROXY_INTERNAL_PORT=8080                            # Kube Http Proxy container internal port (container -> container)
+KUBE_PROXY_EXTERNAL_SECURE_PORT=8782                     # Kube Proxy container external secure port (host -> container)
+KUBE_PROXY_INTERNAL_SECURE_PORT=8433                     # Kube Proxy container internal secure port (container -> container)
+KUBE_PROXY_WEB_EXTERNAL_PORT=8731                        # Kube Http Proxy container external port (host -> container)
+KUBE_PROXY_WEB_INTERNAL_PORT=8081                        # Kube Http Proxy container internal port (container -> container)
+KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783                 # Kube Proxy container external secure port (host -> container)
+KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434                 # Kube Proxy container internal secure port (container -> container)
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732                     # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784              # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733                 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785          # Kube Proxy container external secure port, docker (host -> container)
+
+KUBE_PROXY_PATH=""                                       # Proxy url path, will be set if proxy is started
+KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
+KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
+
+PVC_CLEANER_APP_NAME="pvc-cleaner"                      # Name for Persistent Volume Cleaner container
+PVC_CLEANER_DISPLAY_NAME="Persistent Volume Cleaner"    # Display name for Persistent Volume Cleaner
+PVC_CLEANER_COMPOSE_DIR="pvc-cleaner"                   # Dir in simulator_group for yamls
+
+DMAAP_ADP_APP_NAME="dmaapadapterservice"                 # Name for Dmaap Adapter container
+DMAAP_ADP_DISPLAY_NAME="Dmaap Adapter Service"           # Display name for Dmaap Adapter container
+DMAAP_ADP_EXTERNAL_PORT=9087                             # Dmaap Adapter container external port (host -> container)
+DMAAP_ADP_INTERNAL_PORT=8084                             # Dmaap Adapter container internal port (container -> container)
+DMAAP_ADP_EXTERNAL_SECURE_PORT=9088                      # Dmaap Adapter container external secure port (host -> container)
+DMAAP_ADP_INTERNAL_SECURE_PORT=8435                      # Dmaap Adapter container internal secure port (container -> container)
+
+#DMAAP_ADP_LOGPATH="/var/log/dmaap-adaptor-service/application.log" # Path the application log in the Dmaap Adapter container
+DMAAP_ADP_HOST_MNT_DIR="./mnt"                           # Mounted db dir, relative to compose file, on the host
+#DMAAP_ADP_CONTAINER_MNT_DIR="/var/dmaap-adaptor-service" # Mounted dir in the container
+DMAAP_ADP_ACTUATOR="/actuator/loggers/org.oran.dmaapadapter"   # Url for trace/debug
+#DMAAP_ADP_CERT_MOUNT_DIR="./cert"
+DMAAP_ADP_ALIVE_URL="/actuator/info"                     # Base path for alive check
+DMAAP_ADP_COMPOSE_DIR="dmaapadp"                         # Dir in simulator_group for docker-compose
+DMAAP_ADP_CONFIG_MOUNT_PATH="/opt/app/dmaap-adaptor-service/config" # Internal container path for configuration
+DMAAP_ADP_DATA_MOUNT_PATH="/opt/app/dmaap-adaptor-service/data" # Path in container for data file
+DMAAP_ADP_DATA_FILE="application_configuration.json"  # Container data file name
+DMAAP_ADP_CONFIG_FILE=application.yaml                   # Config file name
+
+DMAAP_MED_APP_NAME="dmaapmediatorservice"                # Name for Dmaap Mediator container
+DMAAP_MED_DISPLAY_NAME="Dmaap Mediator Service"          # Display name for Dmaap Mediator container
+DMAAP_MED_EXTERNAL_PORT=8085                             # Dmaap Mediator container external port (host -> container)
+DMAAP_MED_INTERNAL_PORT=8085                             # Dmaap Mediator container internal port (container -> container)
+DMAAP_MED_EXTERNAL_SECURE_PORT=8185                      # Dmaap Mediator container external secure port (host -> container)
+DMAAP_MED_INTERNAL_SECURE_PORT=8185                      # Dmaap Mediator container internal secure port (container -> container)
+
+DMAAP_MED_LOGPATH="/var/log/dmaap-adaptor-service/application.log" # Path to the application log in the Dmaap Mediator container
+DMAAP_MED_HOST_MNT_DIR="./mnt"                          # Mounted db dir, relative to compose file, on the host
+#DMAAP_MED_CONTAINER_MNT_DIR="/var/dmaap-adaptor-service" # Mounted dir in the container
+#DMAAP_MED_ACTUATOR="/actuator/loggers/org.oransc.information"   # Url for trace/debug
+#DMAAP_MED_CERT_MOUNT_DIR="./cert"
+DMAAP_MED_ALIVE_URL="/status"                            # Base path for alive check
+DMAAP_MED_COMPOSE_DIR="dmaapmed"                         # Dir in simulator_group for docker-compose
+#DMAAP_MED_CONFIG_MOUNT_PATH="/app"                       # Internal container path for configuration
+DMAAP_MED_DATA_MOUNT_PATH="/configs"                     # Path in container for data file
+DMAAP_MED_DATA_FILE="type_config.json"                   # Container data file name
+
+KAFKAPC_APP_NAME="kafka-procon"                          # Name for the Kafka procon
+KAFKAPC_DISPLAY_NAME="Kafka Producer/Consumer"
+KAFKAPC_EXTERNAL_PORT=8096                               # Kafka procon container external port (host -> container)
+KAFKAPC_INTERNAL_PORT=8090                               # Kafka procon container internal port (container -> container)
+KAFKAPC_EXTERNAL_SECURE_PORT=8097                        # Kafka procon container external secure port (host -> container)
+KAFKAPC_INTERNAL_SECURE_PORT=8091                        # Kafka procon container internal secure port (container -> container)
+KAFKAPC_ALIVE_URL="/"                                    # Base path for alive check
+KAFKAPC_COMPOSE_DIR="kafka-procon"                       # Dir in simulator_group for docker-compose
+KAFKAPC_BUILD_DIR="kafka-procon"                         # Build dir
+
+CHART_MUS_APP_NAME="chartmuseum"                         # Name for the chart museum app
+CHART_MUS_DISPLAY_NAME="Chart Museum"
+CHART_MUS_EXTERNAL_PORT=8201                             # chart museum container external port (host -> container)
+CHART_MUS_INTERNAL_PORT=8080                             # chart museum container internal port (container -> container)
+CHART_MUS_ALIVE_URL="/health"                            # Base path for alive check
+CHART_MUS_COMPOSE_DIR="chartmuseum"                      # Dir in simulator_group for docker-compose
+CHART_MUS_CHART_CONTR_CHARTS="/tmp/charts"               # Local dir container for charts
+
+HELM_MANAGER_APP_NAME="helmmanagerservice"               # Name for the helm manager app
+HELM_MANAGER_DISPLAY_NAME="Helm Manager"
+HELM_MANAGER_EXTERNAL_PORT=8211                          # helm manager container external port (host -> container)
+HELM_MANAGER_INTERNAL_PORT=8083                          # helm manager container internal port (container -> container)
+HELM_MANAGER_EXTERNAL_SECURE_PORT=8212                   # helm manager container external secure port (host -> container)
+HELM_MANAGER_INTERNAL_SECURE_PORT=8443                   # helm manager container internal secure port (container -> container)
+HELM_MANAGER_CLUSTER_ROLE=cluster-admin                  # Kubernetes cluster role for helm manager
+HELM_MANAGER_SA_NAME=helm-manager-sa                     # Service account name
+HELM_MANAGER_ALIVE_URL="/helm/charts"                    # Base path for alive check
+HELM_MANAGER_COMPOSE_DIR="helmmanager"                   # Dir in simulator_group for docker-compose
+HELM_MANAGER_USER="helmadmin"
+HELM_MANAGER_PWD="itisasecret"
+########################################
+# Setting for common curl-base function
+########################################
+
+UUID=""                                                  # UUID used as prefix to the policy id to simulate a real UUID
+                                                         # Testscript need to set the UUID otherwise this empty prefix is used
index 39f6284..da2abf5 100755 (executable)
@@ -163,18 +163,51 @@ TESTLOGS=$PWD/logs
 # files in the ./tmp is moved to ./tmp/prev when a new test is started
 if [ ! -d "tmp" ]; then
     mkdir tmp
+       if [ $? -ne 0 ]; then
+               echo "Cannot create dir for temp files, $PWD/tmp"
+               echo "Exiting...."
+               exit 1
+       fi
 fi
 curdir=$PWD
 cd tmp
 if [ $? -ne 0 ]; then
        echo "Cannot cd to $PWD/tmp"
-       echo "Dir cannot be created. Exiting...."
+       echo "Exiting...."
+       exit 1
 fi
+
+TESTENV_TEMP_FILES=$PWD
+
 if [ ! -d "prev" ]; then
     mkdir prev
+       if [ $? -ne 0 ]; then
+               echo "Cannot create dir for previous temp files, $PWD/prev"
+               echo "Exiting...."
+               exit 1
+       fi
 fi
+
+TMPFILES=$(ls -A  | grep -vw prev)
+if [ ! -z "$TMPFILES" ]; then
+       cp -r $TMPFILES prev   #Move all temp files to prev dir
+       if [ $? -ne 0 ]; then
+               echo "Cannot move temp files in $PWD to previous temp files in, $PWD/prev"
+               echo "Exiting...."
+               exit 1
+       fi
+       if [ $(pwd | xargs basename) == "tmp" ]; then    #Check that current dir is tmp...for safety
+
+               rm -rf $TMPFILES # Remove all temp files
+       fi
+fi
+
 cd $curdir
-mv ./tmp/* ./tmp/prev 2> /dev/null
+if [ $? -ne 0 ]; then
+       echo "Cannot cd to $curdir"
+       echo "Exiting...."
+       exit 1
+fi
 
 # Create a http message log for this testcase
 HTTPLOG=$PWD"/.httplog_"$ATC".txt"
@@ -775,6 +808,10 @@ if [ ! -z "$TMP_APPS" ]; then
                done
                echo " Auto-adding system app   $padded_iapp  Sourcing $file_pointer"
                . $file_pointer
+               if [ $? -ne 0 ]; then
+                       echo " Include file $file_pointer contain errors. Exiting..."
+                       exit 1
+               fi
                __added_apps=" $iapp "$__added_apps
        done
 else
@@ -797,11 +834,15 @@ echo -e $BOLD"Auto adding included apps"$EBOLD
                                padded_iapp=$padded_iapp" "
                        done
                        echo " Auto-adding included app $padded_iapp  Sourcing $file_pointer"
-                       . $file_pointer
                        if [ ! -f "$file_pointer" ]; then
                                echo " Include file $file_pointer for app $iapp does not exist"
                                exit 1
                        fi
+                       . $file_pointer
+                       if [ $? -ne 0 ]; then
+                               echo " Include file $file_pointer contain errors. Exiting..."
+                               exit 1
+                       fi
                fi
        done
 echo ""
@@ -1299,6 +1340,9 @@ setup_testenvironment() {
                        # If the image suffix is none, then the component decides the suffix
                        function_pointer="__"$imagename"_imagesetup"
                        $function_pointer $IMAGE_SUFFIX
+
+                       function_pointer="__"$imagename"_test_requirements"
+                       $function_pointer
                fi
        done
 
@@ -2016,7 +2060,7 @@ __kube_delete_all_resources() {
        namespace=$1
        labelname=$2
        labelid=$3
-       resources="deployments replicaset statefulset services pods configmaps persistentvolumeclaims persistentvolumes"
+       resources="deployments replicaset statefulset services pods configmaps persistentvolumeclaims persistentvolumes serviceaccounts clusterrolebindings"
        deleted_resourcetypes=""
        for restype in $resources; do
                ns_flag="-n $namespace"
@@ -2025,6 +2069,10 @@ __kube_delete_all_resources() {
                        ns_flag=""
                        ns_text=""
                fi
+               if [ $restype == "clusterrolebindings" ]; then
+                       ns_flag=""
+                       ns_text=""
+               fi
                result=$(kubectl get $restype $ns_flag -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
                if [ $? -eq 0 ] && [ ! -z "$result" ]; then
                        deleted_resourcetypes=$deleted_resourcetypes" "$restype
@@ -2103,6 +2151,51 @@ __kube_create_namespace() {
        return 0
 }
 
+# Removes a namespace if it exists
+# args: <namespace>
+# (Not for test scripts)
+__kube_delete_namespace() {
+
+       #Check if test namespace exists, if so remove it
+       kubectl get namespace $1 1> /dev/null 2> ./tmp/kubeerr
+       if [ $? -eq 0 ]; then
+               echo -ne " Removing namespace "$1 $SAMELINE
+               kubectl delete namespace $1 1> /dev/null 2> ./tmp/kubeerr
+               if [ $? -ne 0 ]; then
+                       echo -e " Removing namespace $1 $RED$BOLD FAILED $EBOLD$ERED"
+                       ((RES_CONF_FAIL++))
+                       echo "  Message: $(<./tmp/kubeerr)"
+                       return 1
+               else
+                       echo -e " Removing namespace $1 $GREEN$BOLD OK $EBOLD$EGREEN"
+               fi
+       else
+               echo -e " Namespace $1 $GREEN$BOLD does not exist, OK $EBOLD$EGREEN"
+       fi
+       return 0
+}
+
+# Removes a namespace (if it exists) and then re-creates it
+# args: <namespace>
+# (Not for test scripts)
+clean_and_create_namespace() {
+       __log_conf_start $@
+
+    if [ $# -ne 1 ]; then
+               __print_err "<namespace>" $@
+               return 1
+       fi
+       __kube_delete_namespace $1
+       if [ $? -ne 0 ]; then
+               return 1
+       fi
+       __kube_create_namespace $1
+       if [ $? -ne 0 ]; then
+               return 1
+       fi
+
+}
+
 # Find the host ip of an app (using the service resource)
 # args: <app-name> <namespace>
 # (Not for test scripts)
@@ -2346,14 +2439,14 @@ clean_environment() {
                __clean_kube
                if [ $PRE_CLEAN -eq 1 ]; then
                        echo " Cleaning docker resouces to free up resources, may take time..."
-                       ../common/clean_docker.sh 2&>1 /dev/null
-                       ../common/clean_docker.sh > /dev/null 2>&1
                        echo ""
                fi
        else
                __clean_containers
                if [ $PRE_CLEAN -eq 1 ]; then
                        echo " Cleaning kubernetes resouces to free up resources, may take time..."
-                       ../common/clean_kube.sh 2&>1 /dev/null
-                       ../common/clean_kube.sh > /dev/null 2>&1
                        echo ""
                fi
        fi
@@ -2456,9 +2549,14 @@ __start_container() {
 
        envsubst < $compose_file > "gen_"$compose_file
        compose_file="gen_"$compose_file
+       if [ $DOCKER_COMPOSE_VERION == "V1" ]; then
+               docker_compose_cmd="docker-compose"
+       else
+               docker_compose_cmd="docker compose"
+       fi
 
        if [ "$compose_args" == "NODOCKERARGS" ]; then
-               docker-compose -f $compose_file up -d &> .dockererr
+               $docker_compose_cmd -f $compose_file up -d &> .dockererr
                if [ $? -ne 0 ]; then
                        echo -e $RED"Problem to launch container(s) with docker-compose"$ERED
                        cat .dockererr
@@ -2466,7 +2564,7 @@ __start_container() {
                        exit 1
                fi
        else
-               docker-compose -f $compose_file up -d $compose_args &> .dockererr
+               $docker_compose_cmd -f $compose_file up -d $compose_args &> .dockererr
                if [ $? -ne 0 ]; then
                        echo -e $RED"Problem to launch container(s) with docker-compose"$ERED
                        cat .dockererr
index 1048d76..aed9226 100644 (file)
 #
 
 # List of short names for all supported apps, including simulators etc
-APP_SHORT_NAMES="PA ICS SDNC CP NGW RC RICSIM HTTPPROXY CBS CONSUL DMAAPMR MR CR PRODSTUB KUBEPROXY DMAAPMED DMAAPADP PVCCLEANER KAFKAPC"
+APP_SHORT_NAMES="PA ICS SDNC CP NGW RC RICSIM HTTPPROXY CBS CONSUL DMAAPMR MR CR PRODSTUB KUBEPROXY DMAAPMED DMAAPADP PVCCLEANER KAFKAPC CHARTMUS HELMMANAGER LOCALHELM"
 
 # List of available apps that built and released of the project
-PROJECT_IMAGES="PA ICS SDNC CP NGW RICSIM RC DMAAPMED DMAAPADP"
+PROJECT_IMAGES="PA ICS SDNC CP NGW RICSIM RC DMAAPMED DMAAPADP HELMMANAGER"
 
 # List of available apps to override with local or remote staging/snapshot/release image
-AVAILABLE_IMAGES_OVERRIDE="PA ICS SDNC CP NGW RICSIM RC DMAAPMED DMAAPADP"
+AVAILABLE_IMAGES_OVERRIDE="PA ICS SDNC CP NGW RICSIM RC DMAAPMED DMAAPADP HELMMANAGER"
 
 # List of available apps where the image is built by the test environment
 LOCAL_IMAGE_BUILD="MR CR PRODSTUB KUBEPROXY HTTPPROXY KAFKAPC"
diff --git a/test/simulator-group/chartmuseum/.gitignore b/test/simulator-group/chartmuseum/.gitignore
new file mode 100644 (file)
index 0000000..7dc00c5
--- /dev/null
@@ -0,0 +1,3 @@
+.tmp.json
+.dockererr
+gen_docker-compose*
\ No newline at end of file
diff --git a/test/simulator-group/chartmuseum/app.yaml b/test/simulator-group/chartmuseum/app.yaml
new file mode 100644 (file)
index 0000000..3ce3a32
--- /dev/null
@@ -0,0 +1,53 @@
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2021 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: $CHART_MUS_APP_NAME
+  namespace: $KUBE_SIM_NAMESPACE
+  labels:
+    run: $CHART_MUS_APP_NAME
+    autotest: CHARTMUS
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      run: $CHART_MUS_APP_NAME
+  template:
+    metadata:
+      labels:
+        run: $CHART_MUS_APP_NAME
+        autotest: CHARTMUS
+    spec:
+      containers:
+      - name: $CHART_MUS_APP_NAME
+        image: $CHART_MUS_IMAGE
+        imagePullPolicy: $KUBE_IMAGE_PULL_POLICY
+        ports:
+        - name: http
+          containerPort: $CHART_MUS_INTERNAL_PORT
+        env:
+        - name: STORAGE
+          value: "local"
+        - name: STORAGE_LOCAL_ROOTDIR
+          value: ${CHART_MUS_CHART_CONTR_CHARTS}
+        - name: DEBUG
+          value: "1"
+# Selector will be set when pod is started first time
+      nodeSelector:
+
diff --git a/test/simulator-group/chartmuseum/docker-compose.yml b/test/simulator-group/chartmuseum/docker-compose.yml
new file mode 100644 (file)
index 0000000..a336f1d
--- /dev/null
@@ -0,0 +1,37 @@
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2020 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+version: '3.0'
+networks:
+  default:
+    external: true
+    name: ${DOCKER_SIM_NWNAME}
+services:
+  chartmuseum:
+    networks:
+      - default
+    container_name: ${CHART_MUS_APP_NAME}
+    image: ${CHART_MUS_IMAGE}
+    ports:
+      - ${CHART_MUS_EXTERNAL_PORT}:${CHART_MUS_INTERNAL_PORT}
+    environment:
+      - STORAGE=local
+      - STORAGE_LOCAL_ROOTDIR=${CHART_MUS_CHART_CONTR_CHARTS}
+      - DEBUG=1
+    labels:
+      - "nrttest_app=CHARTMUS"
+      - "nrttest_dp=${CHART_MUS_DISPLAY_NAME}"
diff --git a/test/simulator-group/chartmuseum/svc.yaml b/test/simulator-group/chartmuseum/svc.yaml
new file mode 100644 (file)
index 0000000..a5301f4
--- /dev/null
@@ -0,0 +1,34 @@
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2021 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: $CHART_MUS_APP_NAME
+  namespace: $KUBE_SIM_NAMESPACE
+  labels:
+    run: $CHART_MUS_APP_NAME
+    autotest: CHARTMUS
+spec:
+  type: ClusterIP
+  ports:
+  - port: $CHART_MUS_EXTERNAL_PORT
+    targetPort: $CHART_MUS_INTERNAL_PORT
+    protocol: TCP
+    name: http
+  selector:
+    run: $CHART_MUS_APP_NAME
\ No newline at end of file
diff --git a/test/simulator-group/helmmanager/.gitignore b/test/simulator-group/helmmanager/.gitignore
new file mode 100644 (file)
index 0000000..7dc00c5
--- /dev/null
@@ -0,0 +1,3 @@
+.tmp.json
+.dockererr
+gen_docker-compose*
\ No newline at end of file
diff --git a/test/simulator-group/helmmanager/app.yaml b/test/simulator-group/helmmanager/app.yaml
new file mode 100644 (file)
index 0000000..5c17169
--- /dev/null
@@ -0,0 +1,36 @@
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2021 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: helmmanagerservice
+  namespace: $KUBE_NONRTRIC_NAMESPACE
+  labels:
+    run: $HELM_MANAGER_APP_NAME
+    autotest: HELMMANAGER
+spec:
+  serviceAccountName: $HELM_MANAGER_SA_NAME
+  containers:
+  - name: $HELM_MANAGER_APP_NAME
+    image: $HELM_MANAGER_IMAGE
+    imagePullPolicy: $KUBE_IMAGE_PULL_POLICY
+    ports:
+    - name: http
+      containerPort: $HELM_MANAGER_INTERNAL_PORT
+    - name: https
+      containerPort: $HELM_MANAGER_INTERNAL_SECURE_PORT
diff --git a/test/simulator-group/helmmanager/docker-compose.yml b/test/simulator-group/helmmanager/docker-compose.yml
new file mode 100644 (file)
index 0000000..cc69ed0
--- /dev/null
@@ -0,0 +1,36 @@
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2021 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+version: '3.0'
+networks:
+  default:
+    external: true
+    name: ${DOCKER_SIM_NWNAME}
+services:
+  helmmanager:
+    networks:
+      - default
+    container_name: ${HELM_MANAGER_APP_NAME}
+    image: ${HELM_MANAGER_IMAGE}
+    ports:
+      - ${HELM_MANAGER_EXTERNAL_PORT}:${HELM_MANAGER_INTERNAL_PORT}
+      - ${HELM_MANAGER_EXTERNAL_SECURE_PORT}:${HELM_MANAGER_INTERNAL_SECURE_PORT}
+    volumes:
+      - ~/.kube:/root/.kube
+    labels:
+      - "nrttest_app=HELMMANAGER"
+      - "nrttest_dp=${HELM_MANAGER_DISPLAY_NAME}"
diff --git a/test/simulator-group/helmmanager/sa.yaml b/test/simulator-group/helmmanager/sa.yaml
new file mode 100644 (file)
index 0000000..1769b7c
--- /dev/null
@@ -0,0 +1,42 @@
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2021 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: $HELM_MANAGER_SA_NAME
+ namespace: $KUBE_NONRTRIC_NAMESPACE
+ labels:
+  run: $HELM_MANAGER_APP_NAME
+  autotest: HELMMANAGER
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: ${HELM_MANAGER_SA_NAME}-clusterrolebinding
+ namespace: $KUBE_NONRTRIC_NAMESPACE
+ labels:
+  run: $HELM_MANAGER_APP_NAME
+  autotest: HELMMANAGER
+subjects:
+- kind: ServiceAccount
+  name: $HELM_MANAGER_SA_NAME
+  namespace: $KUBE_NONRTRIC_NAMESPACE
+roleRef:
+ kind: ClusterRole
+ name: $HELM_MANAGER_CLUSTER_ROLE
+ apiGroup: rbac.authorization.k8s.io
\ No newline at end of file
diff --git a/test/simulator-group/helmmanager/svc.yaml b/test/simulator-group/helmmanager/svc.yaml
new file mode 100644 (file)
index 0000000..3703697
--- /dev/null
@@ -0,0 +1,37 @@
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2021 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: helmmanagerservice
+  namespace: $KUBE_NONRTRIC_NAMESPACE
+  labels:
+    run: $HELM_MANAGER_APP_NAME
+    autotest: HELMMANAGER
+spec:
+  ports:
+  - port: $HELM_MANAGER_EXTERNAL_PORT
+    targetPort: $HELM_MANAGER_INTERNAL_PORT
+    protocol: TCP
+    name: http
+  - port: $HELM_MANAGER_EXTERNAL_SECURE_PORT
+    targetPort: $HELM_MANAGER_INTERNAL_SECURE_PORT
+    protocol: TCP
+    name: https
+  selector:
+    run: $HELM_MANAGER_APP_NAME