ADD /config/keystore.jks /opt/app/policy-agent/etc/cert/keystore.jks
ADD /config/truststore.jks /opt/app/policy-agent/etc/cert/truststore.jks
-RUN chmod -R 777 /opt/app/policy-agent/config/
-RUN chmod -R 777 /opt/app/policy-agent/data/
+ARG user=nonrtric
+ARG group=nonrtric
-ADD target/${JAR} /opt/app/policy-agent/policy-agent.jar
-CMD ["java", "-jar", "/opt/app/policy-agent/policy-agent.jar"]
+RUN groupadd $group && \
+ useradd -r -g $group $user
+RUN chown -R $user:$group /opt/app/policy-agent
+RUN chown -R $user:$group /var/log/policy-agent
+
+USER ${user}
+ADD target/${JAR} /opt/app/policy-agent/policy-agent.jar
+CMD ["java", "-jar", "/opt/app/policy-agent/policy-agent.jar"]
\ No newline at end of file
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
- <version>2.5.3</version>
+ <version>2.6.2</version>
<relativePath />
</parent>
<groupId>org.o-ran-sc.nonrtric</groupId>
- <artifactId>policy-agent</artifactId>
- <version>2.3.0-SNAPSHOT</version>
+ <artifactId>a1-policy-management-service</artifactId>
+ <version>2.3.1-SNAPSHOT</version>
<licenses>
<license>
<name>The Apache Software License, Version 2.0</name>
<java.version>11</java.version>
<springfox.version>3.0.0</springfox.version>
<immutable.version>2.8.2</immutable.version>
- <sdk.version>1.1.6</sdk.version>
- <swagger.version>2.1.6</swagger.version>
- <json.version>20190722</json.version>
- <commons-net.version>3.6</commons-net.version>
+ <swagger.version>2.1.12</swagger.version>
+ <json.version>20211205</json.version>
<maven-compiler-plugin.version>3.8.0</maven-compiler-plugin.version>
<formatter-maven-plugin.version>2.12.2</formatter-maven-plugin.version>
<spotless-maven-plugin.version>1.18.0</spotless-maven-plugin.version>
<docker-maven-plugin>0.30.0</docker-maven-plugin>
- <version.dmaap>1.1.11</version.dmaap>
<javax.ws.rs-api.version>2.1.1</javax.ws.rs-api.version>
<sonar-maven-plugin.version>3.7.0.1746</sonar-maven-plugin.version>
<jacoco-maven-plugin.version>0.8.5</jacoco-maven-plugin.version>
<exec-maven-plugin.version>3.0.0</exec-maven-plugin.version>
</properties>
<dependencies>
+ <dependency>
+ <groupId>commons-io</groupId>
+ <artifactId>commons-io</artifactId>
+ <version>2.11.0</version>
+ </dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
- <version>30.0-jre</version>
+ <version>31.0.1-jre</version>
</dependency>
<dependency>
<groupId>org.springdoc</groupId>
<artifactId>springdoc-openapi-ui</artifactId>
- <version>1.5.2</version>
+ <version>1.6.3</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>json</artifactId>
<version>${json.version}</version>
</dependency>
- <dependency>
- <groupId>commons-net</groupId>
- <artifactId>commons-net</artifactId>
- <version>${commons-net.version}</version>
- </dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-configuration-processor</artifactId>
<optional>true</optional>
</dependency>
- <dependency>
- <groupId>org.onap.dcaegen2.services.sdk.rest.services</groupId>
- <artifactId>cbs-client</artifactId>
- <version>${sdk.version}</version>
- </dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<scope>provided</scope>
</dependency>
- <dependency>
- <groupId>org.onap.dmaap.messagerouter.dmaapclient</groupId>
- <artifactId>dmaapClient</artifactId>
- <version>${version.dmaap}</version>
- </dependency>
<dependency>
<groupId>javax.ws.rs</groupId>
<artifactId>javax.ws.rs-api</artifactId>
<inherited>false</inherited>
<executions>
<execution>
- <id>generate-policy-agent-image</id>
+ <id>generate-a1-policy-management-service-image</id>
<phase>package</phase>
<goals>
<goal>build</goal>
<pullRegistry>${env.CONTAINER_PULL_REGISTRY}</pullRegistry>
<images>
<image>
- <name>o-ran-sc/nonrtric-policy-agent:${project.version}</name>
+ <name>o-ran-sc/nonrtric-a1-policy-management-service:${project.version}</name>
<build>
<cleanup>try</cleanup>
<contextDir>${basedir}</contextDir>
</configuration>
</execution>
<execution>
- <id>push-policy-agent-image</id>
+ <id>push-a1-policy-management-service-image</id>
<goals>
<goal>build</goal>
<goal>push</goal>
<pushRegistry>${env.CONTAINER_PUSH_REGISTRY}</pushRegistry>
<images>
<image>
- <name>o-ran-sc/nonrtric-policy-agent:${project.version}</name>
+ <name>o-ran-sc/nonrtric-a1-policy-management-service:${project.version}</name>
<build>
<contextDir>${basedir}</contextDir>
<dockerFile>Dockerfile</dockerFile>
RUN mkdir -p /var/log/dmaap-adaptor-service
RUN mkdir -p /opt/app/dmaap-adaptor-service/etc/cert/
RUN mkdir -p /var/dmaap-adaptor-service
-RUN chmod -R 777 /var/dmaap-adaptor-service
ADD /config/application.yaml /opt/app/dmaap-adaptor-service/config/application.yaml
ADD /config/application_configuration.json /opt/app/dmaap-adaptor-service/data/application_configuration.json_example
ADD /config/keystore.jks /opt/app/dmaap-adaptor-service/etc/cert/keystore.jks
ADD /config/truststore.jks /opt/app/dmaap-adaptor-service/etc/cert/truststore.jks
-RUN chmod -R 777 /opt/app/dmaap-adaptor-service/config/
+ARG user=nonrtric
+ARG group=nonrtric
+
+RUN groupadd $group && \
+ useradd -r -g $group $user
+RUN chown -R $user:$group /opt/app/dmaap-adaptor-service
+RUN chown -R $user:$group /var/log/dmaap-adaptor-service
+RUN chown -R $user:$group /var/dmaap-adaptor-service
+
+USER ${user}
ADD target/${JAR} /opt/app/dmaap-adaptor-service/dmaap-adaptor.jar
CMD ["java", "-jar", "/opt/app/dmaap-adaptor-service/dmaap-adaptor.jar"]
"paths": {
"/actuator/threaddump": {"get": {
"summary": "Actuator web endpoint 'threaddump'",
- "operationId": "handle_2_1_3",
+ "operationId": "threaddump_4",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
}},
"/actuator/info": {"get": {
"summary": "Actuator web endpoint 'info'",
- "operationId": "handle_9",
+ "operationId": "info_2",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
},
"/actuator/loggers": {"get": {
"summary": "Actuator web endpoint 'loggers'",
- "operationId": "handle_6",
+ "operationId": "loggers_2",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
}},
"/actuator/health/**": {"get": {
"summary": "Actuator web endpoint 'health-path'",
- "operationId": "handle_12",
+ "operationId": "health-path_2",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
}},
"/actuator/metrics/{requiredMetricName}": {"get": {
"summary": "Actuator web endpoint 'metrics-requiredMetricName'",
- "operationId": "handle_5",
+ "operationId": "metrics-requiredMetricName_2",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
}},
"/actuator/logfile": {"get": {
"summary": "Actuator web endpoint 'logfile'",
- "operationId": "handle_8",
+ "operationId": "logfile_2",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
"/actuator/loggers/{name}": {
"post": {
"summary": "Actuator web endpoint 'loggers-name'",
- "operationId": "handle_0",
+ "operationId": "loggers-name_3",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
},
"get": {
"summary": "Actuator web endpoint 'loggers-name'",
- "operationId": "handle_7",
+ "operationId": "loggers-name_4",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
},
"/actuator/health": {"get": {
"summary": "Actuator web endpoint 'health'",
- "operationId": "handle_11",
+ "operationId": "health_2",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
}},
"/actuator/metrics": {"get": {
"summary": "Actuator web endpoint 'metrics'",
- "operationId": "handle_4",
+ "operationId": "metrics_2",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
}},
"/actuator/heapdump": {"get": {
"summary": "Actuator web endpoint 'heapdump'",
- "operationId": "handle_10",
+ "operationId": "heapdump_2",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
"title": "Generic Dmaap and Kafka Information Producer",
"version": "1.0"
},
- "tags": [{
- "name": "Actuator",
- "description": "Monitor and interact",
- "externalDocs": {
- "description": "Spring Boot Actuator Web API Documentation",
- "url": "https://docs.spring.io/spring-boot/docs/current/actuator-api/html/"
+ "tags": [
+ {"name": "Information Coordinator Service Simulator (exists only in test)"},
+ {"name": "Producer job control API"},
+ {"name": "Test Consumer Simulator (exists only in test)"},
+ {"name": "DMAAP Simulator (exists only in test)"},
+ {
+ "name": "Actuator",
+ "description": "Monitor and interact",
+ "externalDocs": {
+ "description": "Spring Boot Actuator Web API Documentation",
+ "url": "https://docs.spring.io/spring-boot/docs/current/actuator-api/html/"
+ }
}
- }]
+ ]
}
\ No newline at end of file
servers:
- url: /
tags:
+- name: Information Coordinator Service Simulator (exists only in test)
+- name: Producer job control API
+- name: Test Consumer Simulator (exists only in test)
+- name: DMAAP Simulator (exists only in test)
- name: Actuator
description: Monitor and interact
externalDocs:
tags:
- Actuator
summary: Actuator web endpoint 'threaddump'
- operationId: handle_2_1_3
+ operationId: threaddump_4
responses:
200:
description: OK
tags:
- Actuator
summary: Actuator web endpoint 'info'
- operationId: handle_9
+ operationId: info_2
responses:
200:
description: OK
tags:
- Actuator
summary: Actuator web endpoint 'loggers'
- operationId: handle_6
+ operationId: loggers_2
responses:
200:
description: OK
tags:
- Actuator
summary: Actuator web endpoint 'health-path'
- operationId: handle_12
+ operationId: health-path_2
responses:
200:
description: OK
tags:
- Actuator
summary: Actuator web endpoint 'metrics-requiredMetricName'
- operationId: handle_5
+ operationId: metrics-requiredMetricName_2
parameters:
- name: requiredMetricName
in: path
tags:
- Actuator
summary: Actuator web endpoint 'logfile'
- operationId: handle_8
+ operationId: logfile_2
responses:
200:
description: OK
tags:
- Actuator
summary: Actuator web endpoint 'loggers-name'
- operationId: handle_7
+ operationId: loggers-name_4
parameters:
- name: name
in: path
tags:
- Actuator
summary: Actuator web endpoint 'loggers-name'
- operationId: handle_0
+ operationId: loggers-name_3
parameters:
- name: name
in: path
tags:
- Actuator
summary: Actuator web endpoint 'health'
- operationId: handle_11
+ operationId: health_2
responses:
200:
description: OK
tags:
- Actuator
summary: Actuator web endpoint 'metrics'
- operationId: handle_4
+ operationId: metrics_2
responses:
200:
description: OK
tags:
- Actuator
summary: Actuator web endpoint 'heapdump'
- operationId: handle_10
+ operationId: heapdump_2
responses:
200:
description: OK
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
- <version>2.5.3</version>
+ <version>2.5.8</version>
<relativePath />
</parent>
<groupId>org.o-ran-sc.nonrtric</groupId>
<artifactId>dmaap-adaptor</artifactId>
- <version>1.0.0-SNAPSHOT</version>
+ <version>1.0.1-SNAPSHOT</version>
<licenses>
<license>
<name>The Apache Software License, Version 2.0</name>
<java.version>11</java.version>
<springfox.version>3.0.0</springfox.version>
<immutable.version>2.8.2</immutable.version>
- <sdk.version>1.1.6</sdk.version>
<swagger.version>2.1.6</swagger.version>
- <json.version>20190722</json.version>
- <commons-net.version>3.6</commons-net.version>
+ <json.version>20211205</json.version>
<maven-compiler-plugin.version>3.8.0</maven-compiler-plugin.version>
<formatter-maven-plugin.version>2.12.2</formatter-maven-plugin.version>
<spotless-maven-plugin.version>1.24.3</spotless-maven-plugin.version>
<swagger-codegen-maven-plugin.version>3.0.11</swagger-codegen-maven-plugin.version>
<docker-maven-plugin>0.30.0</docker-maven-plugin>
- <version.dmaap>1.1.11</version.dmaap>
- <javax.ws.rs-api.version>2.1.1</javax.ws.rs-api.version>
<sonar-maven-plugin.version>3.7.0.1746</sonar-maven-plugin.version>
<jacoco-maven-plugin.version>0.8.5</jacoco-maven-plugin.version>
<exec.skip>true</exec.skip>
<dependency>
<groupId>org.springdoc</groupId>
<artifactId>springdoc-openapi-ui</artifactId>
- <version>1.5.4</version>
+ <version>1.6.3</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>swagger-jaxrs2-servlet-initializer</artifactId>
<version>${swagger.version}</version>
</dependency>
- <dependency>
- <groupId>javax.xml.bind</groupId>
- <artifactId>jaxb-api</artifactId>
- </dependency>
<dependency>
<groupId>org.immutables</groupId>
<artifactId>value</artifactId>
<artifactId>json</artifactId>
<version>${json.version}</version>
</dependency>
- <dependency>
- <groupId>commons-net</groupId>
- <artifactId>commons-net</artifactId>
- <version>${commons-net.version}</version>
- </dependency>
- <dependency>
- <groupId>org.onap.dcaegen2.services.sdk.rest.services</groupId>
- <artifactId>cbs-client</artifactId>
- <version>${sdk.version}</version>
- </dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<scope>provided</scope>
</dependency>
- <dependency>
- <groupId>javax.ws.rs</groupId>
- <artifactId>javax.ws.rs-api</artifactId>
- <version>${javax.ws.rs-api.version}</version>
- </dependency>
- <!-- https://mvnrepository.com/artifact/com.github.erosb/everit-json-schema -->
- <dependency>
- <groupId>com.github.erosb</groupId>
- <artifactId>everit-json-schema</artifactId>
- <version>1.12.1</version>
- </dependency>
<!-- Actuator dependencies -->
<dependency>
<groupId>org.springframework.boot</groupId>
<dependency>
<groupId>io.projectreactor.kafka</groupId>
<artifactId>reactor-kafka</artifactId>
- <version>1.3.7</version>
+ <version>1.3.9</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<system>JIRA</system>
<url>https://jira.o-ran-sc.org/</url>
</issueManagement>
-</project>
\ No newline at end of file
+</project>
public synchronized void restartNonRunningTopics() {
for (String typeId : this.consumers.keySet()) {
for (KafkaJobDataConsumer consumer : this.consumers.get(typeId)) {
- restartTopic(consumer);
+ if (!consumer.isRunning()) {
+ restartTopic(consumer);
+ }
}
}
}
"type": "string"
},
"maxConcurrency": {
- "type": "integer"
+ "type": "integer",
+ "minimum": 1
},
"bufferTimeout": {
"type": "object",
"properties": {
"maxSize": {
- "type": "integer"
+ "type": "integer",
+ "minimum": 1
},
"maxTimeMiliseconds": {
- "type": "integer"
+ "type": "integer",
+ "minimum": 0,
+ "maximum": 160000
}
},
"additionalProperties": false,
() -> assertThat(icsSimulatorController.testResults.registrationInfo.supportedTypeIds).hasSize(2));
}
- private void testErrorCode(Mono<?> request, HttpStatus expStatus, String responseContains) {
+ public static void testErrorCode(Mono<?> request, HttpStatus expStatus, String responseContains) {
testErrorCode(request, expStatus, responseContains, true);
}
- private void testErrorCode(Mono<?> request, HttpStatus expStatus, String responseContains,
+ public static void testErrorCode(Mono<?> request, HttpStatus expStatus, String responseContains,
boolean expectApplicationProblemJsonMediaType) {
StepVerifier.create(request) //
.expectSubscription() //
.verify();
}
- private boolean checkWebClientError(Throwable throwable, HttpStatus expStatus, String responseContains,
+ private static boolean checkWebClientError(Throwable throwable, HttpStatus expStatus, String responseContains,
boolean expectApplicationProblemJsonMediaType) {
assertTrue(throwable instanceof WebClientResponseException);
WebClientResponseException responseException = (WebClientResponseException) throwable;
import org.springframework.boot.web.embedded.tomcat.TomcatServletWebServerFactory;
import org.springframework.boot.web.servlet.server.ServletWebServerFactory;
import org.springframework.context.annotation.Bean;
+import org.springframework.http.HttpStatus;
import org.springframework.test.context.TestPropertySource;
import org.springframework.test.context.junit.jupiter.SpringExtension;
+
@SuppressWarnings("java:S3577") // Rename class
@ExtendWith(SpringExtension.class)
@SpringBootTest(webEnvironment = WebEnvironment.DEFINED_PORT)
}
@Test
- void testWholeChain() throws Exception {
+ void testKafkaJobParameterOutOfRange() {
+ await().untilAsserted(() -> assertThat(producerRegstrationTask.isRegisteredInIcs()).isTrue());
+ final String TYPE_ID = "KafkaInformationType";
+
+ Job.Parameters param = new Job.Parameters("filter", new Job.BufferTimeout(123, 170 * 1000), 1);
+
+ ConsumerJobInfo jobInfo =
+ new ConsumerJobInfo(TYPE_ID, jsonObject(gson.toJson(param)), "owner", consumerUri(), "");
+ String body = gson.toJson(jobInfo);
+
+ ApplicationTest.testErrorCode(restClient().put(jobUrl("KAFKA_JOB_ID"), body), HttpStatus.BAD_REQUEST,
+ "Json validation failure");
+
+ }
+
+ @Test
+ void testDmaapMessage() throws Exception {
await().untilAsserted(() -> assertThat(producerRegstrationTask.isRegisteredInIcs()).isTrue());
createInformationJobInIcs(DMAAP_TYPE_ID, DMAAP_JOB_ID, ".*DmaapResponse.*");
deleteInformationJobInIcs(DMAAP_JOB_ID);
await().untilAsserted(() -> assertThat(this.jobs.size()).isZero());
-
}
}
Map<String, Object> props = new HashMap<>();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
- props.put(ProducerConfig.CLIENT_ID_CONFIG, "sample-producer");
+ props.put(ProducerConfig.CLIENT_ID_CONFIG, "sample-producerx");
props.put(ProducerConfig.ACKS_CONFIG, "all");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
.doOnError(e -> logger.error("Send failed", e)) //
.blockLast();
+ sender.close();
+
}
private void verifiedReceivedByConsumer(String... strings) {
}
}
+ @Test
+ void simpleCase() throws InterruptedException {
+ final String JOB_ID = "ID";
+
+ // Register producer, Register types
+ await().untilAsserted(() -> assertThat(icsSimulatorController.testResults.registrationInfo).isNotNull());
+ assertThat(icsSimulatorController.testResults.registrationInfo.supportedTypeIds).hasSize(this.types.size());
+
+ this.icsSimulatorController.addJob(consumerJobInfo(null, Duration.ZERO, 0, 1), JOB_ID, restClient());
+ await().untilAsserted(() -> assertThat(this.jobs.size()).isEqualTo(1));
+
+ Thread.sleep(4000);
+ var dataToSend = Flux.just(senderRecord("Message"));
+ sendDataToStream(dataToSend);
+
+ verifiedReceivedByConsumer("Message");
+
+ this.icsSimulatorController.deleteJob(JOB_ID, restClient());
+
+ await().untilAsserted(() -> assertThat(this.jobs.size()).isZero());
+ await().untilAsserted(() -> assertThat(this.kafkaTopicConsumers.getConsumers().keySet()).isEmpty());
+ }
+
@Test
void kafkaIntegrationTest() throws Exception {
final String JOB_ID1 = "ID1";
assertThat(icsSimulatorController.testResults.registrationInfo.supportedTypeIds).hasSize(this.types.size());
// Create two jobs. One buffering and one with a filter
- this.icsSimulatorController.addJob(consumerJobInfo(null, Duration.ofMillis(400), 1000, 20), JOB_ID1,
+ this.icsSimulatorController.addJob(consumerJobInfo(null, Duration.ofMillis(400), 10, 20), JOB_ID1,
restClient());
this.icsSimulatorController.addJob(consumerJobInfo("^Message_1$", Duration.ZERO, 0, 1), JOB_ID2, restClient());
await().untilAsserted(() -> assertThat(this.jobs.size()).isEqualTo(2));
+ Thread.sleep(2000);
var dataToSend = Flux.range(1, 3).map(i -> senderRecord("Message_" + i)); // Message_1, Message_2 etc.
sendDataToStream(dataToSend);
!consumer/
dmaap
!dmaap/
+ics
+!ics/
##
## Deploy
##
-FROM gcr.io/distroless/base-debian10
+FROM gcr.io/distroless/base-debian11
WORKDIR /
## Copy from "build" stage
COPY --from=build /dmaapmediatorproducer .
>- PRODUCER_KEY_PATH Optional. The path to the key to the certificate to use for https. Defaults to `security/producer.key`
>- LOG_LEVEL Optional. The log level, which can be `Error`, `Warn`, `Info` or `Debug`. Defaults to `Info`.
-The file `configs/type_config.json` contains the configuration of job types that the producer will support.
+Any of the addresses used by this product can be configured to use https, by specifying it as the scheme of the address URI. Clients configured to use https will not use server certificate verification. The communication towards the consumers will use https if their callback address URI uses that scheme. The producer's own callback will only listen to the scheme configured in the scheme of the info producer host address.
+
+The configured public key and certificate shall be PEM-encoded. A self-signed certificate and key are provided in the `security` folder of the project. These files should be replaced for production. To generate a self-signed key and certificate, use the example code below:
+
+ openssl req -new -x509 -sha256 -key server.key -out server.crt -days 3650
+
+The file `configs/type_config.json` contains the configuration of job types that the producer will support, see example below.
{
"types":
{
"id": The ID of the job type, e.g. "STD_Fault_Messages",
"dmaapTopicUrl": The topic URL to poll from DMaaP Message Router, e.g. "events/unauthenticated.SEC_FAULT_OUTPUT/dmaapmediatorproducer/STD_Fault_Messages"
+ },
+ {
+ "id": The ID of the job type, e.g. "Kafka_TestTopic",
+ "kafkaInputTopic": The Kafka topic to poll
}
]
}
-Any of the addresses used by this product can be configured to use https, by specifying it as the scheme of the address URI. Clients configured to use https will not use server certificate verification. The communication towards the consumers will use https if their callback address URI uses that scheme. The producer's own callback will only listen to the scheme configured in the scheme of the info producer host address.
+Each information type has the following properties:
+ - id the information type identity as exposed in the Information Coordination Service data consumer API
+ - dmaapTopicUrl the URL for fetching information from DMaaP
+ - kafkaInputTopic the Kafka topic to get input from
-The configured public key and cerificate shall be PEM-encoded. A self signed certificate and key are provided in the `security` folder of the project. These files should be replaced for production. To generate a self signed key and certificate, use the example code below:
-
- openssl req -new -x509 -sha256 -key server.key -out server.crt -days 3650
+Either the "dmaapTopicUrl" or the "kafkaInputTopic" must be provided for each type, not both.
## Functionality
At start-up the producer will register the configured job types in ICS and also register itself as a producer supporting these types. If ICS is unavailable, the producer will retry to connect indefinitely. The same goes for MR.
-Once the initial registration is done, the producer will constantly poll MR for all configured job types. When receiving messages for a type, it will distribute these messages to all jobs registered for the type. If no jobs for that type are registered, the messages will be discarded. If a consumer is unavailable for distribution, the messages will be discarded for that consumer until it is available again.
+Once the initial registration is done, the producer will constantly poll MR and/or Kafka for all configured job types. When receiving messages for a type, it will distribute these messages to all jobs registered for the type. If no jobs for that type are registered, the messages will be discarded. If a consumer is unavailable for distribution, the messages will be discarded for that consumer until it is available again.
-The producer provides a REST API to control the log level. The available levels are the same as the ones used in the configuration above.
+The producer provides a REST API that fulfills the ICS Data producer API, see [Data producer (callbacks)](<https://docs.o-ran-sc.org/projects/o-ran-sc-nonrtric/en/latest/ics-api.html#tag/Data-producer-(callbacks)>). The health check method returns the registration status of the producer in ICS as JSON. It also provides a method to control the log level of the producer. The available log levels are the same as the ones used in the configuration above.
PUT https://mrproducer:8085/admin/log?level=<new level>
+The Swagger documentation of the producer's API is also available, through the `/swagger` path.
+
+When an Information Job is created in the Information Coordinator Service Consumer API, it is possible to define a number of job specific properties. For an Information type that has a Kafka topic defined, the following Json schema defines the properties that can be used:
+
+
+```sh
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type": "object",
+ "properties": {
+ "bufferTimeout": {
+ "type": "object",
+ "properties": {
+ "maxSize": {
+ "type": "integer"
+ },
+ "maxTimeMiliseconds": {
+ "type": "integer"
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "maxSize",
+ "maxTimeMiliseconds"
+ ]
+ }
+ },
+ "additionalProperties": false
+}
+```
+-bufferTimeout, can be used to reduce the number of REST calls to the consumer. If defined, a number of objects will be
+ buffered and sent in one REST call to the consumer.
+ The buffered objects will be put in a Json array and quoted. Example;
+ Object1 and Object2 may be posted in one call --> ["Object1", "Object2"]
+ The bufferTimeout is a Json object and the parameters in the object are:
+ - maxSize the maximum number of buffered objects before posting
+ - maxTimeMiliseconds the maximum delay time to buffer before posting
+ If no bufferTimeout is specified, each object will be posted as received in separate calls (not quoted and put in a Json array).
+
+
+For an information type that only has a DMaaP topic, the following Json schema is used:
+
+```sh
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type": "object",
+ "properties": {
+ },
+ "additionalProperties": false
+}
+```
+
## Development
-To make it easy to test during development of the producer, two stubs are provided in the `stub` folder.
+To make it easy to test during development of the producer, three stubs are provided in the `stub` folder.
One, under the `dmaap` folder, called `dmaap` that stubs MR and respond with an array with one message with `eventSeverity` alternating between `NORMAL` and `CRITICAL`. The default port is `3905`, but this can be overridden by passing a `-port <PORT>` flag when starting the stub. To build and start the stub, do the following:
>1. cd stub/dmaap
>2. go build
>3. ./dmaap [-port \<PORT>]
-One, under the `consumer` folder, called `consumer` that at startup will register a job of type `STD_Fault_Messages` in ICS, and then listen for REST calls and print the body of them. By default, it listens to the port `40935`, but his can be overridden by passing a `-port <PORT>` flag when starting the stub. To build and start the stub, do the following:
+An ICS stub, under the `ics` folder, that listens for registration calls from the producer. When it gets a call it prints out the data of the call. By default, it listens to the port `8434`, but this can be overridden by passing a `-port [PORT]` flag when starting the stub. To build and start the stub, do the following:
+>1. cd stub/ics
+>2. go build
+>3. ./ics [-port \<PORT>]
+
+One, under the `consumer` folder, called `consumer` that at startup will register a job of type `STD_Fault_Messages` in ICS, if it is available, and then listen for REST calls and print the body of them. By default, it listens to the port `40935`, but this can be overridden by passing a `-port <PORT>` flag when starting the stub. To build and start the stub, do the following:
>1. cd stub/consumer
>2. go build
>3. ./consumer [-port \<PORT>]
--- /dev/null
+// Package api GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+// This file was generated by swaggo/swag
+package api
+
+import (
+ "bytes"
+ "encoding/json"
+ "strings"
+ "text/template"
+
+ "github.com/swaggo/swag"
+)
+
+var doc = `{
+ "schemes": {{ marshal .Schemes }},
+ "swagger": "2.0",
+ "info": {
+ "description": "{{escape .Description}}",
+ "title": "{{.Title}}",
+ "contact": {},
+ "license": {
+ "name": "Apache 2.0",
+ "url": "http://www.apache.org/licenses/LICENSE-2.0.html"
+ },
+ "version": "{{.Version}}"
+ },
+ "host": "{{.Host}}",
+ "basePath": "{{.BasePath}}",
+ "paths": {
+ "/admin/log": {
+ "put": {
+ "description": "Set the log level of the producer.",
+ "tags": [
+ "Admin"
+ ],
+ "summary": "Set log level",
+ "parameters": [
+ {
+ "enum": [
+ "Error",
+ "Warn",
+ "Info",
+ "Debug"
+ ],
+ "type": "string",
+ "description": "string enums",
+ "name": "level",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": ""
+ },
+ "400": {
+ "description": "Problem as defined in https://tools.ietf.org/html/rfc7807",
+ "schema": {
+ "$ref": "#/definitions/ErrorInfo"
+ },
+ "headers": {
+ "Content-Type": {
+ "type": "string",
+ "description": "application/problem+json"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/health_check": {
+ "get": {
+ "description": "Get the status of the producer. Will show if the producer has registered in ICS.",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Data producer (callbacks)"
+ ],
+ "summary": "Get status",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/"
+ }
+ }
+ }
+ }
+ },
+ "/info_job": {
+ "post": {
+ "description": "Callback for ICS to add an info job",
+ "consumes": [
+ "application/json"
+ ],
+ "tags": [
+ "Data producer (callbacks)"
+ ],
+ "summary": "Add info job",
+ "parameters": [
+ {
+ "description": "Info job data",
+ "name": "user",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/JobInfo"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": ""
+ },
+ "400": {
+ "description": "Problem as defined in https://tools.ietf.org/html/rfc7807",
+ "schema": {
+ "$ref": "#/definitions/ErrorInfo"
+ },
+ "headers": {
+ "Content-Type": {
+ "type": "string",
+ "description": "application/problem+json"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/info_job/{infoJobId}": {
+ "delete": {
+ "description": "Callback for ICS to delete an info job",
+ "tags": [
+ "Data producer (callbacks)"
+ ],
+ "summary": "Delete info job",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Info job ID",
+ "name": "infoJobId",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": ""
+ }
+ }
+ }
+ },
+ "/swagger": {
+ "get": {
+ "description": "Get the Swagger API documentation for the producer.",
+ "tags": [
+ "Admin"
+ ],
+ "summary": "Get Swagger Documentation",
+ "responses": {
+ "200": {
+ "description": ""
+ }
+ }
+ }
+ }
+ },
+ "definitions": {
+ "": {
+ "type": "object",
+ "properties": {
+ "registeredStatus": {
+ "description": "The registration status of the producer in Information Coordinator Service. Either ` + "`" + `registered` + "`" + ` or ` + "`" + `not registered` + "`" + `",
+ "type": "string",
+ "example": "registered"
+ }
+ }
+ },
+ "BufferTimeout": {
+ "type": "object",
+ "properties": {
+ "maxSize": {
+ "type": "integer"
+ },
+ "maxTimeMiliseconds": {
+ "type": "integer"
+ }
+ }
+ },
+ "ErrorInfo": {
+ "type": "object",
+ "properties": {
+ "detail": {
+ "description": "A human-readable explanation specific to this occurrence of the problem.",
+ "type": "string",
+ "example": "Info job type not found"
+ },
+ "instance": {
+ "description": "A URI reference that identifies the specific occurrence of the problem.",
+ "type": "string"
+ },
+ "status": {
+ "description": "The HTTP status code generated by the origin server for this occurrence of the problem.",
+ "type": "integer",
+ "example": 400
+ },
+ "title": {
+ "description": "A short, human-readable summary of the problem type.",
+ "type": "string"
+ },
+ "type": {
+ "description": "A URI reference that identifies the problem type.",
+ "type": "string"
+ }
+ }
+ },
+ "JobInfo": {
+ "type": "object",
+ "properties": {
+ "info_job_data": {
+ "$ref": "#/definitions/Parameters"
+ },
+ "info_job_identity": {
+ "type": "string"
+ },
+ "info_type_identity": {
+ "type": "string"
+ },
+ "last_updated": {
+ "type": "string"
+ },
+ "owner": {
+ "type": "string"
+ },
+ "target_uri": {
+ "type": "string"
+ }
+ }
+ },
+ "Parameters": {
+ "type": "object",
+ "properties": {
+ "bufferTimeout": {
+ "$ref": "#/definitions/BufferTimeout"
+ }
+ }
+ }
+ }
+}`
+
+type swaggerInfo struct { // values substituted into the Swagger document template below
+ Version string // API version string
+ Host string // host serving the API; empty by default
+ BasePath string // base path prefix for all routes; empty by default
+ Schemes []string // URL schemes, e.g. "http", "https"
+ Title string // API title shown in Swagger UI
+ Description string // free-text description; newlines are escaped before templating
+}
+
+// SwaggerInfo holds exported Swagger Info so clients can modify it
+var SwaggerInfo = swaggerInfo{ // defaults emitted by the swag generator; mutate before the doc is read
+ Version: "1.1.0",
+ Host: "",
+ BasePath: "",
+ Schemes: []string{},
+ Title: "DMaaP Mediator Producer",
+ Description: "",
+}
+
+type s struct{} // doc provider registered with swag; serves the rendered template via ReadDoc
+
+func (s *s) ReadDoc() string { // returns the Swagger JSON with SwaggerInfo values substituted in
+ sInfo := SwaggerInfo // work on a copy so the exported variable is not mutated
+ sInfo.Description = strings.Replace(sInfo.Description, "\n", "\\n", -1) // keep the JSON value single-line
+
+ t, err := template.New("swagger_info").Funcs(template.FuncMap{
+ "marshal": func(v interface{}) string { // template helper: render a value as JSON
+ a, _ := json.Marshal(v)
+ return string(a)
+ },
+ "escape": func(v interface{}) string { // template helper: make a string safe to embed in JSON
+ // escape tabs
+ str := strings.Replace(v.(string), "\t", "\\t", -1)
+ // replace " with \", and if that results in \\", replace that with \\\"
+ str = strings.Replace(str, "\"", "\\\"", -1)
+ return strings.Replace(str, "\\\\\"", "\\\\\\\"", -1)
+ },
+ }).Parse(doc)
+ if err != nil {
+ return doc // fall back to the raw template text on parse failure
+ }
+
+ var tpl bytes.Buffer
+ if err := t.Execute(&tpl, sInfo); err != nil {
+ return doc // fall back to the raw template text on execution failure
+ }
+
+ return tpl.String()
+}
+
+func init() {
+ swag.Register("swagger", &s{}) // register this doc provider under the default "swagger" name
+}
--- /dev/null
+{
+ "swagger": "2.0",
+ "info": {
+ "title": "DMaaP Mediator Producer",
+ "contact": {},
+ "license": {
+ "name": "Apache 2.0",
+ "url": "http://www.apache.org/licenses/LICENSE-2.0.html"
+ },
+ "version": "1.1.0"
+ },
+ "paths": {
+ "/admin/log": {
+ "put": {
+ "description": "Set the log level of the producer.",
+ "tags": [
+ "Admin"
+ ],
+ "summary": "Set log level",
+ "parameters": [
+ {
+ "enum": [
+ "Error",
+ "Warn",
+ "Info",
+ "Debug"
+ ],
+ "type": "string",
+ "description": "string enums",
+ "name": "level",
+ "in": "query"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": ""
+ },
+ "400": {
+ "description": "Problem as defined in https://tools.ietf.org/html/rfc7807",
+ "schema": {
+ "$ref": "#/definitions/ErrorInfo"
+ },
+ "headers": {
+ "Content-Type": {
+ "type": "string",
+ "description": "application/problem+json"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/health_check": {
+ "get": {
+ "description": "Get the status of the producer. Will show if the producer has registered in ICS.",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "Data producer (callbacks)"
+ ],
+ "summary": "Get status",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/"
+ }
+ }
+ }
+ }
+ },
+ "/info_job": {
+ "post": {
+ "description": "Callback for ICS to add an info job",
+ "consumes": [
+ "application/json"
+ ],
+ "tags": [
+ "Data producer (callbacks)"
+ ],
+ "summary": "Add info job",
+ "parameters": [
+ {
+ "description": "Info job data",
+ "name": "user",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/JobInfo"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": ""
+ },
+ "400": {
+ "description": "Problem as defined in https://tools.ietf.org/html/rfc7807",
+ "schema": {
+ "$ref": "#/definitions/ErrorInfo"
+ },
+ "headers": {
+ "Content-Type": {
+ "type": "string",
+ "description": "application/problem+json"
+ }
+ }
+ }
+ }
+ }
+ },
+ "/info_job/{infoJobId}": {
+ "delete": {
+ "description": "Callback for ICS to delete an info job",
+ "tags": [
+ "Data producer (callbacks)"
+ ],
+ "summary": "Delete info job",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Info job ID",
+ "name": "infoJobId",
+ "in": "path",
+ "required": true
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": ""
+ }
+ }
+ }
+ },
+ "/swagger": {
+ "get": {
+ "description": "Get the Swagger API documentation for the producer.",
+ "tags": [
+ "Admin"
+ ],
+ "summary": "Get Swagger Documentation",
+ "responses": {
+ "200": {
+ "description": ""
+ }
+ }
+ }
+ }
+ },
+ "definitions": {
+ "": {
+ "type": "object",
+ "properties": {
+ "registeredStatus": {
+ "description": "The registration status of the producer in Information Coordinator Service. Either `registered` or `not registered`",
+ "type": "string",
+ "example": "registered"
+ }
+ }
+ },
+ "BufferTimeout": {
+ "type": "object",
+ "properties": {
+ "maxSize": {
+ "type": "integer"
+ },
+ "maxTimeMiliseconds": {
+ "type": "integer"
+ }
+ }
+ },
+ "ErrorInfo": {
+ "type": "object",
+ "properties": {
+ "detail": {
+ "description": "A human-readable explanation specific to this occurrence of the problem.",
+ "type": "string",
+ "example": "Info job type not found"
+ },
+ "instance": {
+ "description": "A URI reference that identifies the specific occurrence of the problem.",
+ "type": "string"
+ },
+ "status": {
+ "description": "The HTTP status code generated by the origin server for this occurrence of the problem.",
+ "type": "integer",
+ "example": 400
+ },
+ "title": {
+ "description": "A short, human-readable summary of the problem type.",
+ "type": "string"
+ },
+ "type": {
+ "description": "A URI reference that identifies the problem type.",
+ "type": "string"
+ }
+ }
+ },
+ "JobInfo": {
+ "type": "object",
+ "properties": {
+ "info_job_data": {
+ "$ref": "#/definitions/Parameters"
+ },
+ "info_job_identity": {
+ "type": "string"
+ },
+ "info_type_identity": {
+ "type": "string"
+ },
+ "last_updated": {
+ "type": "string"
+ },
+ "owner": {
+ "type": "string"
+ },
+ "target_uri": {
+ "type": "string"
+ }
+ }
+ },
+ "Parameters": {
+ "type": "object",
+ "properties": {
+ "bufferTimeout": {
+ "$ref": "#/definitions/BufferTimeout"
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+definitions:
+ "":
+ properties:
+ registeredStatus:
+ description: The registration status of the producer in Information Coordinator
+ Service. Either `registered` or `not registered`
+ example: registered
+ type: string
+ type: object
+ BufferTimeout:
+ properties:
+ maxSize:
+ type: integer
+ maxTimeMiliseconds:
+ type: integer
+ type: object
+ ErrorInfo:
+ properties:
+ detail:
+ description: A human-readable explanation specific to this occurrence of the
+ problem.
+ example: Info job type not found
+ type: string
+ instance:
+ description: A URI reference that identifies the specific occurrence of the
+ problem.
+ type: string
+ status:
+ description: The HTTP status code generated by the origin server for this
+ occurrence of the problem.
+ example: 400
+ type: integer
+ title:
+ description: A short, human-readable summary of the problem type.
+ type: string
+ type:
+ description: A URI reference that identifies the problem type.
+ type: string
+ type: object
+ JobInfo:
+ properties:
+ info_job_data:
+ $ref: '#/definitions/Parameters'
+ info_job_identity:
+ type: string
+ info_type_identity:
+ type: string
+ last_updated:
+ type: string
+ owner:
+ type: string
+ target_uri:
+ type: string
+ type: object
+ Parameters:
+ properties:
+ bufferTimeout:
+ $ref: '#/definitions/BufferTimeout'
+ type: object
+info:
+ contact: {}
+ license:
+ name: Apache 2.0
+ url: http://www.apache.org/licenses/LICENSE-2.0.html
+ title: DMaaP Mediator Producer
+ version: 1.1.0
+paths:
+ /admin/log:
+ put:
+ description: Set the log level of the producer.
+ parameters:
+ - description: string enums
+ enum:
+ - Error
+ - Warn
+ - Info
+ - Debug
+ in: query
+ name: level
+ type: string
+ responses:
+ "200":
+ description: ""
+ "400":
+ description: Problem as defined in https://tools.ietf.org/html/rfc7807
+ headers:
+ Content-Type:
+ description: application/problem+json
+ type: string
+ schema:
+ $ref: '#/definitions/ErrorInfo'
+ summary: Set log level
+ tags:
+ - Admin
+ /health_check:
+ get:
+ description: Get the status of the producer. Will show if the producer has registered
+ in ICS.
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/'
+ summary: Get status
+ tags:
+ - Data producer (callbacks)
+ /info_job:
+ post:
+ consumes:
+ - application/json
+ description: Callback for ICS to add an info job
+ parameters:
+ - description: Info job data
+ in: body
+ name: user
+ required: true
+ schema:
+ $ref: '#/definitions/JobInfo'
+ responses:
+ "200":
+ description: ""
+ "400":
+ description: Problem as defined in https://tools.ietf.org/html/rfc7807
+ headers:
+ Content-Type:
+ description: application/problem+json
+ type: string
+ schema:
+ $ref: '#/definitions/ErrorInfo'
+ summary: Add info job
+ tags:
+ - Data producer (callbacks)
+ /info_job/{infoJobId}:
+ delete:
+ description: Callback for ICS to delete an info job
+ parameters:
+ - description: Info job ID
+ in: path
+ name: infoJobId
+ required: true
+ type: string
+ responses:
+ "200":
+ description: ""
+ summary: Delete info job
+ tags:
+ - Data producer (callbacks)
+ /swagger:
+ get:
+ description: Get the Swagger API documentation for the producer.
+ responses:
+ "200":
+ description: ""
+ summary: Get Swagger Documentation
+ tags:
+ - Admin
+swagger: "2.0"
--- /dev/null
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type": "object",
+ "properties": {
+ },
+ "additionalProperties": false
+}
--- /dev/null
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type": "object",
+ "properties": {
+ "bufferTimeout": {
+ "type": "object",
+ "properties": {
+ "maxSize": {
+ "type": "integer"
+ },
+ "maxTimeMiliseconds": {
+ "type": "integer"
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "maxSize",
+ "maxTimeMiliseconds"
+ ]
+ }
+ },
+ "additionalProperties": false
+}
\ No newline at end of file
{
"id": "STD_Fault_Messages",
"dmaapTopicUrl": "/events/unauthenticated.SEC_FAULT_OUTPUT/dmaapmediatorproducer/STD_Fault_Messages"
+ },
+ {
+ "id": "Kafka_TestTopic",
+ "kafkaInputTopic": "TestTopic"
}
]
}
\ No newline at end of file
--- /dev/null
+# The Jenkins job requires a tag to build the Docker image.
+# By default this file is in the docker build directory,
+# but the location can be configured in the JJB template.
+---
+tag: 1.0.1
#!/bin/bash
##############################################################################
#
-# Copyright (C) 2021: Nordix Foundation
+# Copyright (C) 2022: Nordix Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
##############################################################################
-go build
-
-go test ./...
+go get -u github.com/swaggo/swag/cmd/swag
+swag init --output api
+swag fmt
\ No newline at end of file
go 1.17
require (
+ github.com/confluentinc/confluent-kafka-go v1.8.2
github.com/gorilla/mux v1.8.0
github.com/hashicorp/go-retryablehttp v0.7.0
github.com/sirupsen/logrus v1.8.1
github.com/stretchr/testify v1.7.0
+ github.com/swaggo/http-swagger v1.1.2
+ github.com/swaggo/swag v1.7.8
)
require (
+ github.com/KyleBanks/depth v1.2.1 // indirect
+ github.com/PuerkitoBio/purell v1.1.1 // indirect
+ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/ghodss/yaml v1.0.0 // indirect
+ github.com/go-openapi/jsonpointer v0.19.5 // indirect
+ github.com/go-openapi/jsonreference v0.19.6 // indirect
+ github.com/go-openapi/spec v0.20.4 // indirect
+ github.com/go-openapi/swag v0.19.15 // indirect
github.com/hashicorp/go-cleanhttp v0.5.1 // indirect
+ github.com/josharian/intern v1.0.0 // indirect
+ github.com/mailru/easyjson v0.7.7 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/russross/blackfriday/v2 v2.1.0 // indirect
+ github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
github.com/stretchr/objx v0.1.0 // indirect
- golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 // indirect
- gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
+ github.com/swaggo/files v0.0.0-20210815190702-a29dd2bc99b2 // indirect
+ github.com/urfave/cli/v2 v2.3.0 // indirect
+ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d // indirect
+ golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e // indirect
+ golang.org/x/text v0.3.7 // indirect
+ golang.org/x/tools v0.1.7 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect
)
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
+github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
+github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/confluentinc/confluent-kafka-go v1.8.2 h1:PBdbvYpyOdFLehj8j+9ba7FL4c4Moxn79gy9cYKxG5E=
+github.com/confluentinc/confluent-kafka-go v1.8.2/go.mod h1:u2zNLny2xq+5rWeTQjFHbDzzNuba4P1vo31r9r4uAdg=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU=
+github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.19.4/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
+github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
+github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs=
+github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
+github.com/go-openapi/spec v0.19.14/go.mod h1:gwrgJS15eCUgjLpMjBJmbZezCsw88LmgeEip0M63doA=
+github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU=
+github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M=
+github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.11/go.mod h1:Uc0gKkdR+ojzsEpjh39QChyu92vPgIr72POcgHMAgSY=
+github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M=
+github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM=
+github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-retryablehttp v0.7.0 h1:eu1EI/mbirUgP5C8hVsTNaGZreBDlYiwC1FZWkvQPQ4=
github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/swaggo/files v0.0.0-20190704085106-630677cd5c14/go.mod h1:gxQT6pBGRuIGunNf/+tSOB5OHvguWi8Tbt82WOkf35E=
+github.com/swaggo/files v0.0.0-20210815190702-a29dd2bc99b2 h1:+iNTcqQJy0OZ5jk6a5NLib47eqXK8uYcPX+O4+cBpEM=
+github.com/swaggo/files v0.0.0-20210815190702-a29dd2bc99b2/go.mod h1:lKJPbtWzJ9JhsTN1k1gZgleJWY/cqq0psdoMmaThG3w=
+github.com/swaggo/http-swagger v1.1.2 h1:ikcSD+EUOx+2oNZ2N6u8IYa8ScOsAvE7Jh+E1dW6i94=
+github.com/swaggo/http-swagger v1.1.2/go.mod h1:mX5nhypDmoSt4iw2mc5aKXxRFvp1CLLcCiog2B9M+Ro=
+github.com/swaggo/swag v1.7.0/go.mod h1:BdPIL73gvS9NBsdi7M1JOxLvlbfvNRaBP8m6WT6Aajo=
+github.com/swaggo/swag v1.7.8 h1:w249t0l/kc/DKMGlS0fppNJQxKyJ8heNaUWB6nsH3zc=
+github.com/swaggo/swag v1.7.8/go.mod h1:gZ+TJ2w/Ve1RwQsA2IRoSOTidHz6DX+PIG8GWvbnoLU=
+github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=
+github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201207224615-747e23833adb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
+golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d h1:20cMwl2fHAzkJMEA+8J4JgqBQcQGzbisXo31MIeenXI=
+golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e h1:WUoyKPm6nCo1BnNUvPGnFG3T5DUVem42yDJZZ4CNxMA=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20201120155355-20be4ac4bd6e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208062317-e652b2f42cc7/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ=
+golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
"encoding/json"
"fmt"
"os"
+ "path/filepath"
"strconv"
log "github.com/sirupsen/logrus"
InfoProducerPort int
InfoCoordinatorAddress string
DMaaPMRAddress string
+ KafkaBootstrapServers string
ProducerCertPath string
ProducerKeyPath string
}
InfoProducerPort: getEnvAsInt("INFO_PRODUCER_PORT", 8085),
InfoCoordinatorAddress: getEnv("INFO_COORD_ADDR", "https://informationservice:8434"),
DMaaPMRAddress: getEnv("DMAAP_MR_ADDR", "https://message-router.onap:3905"),
+ KafkaBootstrapServers: getEnv("KAFKA_BOOTSTRAP_SERVERS", "localhost:9092"),
ProducerCertPath: getEnv("PRODUCER_CERT_PATH", "security/producer.crt"),
ProducerKeyPath: getEnv("PRODUCER_KEY_PATH", "security/producer.key"),
LogLevel: getLogLevel(),
}
}
-func GetJobTypesFromConfiguration(configFile string) ([]TypeDefinition, error) {
- typeDefsByte, err := os.ReadFile(configFile)
+func GetJobTypesFromConfiguration(configFolder string) ([]TypeDefinition, error) {
+ typeDefsByte, err := os.ReadFile(filepath.Join(configFolder, "type_config.json"))
if err != nil {
return nil, err
}
return nil, err
}
+ kafkaTypeSchema, err := getTypeSchema(filepath.Join(configFolder, "typeSchemaKafka.json"))
+ if err != nil {
+ return nil, err
+ }
+
+ dMaaPTypeSchema, err := getTypeSchema(filepath.Join(configFolder, "typeSchemaDmaap.json"))
+ if err != nil {
+ return nil, err
+ }
+
+ for i, typeDef := range typeDefs.Types {
+ if typeDef.IsKafkaType() {
+ typeDefs.Types[i].TypeSchema = kafkaTypeSchema
+ } else {
+ typeDefs.Types[i].TypeSchema = dMaaPTypeSchema
+ }
+ }
return typeDefs.Types, nil
}
+
+func getTypeSchema(schemaFile string) (interface{}, error) {
+ // Read the schema file and decode it into a generic JSON structure.
+ rawSchema, err := os.ReadFile(schemaFile)
+ if err != nil {
+ return nil, err
+ }
+ var schema interface{}
+ if err = json.Unmarshal(rawSchema, &schema); err != nil {
+ return nil, err
+ }
+ return schema, nil
+}
import (
"bytes"
+ "encoding/json"
"os"
"path/filepath"
"testing"
os.Setenv("INFO_PRODUCER_PORT", "8095")
os.Setenv("INFO_COORD_ADDR", "infoCoordAddr")
os.Setenv("DMAAP_MR_ADDR", "mrHost:3908")
+ os.Setenv("KAFKA_BOOTSTRAP_SERVERS", "localhost:9093")
os.Setenv("PRODUCER_CERT_PATH", "cert")
os.Setenv("PRODUCER_KEY_PATH", "key")
t.Cleanup(func() {
InfoProducerPort: 8095,
InfoCoordinatorAddress: "infoCoordAddr",
DMaaPMRAddress: "mrHost:3908",
+ KafkaBootstrapServers: "localhost:9093",
ProducerCertPath: "cert",
ProducerKeyPath: "key",
}
InfoProducerPort: 8085,
InfoCoordinatorAddress: "https://informationservice:8434",
DMaaPMRAddress: "https://message-router.onap:3905",
+ KafkaBootstrapServers: "localhost:9092",
ProducerCertPath: "security/producer.crt",
ProducerKeyPath: "security/producer.key",
}
InfoProducerPort: 8085,
InfoCoordinatorAddress: "https://informationservice:8434",
DMaaPMRAddress: "https://message-router.onap:3905",
+ KafkaBootstrapServers: "localhost:9092",
ProducerCertPath: "security/producer.crt",
ProducerKeyPath: "security/producer.key",
}
assertions.Contains(logString, "Invalid log level: wrong. Log level will be Info!")
}
-const typeDefinition = `{"types": [{"id": "type1", "dmaapTopicUrl": "events/unauthenticated.SEC_FAULT_OUTPUT/dmaapmediatorproducer/type1"}]}`
-
-func TestGetTypesFromConfiguration_fileOkShouldReturnSliceOfTypeDefinitions(t *testing.T) {
+func TestGetJobTypesFromConfiguration_fileOkShouldReturnSliceOfTypeDefinitions(t *testing.T) {
assertions := require.New(t)
+ typesDir := CreateTypeConfigFiles(t)
+ t.Cleanup(func() {
+ os.RemoveAll(typesDir)
+ })
+
+ var typeSchemaObj interface{}
+ json.Unmarshal([]byte(typeSchemaFileContent), &typeSchemaObj)
+
+ types, err := GetJobTypesFromConfiguration(typesDir)
+
+ wantedDMaaPType := TypeDefinition{
+ Identity: "type1",
+ DMaaPTopicURL: "events/unauthenticated.SEC_FAULT_OUTPUT/dmaapmediatorproducer/type1",
+ TypeSchema: typeSchemaObj,
+ }
+ wantedKafkaType := TypeDefinition{
+ Identity: "type2",
+ KafkaInputTopic: "TestTopic",
+ TypeSchema: typeSchemaObj,
+ }
+ wantedTypes := []TypeDefinition{wantedDMaaPType, wantedKafkaType}
+ assertions.EqualValues(wantedTypes, types)
+ assertions.Nil(err)
+}
+
+const typeDefinition = `{"types": [{"id": "type1", "dmaapTopicUrl": "events/unauthenticated.SEC_FAULT_OUTPUT/dmaapmediatorproducer/type1"}, {"id": "type2", "kafkaInputTopic": "TestTopic"}]}`
+const typeSchemaFileContent = `{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type": "object",
+ "properties": {
+ "filter": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": false
+ }`
+
+func CreateTypeConfigFiles(t *testing.T) string {
typesDir, err := os.MkdirTemp("", "configs")
if err != nil {
t.Errorf("Unable to create temporary directory for types due to: %v", err)
}
fname := filepath.Join(typesDir, "type_config.json")
- t.Cleanup(func() {
- os.RemoveAll(typesDir)
- })
if err = os.WriteFile(fname, []byte(typeDefinition), 0666); err != nil {
t.Errorf("Unable to create temporary config file for types due to: %v", err)
}
-
- types, err := GetJobTypesFromConfiguration(fname)
-
- wantedType := TypeDefinition{
- Id: "type1",
- DmaapTopicURL: "events/unauthenticated.SEC_FAULT_OUTPUT/dmaapmediatorproducer/type1",
+ fname = filepath.Join(typesDir, "typeSchemaDmaap.json")
+ if err = os.WriteFile(fname, []byte(typeSchemaFileContent), 0666); err != nil {
+ t.Errorf("Unable to create temporary schema file for DMaaP type due to: %v", err)
}
- wantedTypes := []TypeDefinition{wantedType}
- assertions.EqualValues(wantedTypes, types)
- assertions.Nil(err)
+ fname = filepath.Join(typesDir, "typeSchemaKafka.json")
+ if err = os.WriteFile(fname, []byte(typeSchemaFileContent), 0666); err != nil {
+ t.Errorf("Unable to create temporary schema file for Kafka type due to: %v", err)
+ }
+ return typesDir
}
const registerTypePath = "/data-producer/v1/info-types/"
const registerProducerPath = "/data-producer/v1/info-producers/"
-const typeSchema = `{"type": "object","properties": {},"additionalProperties": false}`
type TypeDefinition struct {
- Id string `json:"id"`
- DmaapTopicURL string `json:"dmaapTopicUrl"`
+ Identity string `json:"id"` // type id used when registering the type in ICS
+ DMaaPTopicURL string `json:"dmaapTopicUrl"` // DMaaP topic URL; non-empty marks a DMaaP type
+ KafkaInputTopic string `json:"kafkaInputTopic"` // Kafka topic name; non-empty marks a Kafka type
+ TypeSchema interface{} // parsed job-parameter JSON schema; populated from schema files at load time, not from type_config.json
+}
+
+func (td TypeDefinition) IsKafkaType() bool { // true when a Kafka input topic is configured
+ return len(td.KafkaInputTopic) > 0
+}
+
+func (td TypeDefinition) IsDMaaPType() bool { // true when a DMaaP topic URL is configured
+ return len(td.DMaaPTopicURL) > 0
+}
type ProducerRegistrationInfo struct {
func (r RegistratorImpl) RegisterTypes(jobTypes []TypeDefinition) error {
for _, jobType := range jobTypes {
- body := fmt.Sprintf(`{"info_job_data_schema": %v}`, typeSchema)
- if error := restclient.Put(r.infoCoordinatorAddress+registerTypePath+url.PathEscape(jobType.Id), []byte(body), r.httpClient); error != nil {
+ s, _ := json.Marshal(jobType.TypeSchema)
+ body := fmt.Sprintf(`{"info_job_data_schema": %v}`, string(s))
+ if error := restclient.Put(r.infoCoordinatorAddress+registerTypePath+url.PathEscape(jobType.Identity), []byte(body), r.httpClient); error != nil {
return error
}
log.Debugf("Registered type: %v", jobType)
package config
import (
+ "encoding/json"
"io/ioutil"
"net/http"
"testing"
StatusCode: http.StatusCreated,
}, nil)
+ schemaString := `{
+ "type": "object",
+ "properties": {},
+ "additionalProperties": false
+ }`
+ var schemaObj interface{}
+ json.Unmarshal([]byte(schemaString), &schemaObj)
+
type1 := TypeDefinition{
- Id: "Type1",
+ Identity: "Type1",
+ TypeSchema: schemaObj,
}
types := []TypeDefinition{type1}
assertions.Equal("/data-producer/v1/info-types/Type1", actualRequest.URL.Path)
assertions.Equal("application/json", actualRequest.Header.Get("Content-Type"))
body, _ := ioutil.ReadAll(actualRequest.Body)
- expectedBody := []byte(`{"info_job_data_schema": {"type": "object","properties": {},"additionalProperties": false}}`)
+ expectedBody := []byte(`{"info_job_data_schema": {"additionalProperties":false,"properties":{},"type":"object"}}`)
assertions.Equal(expectedBody, body)
clientMock.AssertNumberOfCalls(t, "Do", 1)
}
import (
"fmt"
+ "strings"
"sync"
"time"
+ "github.com/confluentinc/confluent-kafka-go/kafka"
log "github.com/sirupsen/logrus"
"oransc.org/nonrtric/dmaapmediatorproducer/internal/config"
+ "oransc.org/nonrtric/dmaapmediatorproducer/internal/kafkaclient"
"oransc.org/nonrtric/dmaapmediatorproducer/internal/restclient"
)
// TypeData holds the runtime state for one supported info type: its
// identity and the handler owning all jobs of that type.
type TypeData struct {
	Identity    string `json:"id"`
	jobsHandler *jobsHandler
}
// sourceType identifies the kind of data source feeding a type's jobs.
type sourceType string

const dMaaPSource = sourceType("dmaap")
const kafkaSource = sourceType("kafka")

// JobInfo is the job registration data received from ICS through the
// producer callback API.
type JobInfo struct {
	Owner            string     `json:"owner"`
	LastUpdated      string     `json:"last_updated"`
	InfoJobIdentity  string     `json:"info_job_identity"`
	TargetUri        string     `json:"target_uri"`
	InfoJobData      Parameters `json:"info_job_data"`
	InfoTypeIdentity string     `json:"info_type_identity"`
	// sourceType is unexported (not part of the JSON payload); it is filled
	// in internally from the job type's configuration when the job is added.
	sourceType sourceType
} // @name JobInfo
type JobTypesManager interface {
LoadTypesFromConfiguration(types []config.TypeDefinition) []config.TypeDefinition
allTypes map[string]TypeData
pollClient restclient.HTTPClient
mrAddress string
+ kafkaFactory kafkaclient.KafkaFactory
distributeClient restclient.HTTPClient
}
-func NewJobsManagerImpl(pollClient restclient.HTTPClient, mrAddr string, distributeClient restclient.HTTPClient) *JobsManagerImpl {
+func NewJobsManagerImpl(pollClient restclient.HTTPClient, mrAddr string, kafkaFactory kafkaclient.KafkaFactory, distributeClient restclient.HTTPClient) *JobsManagerImpl {
return &JobsManagerImpl{
allTypes: make(map[string]TypeData),
pollClient: pollClient,
mrAddress: mrAddr,
+ kafkaFactory: kafkaFactory,
distributeClient: distributeClient,
}
}
func (jm *JobsManagerImpl) AddJobFromRESTCall(ji JobInfo) error {
if err := jm.validateJobInfo(ji); err == nil {
typeData := jm.allTypes[ji.InfoTypeIdentity]
+ ji.sourceType = typeData.jobsHandler.sourceType
typeData.jobsHandler.addJobCh <- ji
log.Debug("Added job: ", ji)
return nil
func (jm *JobsManagerImpl) DeleteJobFromRESTCall(jobId string) {
for _, typeData := range jm.allTypes {
- log.Debugf("Deleting job %v from type %v", jobId, typeData.TypeId)
+ log.Debugf("Deleting job %v from type %v", jobId, typeData.Identity)
typeData.jobsHandler.deleteJobCh <- jobId
}
log.Debug("Deleted job: ", jobId)
func (jm *JobsManagerImpl) LoadTypesFromConfiguration(types []config.TypeDefinition) []config.TypeDefinition {
for _, typeDef := range types {
- jm.allTypes[typeDef.Id] = TypeData{
- TypeId: typeDef.Id,
- DMaaPTopicURL: typeDef.DmaapTopicURL,
- jobsHandler: newJobsHandler(typeDef.Id, typeDef.DmaapTopicURL, jm.pollClient, jm.distributeClient),
+ if typeDef.DMaaPTopicURL == "" && typeDef.KafkaInputTopic == "" {
+ log.Fatal("DMaaPTopicURL or KafkaInputTopic must be defined for type: ", typeDef.Identity)
+ }
+ jm.allTypes[typeDef.Identity] = TypeData{
+ Identity: typeDef.Identity,
+ jobsHandler: newJobsHandler(typeDef, jm.mrAddress, jm.kafkaFactory, jm.pollClient, jm.distributeClient),
}
}
return types
// StartJobsForAllTypes starts one polling-and-distribution goroutine per
// configured type. The goroutines run for the life of the process.
func (jm *JobsManagerImpl) StartJobsForAllTypes() {
	for _, jobType := range jm.allTypes {
		go jobType.jobsHandler.startPollingAndDistribution()
	}
}
// jobsHandler owns all jobs for one info type: it polls the type's data
// source via pollingAgent and fans the received messages out to every
// registered job. mu guards the jobs map.
type jobsHandler struct {
	mu               sync.Mutex
	typeId           string
	sourceType       sourceType
	pollingAgent     pollingAgent
	jobs             map[string]job
	addJobCh         chan JobInfo
	deleteJobCh      chan string
	distributeClient restclient.HTTPClient
}
-func newJobsHandler(typeId string, topicURL string, pollClient restclient.HTTPClient, distributeClient restclient.HTTPClient) *jobsHandler {
+func newJobsHandler(typeDef config.TypeDefinition, mRAddress string, kafkaFactory kafkaclient.KafkaFactory, pollClient restclient.HTTPClient, distributeClient restclient.HTTPClient) *jobsHandler {
+ pollingAgent := createPollingAgent(typeDef, mRAddress, pollClient, kafkaFactory, typeDef.KafkaInputTopic)
+ sourceType := kafkaSource
+ if typeDef.DMaaPTopicURL != "" {
+ sourceType = dMaaPSource
+ }
return &jobsHandler{
- typeId: typeId,
- topicUrl: topicURL,
+ typeId: typeDef.Identity,
+ sourceType: sourceType,
+ pollingAgent: pollingAgent,
jobs: make(map[string]job),
addJobCh: make(chan JobInfo),
deleteJobCh: make(chan string),
- pollClient: pollClient,
distributeClient: distributeClient,
}
}
-func (jh *jobsHandler) startPollingAndDistribution(mRAddress string) {
+func (jh *jobsHandler) startPollingAndDistribution() {
go func() {
for {
- jh.pollAndDistributeMessages(mRAddress)
+ jh.pollAndDistributeMessages()
}
}()
}()
}
-func (jh *jobsHandler) pollAndDistributeMessages(mRAddress string) {
// pollAndDistributeMessages performs one poll of the type's data source and
// distributes whatever was received to all jobs of the type. On a poll
// error it backs off for a minute before the caller's loop retries.
func (jh *jobsHandler) pollAndDistributeMessages() {
	log.Debugf("Processing jobs for type: %v", jh.typeId)
	messagesBody, error := jh.pollingAgent.pollMessages()
	if error != nil {
		log.Warn("Error getting data from source. Cause: ", error)
		time.Sleep(time.Minute) // Must wait before trying to call data source again
		return
	}
	jh.distributeMessages(messagesBody)
}
func (jh *jobsHandler) distributeMessages(messages []byte) {
- if len(messages) > 2 {
+ if string(messages) != "[]" && len(messages) > 0 { // MR returns an empty array if there are no messages.
+ log.Debug("Distributing messages: ", string(messages))
jh.mu.Lock()
defer jh.mu.Unlock()
for _, job := range jh.jobs {
jh.mu.Unlock()
}
// pollingAgent abstracts the data source a job type polls messages from
// (DMaaP Message Router or Kafka).
type pollingAgent interface {
	// pollMessages fetches the next batch of raw message data.
	pollMessages() ([]byte, error)
}
+
+func createPollingAgent(typeDef config.TypeDefinition, mRAddress string, pollClient restclient.HTTPClient, kafkaFactory kafkaclient.KafkaFactory, topicID string) pollingAgent {
+ if typeDef.DMaaPTopicURL != "" {
+ return dMaaPPollingAgent{
+ messageRouterURL: mRAddress + typeDef.DMaaPTopicURL,
+ pollClient: pollClient,
+ }
+ } else {
+ return newKafkaPollingAgent(kafkaFactory, typeDef.KafkaInputTopic)
+ }
+}
+
// dMaaPPollingAgent polls a DMaaP Message Router topic over HTTP.
type dMaaPPollingAgent struct {
	messageRouterURL string // full topic URL: MR address + topic path
	pollClient       restclient.HTTPClient
}

// pollMessages performs one HTTP GET against the Message Router topic and
// returns the raw response body.
func (pa dMaaPPollingAgent) pollMessages() ([]byte, error) {
	return restclient.Get(pa.messageRouterURL, pa.pollClient)
}
+
// kafkaPollingAgent polls messages from a Kafka topic.
type kafkaPollingAgent struct {
	kafkaClient kafkaclient.KafkaClient
}

// newKafkaPollingAgent creates an agent consuming the given Kafka topic.
// NOTE: if the Kafka client cannot be created this calls log.Fatalf, which
// terminates the entire process.
func newKafkaPollingAgent(kafkaFactory kafkaclient.KafkaFactory, topicID string) kafkaPollingAgent {
	c, err := kafkaclient.NewKafkaClient(kafkaFactory, topicID)
	if err != nil {
		log.Fatalf("Cannot create Kafka client for topic: %v, error details: %v\n", topicID, err)
	}
	return kafkaPollingAgent{
		kafkaClient: c,
	}
}
+
+func (pa kafkaPollingAgent) pollMessages() ([]byte, error) {
+ msg, err := pa.kafkaClient.ReadMessage()
+ if err == nil {
+ return msg, nil
+ } else {
+ if isKafkaTimedOutError(err) {
+ return []byte(""), nil
+ }
+ return nil, err
+ }
+}
+
// isKafkaTimedOutError reports whether err is the Kafka library's
// ErrTimedOut, meaning no message arrived within the read timeout.
func isKafkaTimedOutError(err error) bool {
	kafkaErr, ok := err.(kafka.Error)
	return ok && kafkaErr.Code() == kafka.ErrTimedOut
}
+
type job struct {
jobInfo JobInfo
client restclient.HTTPClient
}
// newJob creates a job that distributes messages to the given consumer
// client. NOTE(review): the job's channels (messagesChannel,
// controlChannel) are used elsewhere but not initialized here — presumably
// they are set up in the full struct definition; confirm before relying on
// a zero-value job.
func newJob(j JobInfo, c restclient.HTTPClient) job {

	return job{
		jobInfo: j,
		client:  c,
	}
}
// Parameters holds the consumer-provided job parameters.
type Parameters struct {
	BufferTimeout BufferTimeout `json:"bufferTimeout"`
} // @name Parameters

// BufferTimeout enables buffered delivery when both fields are positive:
// messages are accumulated until MaxSize messages have arrived or
// MaxTimeMiliseconds has elapsed, whichever comes first.
// Note: "Miliseconds" is misspelled, but the field and JSON names are part
// of the external API contract — do not rename.
type BufferTimeout struct {
	MaxSize            int   `json:"maxSize"`
	MaxTimeMiliseconds int64 `json:"maxTimeMiliseconds"`
} // @name BufferTimeout
+
// start begins message distribution for the job: buffered (batches sent as
// one JSON array) when buffer parameters are set, otherwise one message per
// request.
func (j *job) start() {
	if j.isJobBuffered() {
		j.startReadingMessagesBuffered()
	} else {
		j.startReadingSingleMessages()
	}
}
+
+func (j *job) startReadingSingleMessages() {
out:
for {
select {
}
}
// startReadingMessagesBuffered loops, collecting messages into buffers
// bounded by the job's BufferTimeout parameters and sending each non-empty
// buffer to the consumer, until a signal on controlChannel stops the
// distribution.
func (j *job) startReadingMessagesBuffered() {
out:
	for {
		select {
		case <-j.controlChannel:
			log.Debug("Stop distribution for job: ", j.jobInfo.InfoJobIdentity)
			break out
		default:
			msgs := j.read(j.jobInfo.InfoJobData.BufferTimeout)
			if len(msgs) > 0 {
				j.sendMessagesToConsumer(msgs)
			}
		}
	}
}
+
+func (j *job) read(bufferParams BufferTimeout) []byte {
+ wg := sync.WaitGroup{}
+ wg.Add(bufferParams.MaxSize)
+ rawMsgs := make([][]byte, 0, bufferParams.MaxSize)
+ c := make(chan struct{})
+ go func() {
+ i := 0
+ out:
+ for {
+ select {
+ case <-c:
+ break out
+ case msg := <-j.messagesChannel:
+ rawMsgs = append(rawMsgs, msg)
+ i++
+ wg.Done()
+ if i == bufferParams.MaxSize {
+ break out
+ }
+ }
+ }
+ }()
+ j.waitTimeout(&wg, time.Duration(bufferParams.MaxTimeMiliseconds)*time.Millisecond)
+ close(c)
+ return getAsJSONArray(rawMsgs)
+}
+
// getAsJSONArray renders the raw messages as a JSON array of strings, e.g.
// ["msg1","msg2"]. An empty input yields an empty byte slice (not "[]"),
// which callers treat as "nothing to distribute".
//
// Fix: each message is now fully JSON-string-escaped. Backslashes must be
// escaped before quotes; the previous version escaped only quotes, so a
// message containing `\` produced invalid JSON. strings.Join also replaces
// the quadratic string concatenation.
func getAsJSONArray(rawMsgs [][]byte) []byte {
	if len(rawMsgs) == 0 {
		return []byte("")
	}
	quoted := make([]string, len(rawMsgs))
	for i, raw := range rawMsgs {
		escaped := strings.ReplaceAll(string(raw), `\`, `\\`)
		escaped = strings.ReplaceAll(escaped, `"`, `\"`)
		quoted[i] = `"` + escaped + `"`
	}
	return []byte("[" + strings.Join(quoted, ",") + "]")
}
+
// makeIntoString wraps a raw message in double quotes, escaping it as a
// JSON string value.
//
// Fix: backslashes are escaped as well as quotes, and the backslash
// replacement runs first — otherwise the backslashes introduced by quote
// escaping would be doubled, and raw `\` would corrupt the JSON output.
func makeIntoString(rawMsg []byte) string {
	escaped := strings.ReplaceAll(string(rawMsg), `\`, `\\`)
	escaped = strings.ReplaceAll(escaped, `"`, `\"`)
	return `"` + escaped + `"`
}
+
// addSeparatorIfNeeded appends a comma to the accumulated string unless the
// element at position is the last one (position == length-1).
func addSeparatorIfNeeded(acc string, position, length int) string {
	if position == length-1 {
		return acc
	}
	return acc + ","
}
+
// wrapInJSONArray surrounds the already comma-separated elements with JSON
// array brackets.
func wrapInJSONArray(elements string) string {
	return fmt.Sprintf("[%s]", elements)
}
+
// waitTimeout waits for the WaitGroup to complete or for timeout to expire,
// whichever comes first. Returns true on timeout, false on normal
// completion.
//
// NOTE(review): when the timeout fires, the helper goroutine stays blocked
// in wg.Wait until the WaitGroup eventually completes; if it never does,
// that goroutine leaks.
func (j *job) waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
	c := make(chan struct{})
	go func() {
		defer close(c)
		wg.Wait()
	}()
	select {
	case <-c:
		return false // completed normally
	case <-time.After(timeout):
		return true // timed out
	}
}
+
// sendMessagesToConsumer POSTs the messages to the job's target URI.
// Unbuffered Kafka messages are sent as text/plain (one raw message per
// request); everything else (DMaaP data, buffered JSON arrays) is sent as
// application/json. Delivery failures are logged and the batch is dropped —
// there is no retry.
func (j *job) sendMessagesToConsumer(messages []byte) {
	log.Debug("Processing job: ", j.jobInfo.InfoJobIdentity)
	contentType := restclient.ContentTypeJSON
	if j.isJobKafka() && !j.isJobBuffered() {
		contentType = restclient.ContentTypePlain
	}
	if postErr := restclient.Post(j.jobInfo.TargetUri, messages, contentType, j.client); postErr != nil {
		log.Warnf("Error posting data for job: %v. Cause: %v", j.jobInfo, postErr)
		return
	}
	log.Debugf("Messages for job: %v distributed to consumer: %v", j.jobInfo.InfoJobIdentity, j.jobInfo.Owner)
}
+
+func (j *job) isJobBuffered() bool {
+ return j.jobInfo.InfoJobData.BufferTimeout.MaxSize > 0 && j.jobInfo.InfoJobData.BufferTimeout.MaxTimeMiliseconds > 0
+}
+
// isJobKafka reports whether the job's data source is Kafka.
func (j *job) isJobKafka() bool {
	return j.jobInfo.sourceType == kafkaSource
}
import (
"bytes"
+ "fmt"
"io/ioutil"
"net/http"
+ "strconv"
"sync"
"testing"
"time"
+ "github.com/confluentinc/confluent-kafka-go/kafka"
+ "github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"oransc.org/nonrtric/dmaapmediatorproducer/internal/config"
+ "oransc.org/nonrtric/dmaapmediatorproducer/internal/kafkaclient"
+ "oransc.org/nonrtric/dmaapmediatorproducer/mocks"
)
-const typeDefinition = `{"types": [{"id": "type1", "dmaapTopicUrl": "events/unauthenticated.SEC_FAULT_OUTPUT/dmaapmediatorproducer/type1"}]}`
-
-func TestJobsManagerGetTypes_filesOkShouldReturnSliceOfTypesAndProvideSupportedTypes(t *testing.T) {
+func TestJobsManagerLoadTypesFromConfiguration_shouldReturnSliceOfTypesAndProvideSupportedTypes(t *testing.T) {
assertions := require.New(t)
- managerUnderTest := NewJobsManagerImpl(nil, "", nil)
+ managerUnderTest := NewJobsManagerImpl(nil, "", kafkaclient.KafkaFactoryImpl{}, nil)
- wantedType := config.TypeDefinition{
- Id: "type1",
- DmaapTopicURL: "events/unauthenticated.SEC_FAULT_OUTPUT/dmaapmediatorproducer/type1",
+ wantedDMaaPType := config.TypeDefinition{
+ Identity: "type1",
+ DMaaPTopicURL: "events/unauthenticated.SEC_FAULT_OUTPUT/dmaapmediatorproducer/type1",
+ }
+ wantedKafkaType := config.TypeDefinition{
+ Identity: "type2",
+ KafkaInputTopic: "topic",
}
- wantedTypes := []config.TypeDefinition{wantedType}
+ wantedTypes := []config.TypeDefinition{wantedDMaaPType, wantedKafkaType}
types := managerUnderTest.LoadTypesFromConfiguration(wantedTypes)
assertions.EqualValues(wantedTypes, types)
supportedTypes := managerUnderTest.GetSupportedTypes()
- assertions.EqualValues([]string{"type1"}, supportedTypes)
+ assertions.ElementsMatch([]string{"type1", "type2"}, supportedTypes)
+ assertions.Equal(dMaaPSource, managerUnderTest.allTypes["type1"].jobsHandler.sourceType)
+ assertions.Equal(kafkaSource, managerUnderTest.allTypes["type2"].jobsHandler.sourceType)
}
func TestJobsManagerAddJobWhenTypeIsSupported_shouldAddJobToChannel(t *testing.T) {
assertions := require.New(t)
- managerUnderTest := NewJobsManagerImpl(nil, "", nil)
+ managerUnderTest := NewJobsManagerImpl(nil, "", kafkaclient.KafkaFactoryImpl{}, nil)
wantedJob := JobInfo{
Owner: "owner",
LastUpdated: "now",
InfoJobIdentity: "job1",
TargetUri: "target",
- InfoJobData: "{}",
+ InfoJobData: Parameters{},
InfoTypeIdentity: "type1",
}
jobsHandler := jobsHandler{
addJobCh: make(chan JobInfo)}
managerUnderTest.allTypes["type1"] = TypeData{
- TypeId: "type1",
+ Identity: "type1",
jobsHandler: &jobsHandler,
}
func TestJobsManagerAddJobWhenTypeIsNotSupported_shouldReturnError(t *testing.T) {
assertions := require.New(t)
- managerUnderTest := NewJobsManagerImpl(nil, "", nil)
+ managerUnderTest := NewJobsManagerImpl(nil, "", kafkaclient.KafkaFactoryImpl{}, nil)
jobInfo := JobInfo{
InfoTypeIdentity: "type1",
}
func TestJobsManagerAddJobWhenJobIdMissing_shouldReturnError(t *testing.T) {
assertions := require.New(t)
- managerUnderTest := NewJobsManagerImpl(nil, "", nil)
+ managerUnderTest := NewJobsManagerImpl(nil, "", kafkaclient.KafkaFactoryImpl{}, nil)
managerUnderTest.allTypes["type1"] = TypeData{
- TypeId: "type1",
+ Identity: "type1",
}
jobInfo := JobInfo{
}
err := managerUnderTest.AddJobFromRESTCall(jobInfo)
assertions.NotNil(err)
- assertions.Equal("missing required job identity: { <nil> type1}", err.Error())
+ assertions.Equal("missing required job identity: { {{0 0}} type1 }", err.Error())
}
func TestJobsManagerAddJobWhenTargetUriMissing_shouldReturnError(t *testing.T) {
assertions := require.New(t)
- managerUnderTest := NewJobsManagerImpl(nil, "", nil)
+ managerUnderTest := NewJobsManagerImpl(nil, "", kafkaclient.KafkaFactoryImpl{}, nil)
managerUnderTest.allTypes["type1"] = TypeData{
- TypeId: "type1",
+ Identity: "type1",
}
jobInfo := JobInfo{
}
err := managerUnderTest.AddJobFromRESTCall(jobInfo)
assertions.NotNil(err)
- assertions.Equal("missing required target URI: { job1 <nil> type1}", err.Error())
+ assertions.Equal("missing required target URI: { job1 {{0 0}} type1 }", err.Error())
}
func TestJobsManagerDeleteJob_shouldSendDeleteToChannel(t *testing.T) {
assertions := require.New(t)
- managerUnderTest := NewJobsManagerImpl(nil, "", nil)
+ managerUnderTest := NewJobsManagerImpl(nil, "", kafkaclient.KafkaFactoryImpl{}, nil)
jobsHandler := jobsHandler{
deleteJobCh: make(chan string)}
managerUnderTest.allTypes["type1"] = TypeData{
- TypeId: "type1",
+ Identity: "type1",
jobsHandler: &jobsHandler,
}
assertions.Equal("job2", <-jobsHandler.deleteJobCh)
}
-func TestAddJobToJobsManager_shouldStartPollAndDistributeMessages(t *testing.T) {
+func TestStartJobsManagerAddDMaaPJob_shouldStartPollAndDistributeMessages(t *testing.T) {
assertions := require.New(t)
called := false
- messages := `[{"message": {"data": "data"}}]`
+ dMaaPMessages := `[{"message": {"data": "dmaap"}}]`
pollClientMock := NewTestClient(func(req *http.Request) *http.Response {
if req.URL.String() == "http://mrAddr/topicUrl" {
assertions.Equal(req.Method, "GET")
body := "[]"
if !called {
called = true
- body = messages
+ body = dMaaPMessages
}
return &http.Response{
- StatusCode: 200,
+ StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader([]byte(body))),
Header: make(http.Header), // Must be set to non-nil value or it panics
}
wg := sync.WaitGroup{}
distributeClientMock := NewTestClient(func(req *http.Request) *http.Response {
- if req.URL.String() == "http://consumerHost/target" {
+ if req.URL.String() == "http://consumerHost/dmaaptarget" {
assertions.Equal(req.Method, "POST")
- assertions.Equal(messages, getBodyAsString(req, t))
+ assertions.Equal(dMaaPMessages, getBodyAsString(req, t))
assertions.Equal("application/json", req.Header.Get("Content-Type"))
wg.Done()
return &http.Response{
t.Fail()
return nil
})
- jobsHandler := newJobsHandler("type1", "/topicUrl", pollClientMock, distributeClientMock)
-
- jobsManager := NewJobsManagerImpl(pollClientMock, "http://mrAddr", distributeClientMock)
- jobsManager.allTypes["type1"] = TypeData{
+ dMaaPTypeDef := config.TypeDefinition{
+ Identity: "type1",
DMaaPTopicURL: "/topicUrl",
- TypeId: "type1",
- jobsHandler: jobsHandler,
}
+ dMaaPJobsHandler := newJobsHandler(dMaaPTypeDef, "http://mrAddr", nil, pollClientMock, distributeClientMock)
+ jobsManager := NewJobsManagerImpl(pollClientMock, "http://mrAddr", kafkaclient.KafkaFactoryImpl{}, distributeClientMock)
+ jobsManager.allTypes["type1"] = TypeData{
+ Identity: "type1",
+ jobsHandler: dMaaPJobsHandler,
+ }
jobsManager.StartJobsForAllTypes()
- jobInfo := JobInfo{
+ dMaaPJobInfo := JobInfo{
InfoTypeIdentity: "type1",
InfoJobIdentity: "job1",
- TargetUri: "http://consumerHost/target",
+ TargetUri: "http://consumerHost/dmaaptarget",
+ }
+
+ wg.Add(1) // Wait till the distribution has happened
+ err := jobsManager.AddJobFromRESTCall(dMaaPJobInfo)
+ assertions.Nil(err)
+
+ if waitTimeout(&wg, 2*time.Second) {
+ t.Error("Not all calls to server were made")
+ t.Fail()
+ }
+}
+
+func TestStartJobsManagerAddKafkaJob_shouldStartPollAndDistributeMessages(t *testing.T) {
+ assertions := require.New(t)
+
+ kafkaMessages := `1`
+ wg := sync.WaitGroup{}
+ distributeClientMock := NewTestClient(func(req *http.Request) *http.Response {
+ if req.URL.String() == "http://consumerHost/kafkatarget" {
+ assertions.Equal(req.Method, "POST")
+ assertions.Equal(kafkaMessages, getBodyAsString(req, t))
+ assertions.Equal("text/plain", req.Header.Get("Content-Type"))
+ wg.Done()
+ return &http.Response{
+ StatusCode: 200,
+ Body: ioutil.NopCloser(bytes.NewBufferString(`OK`)),
+ Header: make(http.Header), // Must be set to non-nil value or it panics
+ }
+ }
+ t.Error("Wrong call to client: ", req)
+ t.Fail()
+ return nil
+ })
+
+ kafkaTypeDef := config.TypeDefinition{
+ Identity: "type2",
+ KafkaInputTopic: "topic",
+ }
+ kafkaFactoryMock := mocks.KafkaFactory{}
+ kafkaConsumerMock := mocks.KafkaConsumer{}
+ kafkaConsumerMock.On("Commit").Return([]kafka.TopicPartition{}, error(nil))
+ kafkaConsumerMock.On("Subscribe", mock.Anything).Return(error(nil))
+ kafkaConsumerMock.On("ReadMessage", mock.Anything).Return(&kafka.Message{
+ Value: []byte(kafkaMessages),
+ }, error(nil)).Once()
+ kafkaConsumerMock.On("ReadMessage", mock.Anything).Return(nil, fmt.Errorf("Just to stop"))
+ kafkaFactoryMock.On("NewKafkaConsumer", mock.Anything).Return(kafkaConsumerMock, nil)
+ kafkaJobsHandler := newJobsHandler(kafkaTypeDef, "", kafkaFactoryMock, nil, distributeClientMock)
+
+ jobsManager := NewJobsManagerImpl(nil, "", kafkaFactoryMock, distributeClientMock)
+ jobsManager.allTypes["type2"] = TypeData{
+ Identity: "type2",
+ jobsHandler: kafkaJobsHandler,
+ }
+
+ jobsManager.StartJobsForAllTypes()
+
+ kafkaJobInfo := JobInfo{
+ InfoTypeIdentity: "type2",
+ InfoJobIdentity: "job2",
+ TargetUri: "http://consumerHost/kafkatarget",
}
wg.Add(1) // Wait till the distribution has happened
- err := jobsManager.AddJobFromRESTCall(jobInfo)
+ err := jobsManager.AddJobFromRESTCall(kafkaJobInfo)
assertions.Nil(err)
if waitTimeout(&wg, 2*time.Second) {
func TestJobsHandlerDeleteJob_shouldDeleteJobFromJobsMap(t *testing.T) {
jobToDelete := newJob(JobInfo{}, nil)
go jobToDelete.start()
- jobsHandler := newJobsHandler("type1", "/topicUrl", nil, nil)
+ typeDef := config.TypeDefinition{
+ Identity: "type1",
+ DMaaPTopicURL: "/topicUrl",
+ }
+ jobsHandler := newJobsHandler(typeDef, "http://mrAddr", kafkaclient.KafkaFactoryImpl{}, nil, nil)
jobsHandler.jobs["job1"] = jobToDelete
go jobsHandler.monitorManagementChannels()
InfoJobIdentity: "job",
}, nil)
- jobsHandler := newJobsHandler("type1", "/topicUrl", nil, nil)
+ typeDef := config.TypeDefinition{
+ Identity: "type1",
+ DMaaPTopicURL: "/topicUrl",
+ }
+ jobsHandler := newJobsHandler(typeDef, "http://mrAddr", kafkaclient.KafkaFactoryImpl{}, nil, nil)
jobsHandler.jobs["job1"] = job
fillMessagesBuffer(job.messagesChannel)
require.New(t).Len(job.messagesChannel, 0)
}
+func TestKafkaPollingAgentTimedOut_shouldResultInEMptyMessages(t *testing.T) {
+ assertions := require.New(t)
+
+ kafkaFactoryMock := mocks.KafkaFactory{}
+ kafkaConsumerMock := mocks.KafkaConsumer{}
+ kafkaConsumerMock.On("Commit").Return([]kafka.TopicPartition{}, error(nil))
+ kafkaConsumerMock.On("Subscribe", mock.Anything).Return(error(nil))
+ kafkaConsumerMock.On("ReadMessage", mock.Anything).Return(nil, kafka.NewError(kafka.ErrTimedOut, "", false))
+ kafkaFactoryMock.On("NewKafkaConsumer", mock.Anything).Return(kafkaConsumerMock, nil)
+
+ pollingAgentUnderTest := newKafkaPollingAgent(kafkaFactoryMock, "")
+ messages, err := pollingAgentUnderTest.pollMessages()
+
+ assertions.Equal([]byte(""), messages)
+ assertions.Nil(err)
+}
+
// TestJobWithoutParameters_shouldSendOneMessageAtATime verifies that a
// Kafka job without buffer parameters POSTs each message individually with
// Content-Type text/plain.
func TestJobWithoutParameters_shouldSendOneMessageAtATime(t *testing.T) {
	assertions := require.New(t)

	wg := sync.WaitGroup{}
	messageNo := 1
	distributeClientMock := NewTestClient(func(req *http.Request) *http.Response {
		if req.URL.String() == "http://consumerHost/target" {
			assertions.Equal(req.Method, "POST")
			// Messages must arrive one at a time, in order.
			assertions.Equal(fmt.Sprint("message", messageNo), getBodyAsString(req, t))
			messageNo++
			assertions.Equal("text/plain", req.Header.Get("Content-Type"))
			wg.Done()
			return &http.Response{
				StatusCode: 200,
				Body:       ioutil.NopCloser(bytes.NewBufferString(`OK`)),
				Header:     make(http.Header), // Must be set to non-nil value or it panics
			}
		}
		t.Error("Wrong call to client: ", req)
		t.Fail()
		return nil
	})

	jobUnderTest := newJob(JobInfo{
		sourceType: kafkaSource,
		TargetUri:  "http://consumerHost/target",
	}, distributeClientMock)

	wg.Add(2)
	go jobUnderTest.start()

	jobUnderTest.messagesChannel <- []byte("message1")
	jobUnderTest.messagesChannel <- []byte("message2")

	if waitTimeout(&wg, 2*time.Second) {
		t.Error("Not all calls to server were made")
		t.Fail()
	}
}
+
// TestJobWithBufferedParameters_shouldSendMessagesTogether verifies that a
// job with buffer parameters delivers the collected messages as a single
// JSON array of escaped strings with Content-Type application/json.
func TestJobWithBufferedParameters_shouldSendMessagesTogether(t *testing.T) {
	assertions := require.New(t)

	wg := sync.WaitGroup{}
	distributeClientMock := NewTestClient(func(req *http.Request) *http.Response {
		if req.URL.String() == "http://consumerHost/target" {
			assertions.Equal(req.Method, "POST")
			assertions.Equal(`["{\"data\": 1}","{\"data\": 2}","ABCDEFGH"]`, getBodyAsString(req, t))
			assertions.Equal("application/json", req.Header.Get("Content-Type"))
			wg.Done()
			return &http.Response{
				StatusCode: 200,
				Body:       ioutil.NopCloser(bytes.NewBufferString(`OK`)),
				Header:     make(http.Header), // Must be set to non-nil value or it panics
			}
		}
		t.Error("Wrong call to client: ", req)
		t.Fail()
		return nil
	})

	jobUnderTest := newJob(JobInfo{
		TargetUri: "http://consumerHost/target",
		InfoJobData: Parameters{
			BufferTimeout: BufferTimeout{
				MaxSize:            5,
				MaxTimeMiliseconds: 200,
			},
		},
	}, distributeClientMock)

	wg.Add(1)
	go jobUnderTest.start()

	go func() {
		jobUnderTest.messagesChannel <- []byte(`{"data": 1}`)
		jobUnderTest.messagesChannel <- []byte(`{"data": 2}`)
		jobUnderTest.messagesChannel <- []byte("ABCDEFGH")
	}()

	if waitTimeout(&wg, 2*time.Second) {
		t.Error("Not all calls to server were made")
		t.Fail()
	}
}
+
// TestJobReadMoreThanBufferSizeMessages_shouldOnlyReturnMaxSizeNoOfMessages
// verifies that read stops collecting once MaxSize messages have arrived,
// even when more are available.
func TestJobReadMoreThanBufferSizeMessages_shouldOnlyReturnMaxSizeNoOfMessages(t *testing.T) {
	assertions := require.New(t)

	jobUnderTest := newJob(JobInfo{}, nil)

	go func() {
		for i := 0; i < 4; i++ {
			jobUnderTest.messagesChannel <- []byte(strconv.Itoa(i))
		}
	}()

	msgs := jobUnderTest.read(BufferTimeout{
		MaxSize:            2,
		MaxTimeMiliseconds: 200,
	})

	// Only the first MaxSize messages end up in the JSON array.
	assertions.Equal([]byte("[\"0\",\"1\"]"), msgs)
}
// TestJobReadBufferedWhenTimeout_shouldOnlyReturnMessagesSentBeforeTimeout
// verifies that read returns only the messages that arrived before the
// buffer timeout expired.
// NOTE(review): timing-sensitive (10 ms sends vs 30 ms timeout); may be
// flaky on heavily loaded CI machines.
func TestJobReadBufferedWhenTimeout_shouldOnlyReturnMessagesSentBeforeTimeout(t *testing.T) {
	assertions := require.New(t)

	jobUnderTest := newJob(JobInfo{}, nil)

	go func() {
		for i := 0; i < 4; i++ {
			time.Sleep(10 * time.Millisecond)
			jobUnderTest.messagesChannel <- []byte(strconv.Itoa(i))
		}
	}()

	msgs := jobUnderTest.read(BufferTimeout{
		MaxSize:            2,
		MaxTimeMiliseconds: 30,
	})

	assertions.Equal([]byte("[\"0\",\"1\"]"), msgs)
}
+
func fillMessagesBuffer(mc chan []byte) {
for i := 0; i < cap(mc); i++ {
mc <- []byte("msg")
--- /dev/null
+// -
+// ========================LICENSE_START=================================
+// O-RAN-SC
+// %%
+// Copyright (C) 2021: Nordix Foundation
+// %%
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// ========================LICENSE_END===================================
+//
+
+package kafkaclient
+
+import (
+ "time"
+
+ "github.com/confluentinc/confluent-kafka-go/kafka"
+)
+
// KafkaFactory creates Kafka consumers. It exists as an interface so tests
// can inject mock consumers.
type KafkaFactory interface {
	NewKafkaConsumer(topicID string) (KafkaConsumer, error)
}

// KafkaFactoryImpl is the production factory, creating consumers against
// the configured bootstrap server.
type KafkaFactoryImpl struct {
	BootstrapServer string
}
+
// NewKafkaConsumer creates a confluent-kafka consumer in the fixed consumer
// group "dmaap-mediator-producer", starting from the earliest offset when
// no committed offset exists.
// NOTE(review): topicID is not used here; subscription happens later in
// NewKafkaClient.
func (kf KafkaFactoryImpl) NewKafkaConsumer(topicID string) (KafkaConsumer, error) {
	consumer, err := kafka.NewConsumer(&kafka.ConfigMap{
		"bootstrap.servers": kf.BootstrapServer,
		"group.id":          "dmaap-mediator-producer",
		"auto.offset.reset": "earliest",
	})
	if err != nil {
		return nil, err
	}
	return KafkaConsumerImpl{consumer: consumer}, nil
}
+
// NewKafkaClient creates a consumer via the factory and subscribes it to
// the given topic, returning a client wrapping it.
// NOTE(review): the error from Commit() is silently discarded — with no
// stored offsets Commit commonly returns an error; confirm this best-effort
// commit is intentional before adding handling.
func NewKafkaClient(factory KafkaFactory, topicID string) (KafkaClient, error) {
	consumer, err := factory.NewKafkaConsumer(topicID)
	if err != nil {
		return KafkaClient{}, err
	}
	consumer.Commit()
	err = consumer.Subscribe(topicID)
	if err != nil {
		return KafkaClient{}, err
	}
	return KafkaClient{consumer: consumer}, nil
}
+
// KafkaClient wraps a KafkaConsumer subscribed to a single topic.
type KafkaClient struct {
	consumer KafkaConsumer
}

// ReadMessage reads one message, waiting at most one second. On timeout or
// any other consumer error it returns a nil payload and the error.
func (kc KafkaClient) ReadMessage() ([]byte, error) {
	msg, err := kc.consumer.ReadMessage(time.Second)
	if err != nil {
		return nil, err
	}
	return msg.Value, nil
}
+
// KafkaConsumer is the subset of the confluent-kafka consumer API used by
// this package, extracted as an interface for mocking.
type KafkaConsumer interface {
	Commit() ([]kafka.TopicPartition, error)
	Subscribe(topic string) (err error)
	ReadMessage(timeout time.Duration) (*kafka.Message, error)
}

// KafkaConsumerImpl delegates to a real confluent-kafka consumer.
type KafkaConsumerImpl struct {
	consumer *kafka.Consumer
}

// Commit commits the consumer's current offsets.
func (kc KafkaConsumerImpl) Commit() ([]kafka.TopicPartition, error) {
	return kc.consumer.Commit()
}

// Subscribe subscribes to the topic with no rebalance callback.
func (kc KafkaConsumerImpl) Subscribe(topic string) error {
	return kc.consumer.Subscribe(topic, nil)
}

// ReadMessage reads one message, waiting at most timeout.
func (kc KafkaConsumerImpl) ReadMessage(timeout time.Duration) (*kafka.Message, error) {
	return kc.consumer.ReadMessage(timeout)
}
log "github.com/sirupsen/logrus"
)
+const ContentTypeJSON = "application/json"
+const ContentTypePlain = "text/plain"
+
// HTTPClient interface
type HTTPClient interface {
Get(url string) (*http.Response, error)
}
// Put sends an HTTP PUT with a JSON body, returning an error on request
// failure or a non-success status.
func Put(url string, body []byte, client HTTPClient) error {
	return do(http.MethodPut, url, body, ContentTypeJSON, client)
}
// Post sends an HTTP POST with the given Content-Type, returning an error
// on request failure or a non-success status.
func Post(url string, body []byte, contentType string, client HTTPClient) error {
	return do(http.MethodPost, url, body, contentType, client)
}
-func do(method string, url string, body []byte, client HTTPClient) error {
+func do(method string, url string, body []byte, contentType string, client HTTPClient) error {
if req, reqErr := http.NewRequest(method, url, bytes.NewBuffer(body)); reqErr == nil {
- req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Content-Type", contentType)
if response, respErr := client.Do(req); respErr == nil {
if isResponseSuccess(response.StatusCode) {
return nil
StatusCode: http.StatusOK,
}, nil)
- if err := Post("http://localhost:9990", []byte("body"), &clientMock); err != nil {
+ if err := Post("http://localhost:9990", []byte("body"), "application/json", &clientMock); err != nil {
t.Errorf("Put() error = %v, did not want error", err)
}
var actualRequest *http.Request
StatusCode: tt.args.mockReturnStatus,
Body: ioutil.NopCloser(bytes.NewReader(tt.args.mockReturnBody)),
}, tt.args.mockReturnError)
- err := do("PUT", tt.args.url, nil, &clientMock)
+ err := do("PUT", tt.args.url, nil, "", &clientMock)
assertions.Equal(tt.wantErr, err, tt.name)
})
}
"oransc.org/nonrtric/dmaapmediatorproducer/internal/jobs"
)
-const StatusPath = "/status"
-const AddJobPath = "/jobs"
+const HealthCheckPath = "/health_check"
+const AddJobPath = "/info_job"
const jobIdToken = "infoJobId"
const deleteJobPath = AddJobPath + "/{" + jobIdToken + "}"
const logLevelToken = "level"
const logAdminPath = "/admin/log"
// ErrorInfo is a problem-details error body as defined in RFC 7807,
// returned with Content-Type application/problem+json.
type ErrorInfo struct {
	// A URI reference that identifies the problem type.
	Type string `json:"type" swaggertype:"string"`
	// A short, human-readable summary of the problem type.
	Title string `json:"title" swaggertype:"string"`
	// The HTTP status code generated by the origin server for this occurrence of the problem.
	Status int `json:"status" swaggertype:"integer" example:"400"`
	// A human-readable explanation specific to this occurrence of the problem.
	Detail string `json:"detail" swaggertype:"string" example:"Info job type not found"`
	// A URI reference that identifies the specific occurrence of the problem.
	Instance string `json:"instance" swaggertype:"string"`
} // @name ErrorInfo
+
type ProducerCallbackHandler struct {
jobsManager jobs.JobsManager
}
}
}
-func NewRouter(jm jobs.JobsManager) *mux.Router {
+func NewRouter(jm jobs.JobsManager, hcf func(http.ResponseWriter, *http.Request)) *mux.Router {
callbackHandler := NewProducerCallbackHandler(jm)
r := mux.NewRouter()
- r.HandleFunc(StatusPath, statusHandler).Methods(http.MethodGet).Name("status")
+ r.HandleFunc(HealthCheckPath, hcf).Methods(http.MethodGet).Name("health_check")
r.HandleFunc(AddJobPath, callbackHandler.addInfoJobHandler).Methods(http.MethodPost).Name("add")
r.HandleFunc(deleteJobPath, callbackHandler.deleteInfoJobHandler).Methods(http.MethodDelete).Name("delete")
r.HandleFunc(logAdminPath, callbackHandler.setLogLevel).Methods(http.MethodPut).Name("setLogLevel")
return r
}
-func statusHandler(w http.ResponseWriter, r *http.Request) {
- // Just respond OK to show the server is alive for now. Might be extended later.
-}
-
+// @Summary Add info job
+// @Description Callback for ICS to add an info job
+// @Tags Data producer (callbacks)
+// @Accept json
+// @Param job body jobs.JobInfo true "Info job data"
+// @Success 200
+// @Failure 400 {object} ErrorInfo "Problem as defined in https://tools.ietf.org/html/rfc7807"
+// @Header 400 {string} Content-Type "application/problem+json"
+// @Router /info_job [post]
func (h *ProducerCallbackHandler) addInfoJobHandler(w http.ResponseWriter, r *http.Request) {
b, readErr := ioutil.ReadAll(r.Body)
if readErr != nil {
- http.Error(w, fmt.Sprintf("Unable to read body due to: %v", readErr), http.StatusBadRequest)
+ returnError(fmt.Sprintf("Unable to read body due to: %v", readErr), w)
return
}
jobInfo := jobs.JobInfo{}
if unmarshalErr := json.Unmarshal(b, &jobInfo); unmarshalErr != nil {
- http.Error(w, fmt.Sprintf("Invalid json body. Cause: %v", unmarshalErr), http.StatusBadRequest)
+ returnError(fmt.Sprintf("Invalid json body. Cause: %v", unmarshalErr), w)
return
}
if err := h.jobsManager.AddJobFromRESTCall(jobInfo); err != nil {
- http.Error(w, fmt.Sprintf("Invalid job info. Cause: %v", err), http.StatusBadRequest)
+ returnError(fmt.Sprintf("Invalid job info. Cause: %v", err), w)
+ return
}
}
+// @Summary Delete info job
+// @Description Callback for ICS to delete an info job
+// @Tags Data producer (callbacks)
+// @Param infoJobId path string true "Info job ID"
+// @Success 200
+// @Router /info_job/{infoJobId} [delete]
func (h *ProducerCallbackHandler) deleteInfoJobHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
id, ok := vars[jobIdToken]
h.jobsManager.DeleteJobFromRESTCall(id)
}
+// @Summary Set log level
+// @Description Set the log level of the producer.
+// @Tags Admin
+// @Param level query string false "string enums" Enums(Error, Warn, Info, Debug)
+// @Success 200
+// @Failure 400 {object} ErrorInfo "Problem as defined in https://tools.ietf.org/html/rfc7807"
+// @Header 400 {string} Content-Type "application/problem+json"
+// @Router /admin/log [put]
func (h *ProducerCallbackHandler) setLogLevel(w http.ResponseWriter, r *http.Request) {
query := r.URL.Query()
logLevelStr := query.Get(logLevelToken)
if loglevel, err := log.ParseLevel(logLevelStr); err == nil {
log.SetLevel(loglevel)
} else {
- http.Error(w, fmt.Sprintf("Invalid log level: %v. Log level will not be changed!", logLevelStr), http.StatusBadRequest)
+ returnError(fmt.Sprintf("Invalid log level: %v. Log level will not be changed!", logLevelStr), w)
return
}
}
func (h *methodNotAllowedHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Method is not supported.", http.StatusMethodNotAllowed)
}
+
+func returnError(msg string, w http.ResponseWriter) {
+ errInfo := ErrorInfo{
+ Status: http.StatusBadRequest,
+ Detail: msg,
+ }
+ w.Header().Add("Content-Type", "application/problem+json")
+ w.WriteHeader(http.StatusBadRequest)
+ json.NewEncoder(w).Encode(errInfo)
+}
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"oransc.org/nonrtric/dmaapmediatorproducer/internal/jobs"
- "oransc.org/nonrtric/dmaapmediatorproducer/mocks/jobhandler"
+ "oransc.org/nonrtric/dmaapmediatorproducer/mocks/jobshandler"
)
func TestNewRouter(t *testing.T) {
assertions := require.New(t)
- r := NewRouter(nil)
- statusRoute := r.Get("status")
+ r := NewRouter(nil, nil)
+ statusRoute := r.Get("health_check")
assertions.NotNil(statusRoute)
supportedMethods, err := statusRoute.GetMethods()
assertions.Equal([]string{http.MethodGet}, supportedMethods)
assertions.Nil(err)
path, _ := statusRoute.GetPathTemplate()
- assertions.Equal("/status", path)
+ assertions.Equal("/health_check", path)
addJobRoute := r.Get("add")
assertions.NotNil(addJobRoute)
assertions.Equal([]string{http.MethodPost}, supportedMethods)
assertions.Nil(err)
path, _ = addJobRoute.GetPathTemplate()
- assertions.Equal("/jobs", path)
+ assertions.Equal("/info_job", path)
deleteJobRoute := r.Get("delete")
assertions.NotNil(deleteJobRoute)
assertions.Equal([]string{http.MethodDelete}, supportedMethods)
assertions.Nil(err)
path, _ = deleteJobRoute.GetPathTemplate()
- assertions.Equal("/jobs/{infoJobId}", path)
+ assertions.Equal("/info_job/{infoJobId}", path)
notFoundHandler := r.NotFoundHandler
handler := http.HandlerFunc(notFoundHandler.ServeHTTP)
assertions.Equal("/admin/log", path)
}
-func TestStatusHandler(t *testing.T) {
- assertions := require.New(t)
-
- handler := http.HandlerFunc(statusHandler)
- responseRecorder := httptest.NewRecorder()
- r := newRequest(http.MethodGet, "/status", nil, t)
-
- handler.ServeHTTP(responseRecorder, r)
-
- assertions.Equal(http.StatusOK, responseRecorder.Code)
- assertions.Equal("", responseRecorder.Body.String())
-}
-
-func TestAddInfoJobHandler(t *testing.T) {
+func TestAddInfoJobToJobsHandler(t *testing.T) {
assertions := require.New(t)
type args struct {
mockReturn error
}
tests := []struct {
- name string
- args args
- wantedStatus int
- wantedBody string
+ name string
+ args args
+ wantedStatus int
+ wantedErrorInfo *ErrorInfo
}{
{
- name: "AddInfoJobHandler with correct job, should return OK",
+ name: "AddInfoJobToJobsHandler with correct job, should return OK",
args: args{
job: jobs.JobInfo{
Owner: "owner",
LastUpdated: "now",
InfoJobIdentity: "jobId",
TargetUri: "target",
- InfoJobData: "{}",
+ InfoJobData: jobs.Parameters{},
InfoTypeIdentity: "type",
},
},
wantedStatus: http.StatusOK,
},
{
- name: "AddInfoJobHandler with incorrect job info, should return BadRequest",
+ name: "AddInfoJobToJobsHandler with incorrect job info, should return BadRequest",
args: args{
job: jobs.JobInfo{
Owner: "bad",
mockReturn: errors.New("error"),
},
wantedStatus: http.StatusBadRequest,
- wantedBody: "Invalid job info. Cause: error",
+ wantedErrorInfo: &ErrorInfo{
+ Status: http.StatusBadRequest,
+ Detail: "Invalid job info. Cause: error",
+ },
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- jobHandlerMock := jobhandler.JobHandler{}
- jobHandlerMock.On("AddJobFromRESTCall", tt.args.job).Return(tt.args.mockReturn)
+ jobsHandlerMock := jobshandler.JobsHandler{}
+ jobsHandlerMock.On("AddJobFromRESTCall", tt.args.job).Return(tt.args.mockReturn)
- callbackHandlerUnderTest := NewProducerCallbackHandler(&jobHandlerMock)
+ callbackHandlerUnderTest := NewProducerCallbackHandler(&jobsHandlerMock)
handler := http.HandlerFunc(callbackHandlerUnderTest.addInfoJobHandler)
responseRecorder := httptest.NewRecorder()
handler.ServeHTTP(responseRecorder, r)
assertions.Equal(tt.wantedStatus, responseRecorder.Code, tt.name)
- assertions.Contains(responseRecorder.Body.String(), tt.wantedBody, tt.name)
- jobHandlerMock.AssertCalled(t, "AddJobFromRESTCall", tt.args.job)
+ if tt.wantedErrorInfo != nil {
+ var actualErrInfo ErrorInfo
+ err := json.Unmarshal(getBody(responseRecorder, t), &actualErrInfo)
+ if err != nil {
+ t.Error("Unable to unmarshal error body", err)
+ t.Fail()
+ }
+ assertions.Equal(*tt.wantedErrorInfo, actualErrInfo, tt.name)
+ assertions.Equal("application/problem+json", responseRecorder.Result().Header.Get("Content-Type"))
+ }
+ jobsHandlerMock.AssertCalled(t, "AddJobFromRESTCall", tt.args.job)
})
}
}
func TestDeleteJob(t *testing.T) {
assertions := require.New(t)
- jobHandlerMock := jobhandler.JobHandler{}
- jobHandlerMock.On("DeleteJobFromRESTCall", mock.Anything).Return(nil)
+ jobsHandlerMock := jobshandler.JobsHandler{}
+ jobsHandlerMock.On("DeleteJobFromRESTCall", mock.Anything).Return(nil)
- callbackHandlerUnderTest := NewProducerCallbackHandler(&jobHandlerMock)
+ callbackHandlerUnderTest := NewProducerCallbackHandler(&jobsHandlerMock)
responseRecorder := httptest.NewRecorder()
r := mux.SetURLVars(newRequest(http.MethodDelete, "/jobs/", nil, t), map[string]string{"infoJobId": "job1"})
assertions.Equal("", responseRecorder.Body.String())
- jobHandlerMock.AssertCalled(t, "DeleteJobFromRESTCall", "job1")
+ jobsHandlerMock.AssertCalled(t, "DeleteJobFromRESTCall", "job1")
}
func TestSetLogLevel(t *testing.T) {
logLevel string
}
tests := []struct {
- name string
- args args
- wantedStatus int
- wantedBody string
+ name string
+ args args
+ wantedStatus int
+ wantedErrorInfo *ErrorInfo
}{
{
name: "Set to valid log level, should return OK",
logLevel: "bad",
},
wantedStatus: http.StatusBadRequest,
- wantedBody: "Invalid log level: bad",
+ wantedErrorInfo: &ErrorInfo{
+ Detail: "Invalid log level: bad. Log level will not be changed!",
+ Status: http.StatusBadRequest,
+ },
},
}
for _, tt := range tests {
handler.ServeHTTP(responseRecorder, r)
assertions.Equal(tt.wantedStatus, responseRecorder.Code, tt.name)
- assertions.Contains(responseRecorder.Body.String(), tt.wantedBody, tt.name)
+ if tt.wantedErrorInfo != nil {
+ var actualErrInfo ErrorInfo
+ err := json.Unmarshal(getBody(responseRecorder, t), &actualErrInfo)
+ if err != nil {
+ t.Error("Unable to unmarshal error body", err)
+ t.Fail()
+ }
+ assertions.Equal(*tt.wantedErrorInfo, actualErrInfo, tt.name)
+ assertions.Equal("application/problem+json", responseRecorder.Result().Header.Get("Content-Type"))
+ }
})
}
}
return nil
}
}
+
+func getBody(responseRecorder *httptest.ResponseRecorder, t *testing.T) []byte {
+ buf := new(bytes.Buffer)
+ if _, err := buf.ReadFrom(responseRecorder.Body); err != nil {
+ t.Error("Unable to read error body", err)
+ t.Fail()
+ }
+ return buf.Bytes()
+}
import (
"crypto/tls"
+ "encoding/json"
"fmt"
"net/http"
"time"
+ "github.com/gorilla/mux"
log "github.com/sirupsen/logrus"
+ _ "oransc.org/nonrtric/dmaapmediatorproducer/api"
"oransc.org/nonrtric/dmaapmediatorproducer/internal/config"
"oransc.org/nonrtric/dmaapmediatorproducer/internal/jobs"
+ "oransc.org/nonrtric/dmaapmediatorproducer/internal/kafkaclient"
"oransc.org/nonrtric/dmaapmediatorproducer/internal/restclient"
"oransc.org/nonrtric/dmaapmediatorproducer/internal/server"
+
+ httpSwagger "github.com/swaggo/http-swagger"
)
var configuration *config.Config
+var registered bool
func init() {
configuration = config.New()
}
+// @title DMaaP Mediator Producer
+// @version 1.1.0
+
+// @license.name Apache 2.0
+// @license.url http://www.apache.org/licenses/LICENSE-2.0.html
+
func main() {
log.SetLevel(configuration.LogLevel)
log.Debug("Initializing DMaaP Mediator Producer")
} else {
log.Fatalf("Stopping producer due to error: %v", err)
}
+
retryClient := restclient.CreateRetryClient(cert)
+ kafkaFactory := kafkaclient.KafkaFactoryImpl{BootstrapServer: configuration.KafkaBootstrapServers}
+ distributionClient := restclient.CreateClientWithoutRetry(cert, 10*time.Second)
+
+ jobsManager := jobs.NewJobsManagerImpl(retryClient, configuration.DMaaPMRAddress, kafkaFactory, distributionClient)
+ go startCallbackServer(jobsManager, callbackAddress)
- jobsManager := jobs.NewJobsManagerImpl(retryClient, configuration.DMaaPMRAddress, restclient.CreateClientWithoutRetry(cert, 10*time.Second))
if err := registerTypesAndProducer(jobsManager, configuration.InfoCoordinatorAddress, callbackAddress, retryClient); err != nil {
log.Fatalf("Stopping producer due to: %v", err)
}
+ registered = true
jobsManager.StartJobsForAllTypes()
log.Debug("Starting DMaaP Mediator Producer")
- go func() {
- log.Debugf("Starting callback server at port %v", configuration.InfoProducerPort)
- r := server.NewRouter(jobsManager)
- if restclient.IsUrlSecure(callbackAddress) {
- log.Fatalf("Server stopped: %v", http.ListenAndServeTLS(fmt.Sprintf(":%v", configuration.InfoProducerPort), configuration.ProducerCertPath, configuration.ProducerKeyPath, r))
- } else {
- log.Fatalf("Server stopped: %v", http.ListenAndServe(fmt.Sprintf(":%v", configuration.InfoProducerPort), r))
- }
- }()
keepProducerAlive()
}
if configuration.ProducerCertPath == "" || configuration.ProducerKeyPath == "" {
return fmt.Errorf("missing PRODUCER_CERT and/or PRODUCER_KEY")
}
+ if configuration.DMaaPMRAddress == "" && configuration.KafkaBootstrapServers == "" {
+ return fmt.Errorf("at least one of DMAAP_MR_ADDR or KAFKA_BOOTSRAP_SERVERS must be provided")
+ }
return nil
}
-func registerTypesAndProducer(jobTypesHandler jobs.JobTypesManager, infoCoordinatorAddress string, callbackAddress string, client restclient.HTTPClient) error {
+func registerTypesAndProducer(jobTypesManager jobs.JobTypesManager, infoCoordinatorAddress string, callbackAddress string, client restclient.HTTPClient) error {
registrator := config.NewRegistratorImpl(infoCoordinatorAddress, client)
- configTypes, err := config.GetJobTypesFromConfiguration("configs/type_config.json")
+ configTypes, err := config.GetJobTypesFromConfiguration("configs")
if err != nil {
return fmt.Errorf("unable to register all types due to: %v", err)
}
- regErr := registrator.RegisterTypes(jobTypesHandler.LoadTypesFromConfiguration(configTypes))
+ regErr := registrator.RegisterTypes(jobTypesManager.LoadTypesFromConfiguration(configTypes))
if regErr != nil {
return fmt.Errorf("unable to register all types due to: %v", regErr)
}
producer := config.ProducerRegistrationInfo{
- InfoProducerSupervisionCallbackUrl: callbackAddress + server.StatusPath,
- SupportedInfoTypes: jobTypesHandler.GetSupportedTypes(),
+ InfoProducerSupervisionCallbackUrl: callbackAddress + server.HealthCheckPath,
+ SupportedInfoTypes: jobTypesManager.GetSupportedTypes(),
InfoJobCallbackUrl: callbackAddress + server.AddJobPath,
}
if err := registrator.RegisterProducer("DMaaP_Mediator_Producer", &producer); err != nil {
return nil
}
+func startCallbackServer(jobsManager jobs.JobsManager, callbackAddress string) {
+ log.Debugf("Starting callback server at port %v", configuration.InfoProducerPort)
+ r := server.NewRouter(jobsManager, statusHandler)
+ addSwaggerHandler(r)
+ if restclient.IsUrlSecure(callbackAddress) {
+ log.Fatalf("Server stopped: %v", http.ListenAndServeTLS(fmt.Sprintf(":%v", configuration.InfoProducerPort), configuration.ProducerCertPath, configuration.ProducerKeyPath, r))
+ } else {
+ log.Fatalf("Server stopped: %v", http.ListenAndServe(fmt.Sprintf(":%v", configuration.InfoProducerPort), r))
+ }
+}
+
+type ProducerStatus struct {
+ // The registration status of the producer in Information Coordinator Service. Either `registered` or `not registered`
+ RegisteredStatus string `json:"registeredStatus" swaggertype:"string" example:"registered"`
+} // @name ProducerStatus
+
+// @Summary Get status
+// @Description Get the status of the producer. Will show if the producer has registered in ICS.
+// @Tags Data producer (callbacks)
+// @Produce json
+// @Success 200 {object} ProducerStatus
+// @Router /health_check [get]
+func statusHandler(w http.ResponseWriter, r *http.Request) {
+ status := ProducerStatus{
+ RegisteredStatus: "not registered",
+ }
+ if registered {
+ status.RegisteredStatus = "registered"
+ }
+ json.NewEncoder(w).Encode(status)
+}
+
+// @Summary Get Swagger Documentation
+// @Description Get the Swagger API documentation for the producer.
+// @Tags Admin
+// @Success 200
+// @Router /swagger [get]
+func addSwaggerHandler(r *mux.Router) {
+ r.PathPrefix("/swagger").Handler(httpSwagger.WrapHandler)
+}
+
func keepProducerAlive() {
forever := make(chan int)
<-forever
--- /dev/null
+// -
+// ========================LICENSE_START=================================
+// O-RAN-SC
+// %%
+// Copyright (C) 2022: Nordix Foundation
+// %%
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// ========================LICENSE_END===================================
+//
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os/exec"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+ "oransc.org/nonrtric/dmaapmediatorproducer/internal/config"
+ "oransc.org/nonrtric/dmaapmediatorproducer/internal/jobs"
+ "oransc.org/nonrtric/dmaapmediatorproducer/internal/kafkaclient"
+)
+
+// This is not a real test, just a way to get the Swagger documentation generated automatically.
+// Hence there are no assertions in this test.
+func TestGenerateSwaggerDocs(t *testing.T) {
+ cmd := exec.Command("./generate_swagger_docs.sh")
+
+ err := cmd.Run()
+ if err != nil {
+ fmt.Println("Error generating Swagger:", err)
+ }
+}
+
+func TestValidateConfiguration(t *testing.T) {
+ assertions := require.New(t)
+
+ validConfig := config.Config{
+ InfoProducerHost: "host",
+ DMaaPMRAddress: "address",
+ KafkaBootstrapServers: "servers",
+ ProducerCertPath: "path",
+ ProducerKeyPath: "path",
+ }
+ assertions.Nil(validateConfiguration(&validConfig))
+
+ missingProducerHost := config.Config{
+ DMaaPMRAddress: "address",
+ KafkaBootstrapServers: "servers",
+ ProducerCertPath: "path",
+ ProducerKeyPath: "path",
+ }
+ assertions.Contains(validateConfiguration(&missingProducerHost).Error(), "INFO_PRODUCER_HOST")
+
+ missingCert := config.Config{
+ InfoProducerHost: "host",
+ DMaaPMRAddress: "address",
+ KafkaBootstrapServers: "servers",
+ ProducerKeyPath: "path",
+ }
+ assertions.Contains(validateConfiguration(&missingCert).Error(), "PRODUCER_CERT")
+
+ missingCertKey := config.Config{
+ InfoProducerHost: "host",
+ DMaaPMRAddress: "address",
+ KafkaBootstrapServers: "servers",
+ ProducerCertPath: "path",
+ }
+ assertions.Contains(validateConfiguration(&missingCertKey).Error(), "PRODUCER_KEY")
+
+ missingMRAddress := config.Config{
+ InfoProducerHost: "host",
+ KafkaBootstrapServers: "servers",
+ ProducerCertPath: "path",
+ ProducerKeyPath: "path",
+ }
+ assertions.Nil(validateConfiguration(&missingMRAddress))
+
+ missingKafkaServers := config.Config{
+ InfoProducerHost: "host",
+ DMaaPMRAddress: "address",
+ ProducerCertPath: "path",
+ ProducerKeyPath: "path",
+ }
+ assertions.Nil(validateConfiguration(&missingKafkaServers))
+
+	missingMRAddressAndKafkaServers := config.Config{
+		InfoProducerHost: "host",
+		ProducerCertPath: "path",
+		ProducerKeyPath:  "path",
+	}
+	assertions.Contains(validateConfiguration(&missingMRAddressAndKafkaServers).Error(), "DMAAP_MR_ADDR")
+	assertions.Contains(validateConfiguration(&missingMRAddressAndKafkaServers).Error(), "KAFKA_BOOTSRAP_SERVERS")
+}
+
+func TestRegisterTypesAndProducer(t *testing.T) {
+ assertions := require.New(t)
+
+ wg := sync.WaitGroup{}
+ clientMock := NewTestClient(func(req *http.Request) *http.Response {
+ if req.URL.String() == configuration.InfoCoordinatorAddress+"/data-producer/v1/info-types/STD_Fault_Messages" {
+			assertions.Equal("PUT", req.Method)
+ body := getBodyAsString(req, t)
+ assertions.Contains(body, "info_job_data_schema")
+ assertions.Equal("application/json", req.Header.Get("Content-Type"))
+ wg.Done()
+ return &http.Response{
+ StatusCode: 200,
+ Body: ioutil.NopCloser(bytes.NewBufferString(`OK`)),
+ Header: make(http.Header), // Must be set to non-nil value or it panics
+ }
+ } else if req.URL.String() == configuration.InfoCoordinatorAddress+"/data-producer/v1/info-types/Kafka_TestTopic" {
+			assertions.Equal("PUT", req.Method)
+ body := getBodyAsString(req, t)
+ assertions.Contains(body, "info_job_data_schema")
+ assertions.Equal("application/json", req.Header.Get("Content-Type"))
+ wg.Done()
+ return &http.Response{
+ StatusCode: 200,
+ Body: ioutil.NopCloser(bytes.NewBufferString(`OK`)),
+ Header: make(http.Header), // Must be set to non-nil value or it panics
+ }
+ } else if req.URL.String() == configuration.InfoCoordinatorAddress+"/data-producer/v1/info-producers/DMaaP_Mediator_Producer" {
+			assertions.Equal("PUT", req.Method)
+ body := getBodyAsString(req, t)
+ assertions.Contains(body, "callbackAddress/health_check")
+ assertions.Contains(body, "callbackAddress/info_job")
+ assertions.Contains(body, "Kafka_TestTopic")
+ assertions.Contains(body, "STD_Fault_Messages")
+ assertions.Equal("application/json", req.Header.Get("Content-Type"))
+ wg.Done()
+ return &http.Response{
+ StatusCode: 200,
+ Body: ioutil.NopCloser(bytes.NewBufferString(`OK`)),
+ Header: make(http.Header), // Must be set to non-nil value or it panics
+ }
+ }
+ t.Error("Wrong call to client: ", req)
+ t.Fail()
+ return nil
+ })
+ jobsManager := jobs.NewJobsManagerImpl(clientMock, configuration.DMaaPMRAddress, kafkaclient.KafkaFactoryImpl{}, nil)
+
+ wg.Add(3)
+ err := registerTypesAndProducer(jobsManager, configuration.InfoCoordinatorAddress, "callbackAddress", clientMock)
+
+ assertions.Nil(err)
+
+ if waitTimeout(&wg, 2*time.Second) {
+ t.Error("Not all calls to server were made")
+ t.Fail()
+ }
+}
+
+type RoundTripFunc func(req *http.Request) *http.Response
+
+func (f RoundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
+ return f(req), nil
+}
+
+// NewTestClient returns *http.Client with Transport replaced to avoid making real calls
+func NewTestClient(fn RoundTripFunc) *http.Client {
+ return &http.Client{
+ Transport: RoundTripFunc(fn),
+ }
+}
+
+func getBodyAsString(req *http.Request, t *testing.T) string {
+ buf := new(bytes.Buffer)
+ if _, err := buf.ReadFrom(req.Body); err != nil {
+ t.Fail()
+ }
+ return buf.String()
+}
+
+// waitTimeout waits for the waitgroup for the specified max timeout.
+// Returns true if waiting timed out.
+func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
+ c := make(chan struct{})
+ go func() {
+ defer close(c)
+ wg.Wait()
+ }()
+ select {
+ case <-c:
+ return false // completed normally
+ case <-time.After(timeout):
+ return true // timed out
+ }
+}
--- /dev/null
+// Code generated by mockery v1.0.0. DO NOT EDIT.
+
+package mocks
+
+import (
+ kafka "github.com/confluentinc/confluent-kafka-go/kafka"
+
+ mock "github.com/stretchr/testify/mock"
+
+ time "time"
+)
+
+// KafkaConsumer is an autogenerated mock type for the KafkaConsumer type
+type KafkaConsumer struct {
+ mock.Mock
+}
+
+// Commit provides a mock function with given fields:
+func (_m KafkaConsumer) Commit() ([]kafka.TopicPartition, error) {
+ ret := _m.Called()
+
+ var r0 []kafka.TopicPartition
+ if rf, ok := ret.Get(0).(func() []kafka.TopicPartition); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]kafka.TopicPartition)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func() error); ok {
+ r1 = rf()
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// ReadMessage provides a mock function with given fields: timeout
+func (_m KafkaConsumer) ReadMessage(timeout time.Duration) (*kafka.Message, error) {
+ ret := _m.Called(timeout)
+
+ var r0 *kafka.Message
+ if rf, ok := ret.Get(0).(func(time.Duration) *kafka.Message); ok {
+ r0 = rf(timeout)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*kafka.Message)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(time.Duration) error); ok {
+ r1 = rf(timeout)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Subscribe provides a mock function with given fields: topic
+func (_m KafkaConsumer) Subscribe(topic string) error {
+ ret := _m.Called(topic)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(string) error); ok {
+ r0 = rf(topic)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
--- /dev/null
+// Code generated by mockery v1.0.0. DO NOT EDIT.
+
+package mocks
+
+import (
+ mock "github.com/stretchr/testify/mock"
+ "oransc.org/nonrtric/dmaapmediatorproducer/internal/kafkaclient"
+)
+
+// KafkaFactory is an autogenerated mock type for the KafkaFactory type
+type KafkaFactory struct {
+ mock.Mock
+}
+
+// NewKafkaConsumer provides a mock function with given fields: topicID
+func (_m KafkaFactory) NewKafkaConsumer(topicID string) (kafkaclient.KafkaConsumer, error) {
+ ret := _m.Called(topicID)
+
+ var r0 kafkaclient.KafkaConsumer
+ if rf, ok := ret.Get(0).(func(string) kafkaclient.KafkaConsumer); ok {
+ r0 = rf(topicID)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(kafkaclient.KafkaConsumer)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(string) error); ok {
+ r1 = rf(topicID)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
// Code generated by mockery v2.9.3. DO NOT EDIT.
-package jobhandler
+package jobshandler
import (
mock "github.com/stretchr/testify/mock"
jobs "oransc.org/nonrtric/dmaapmediatorproducer/internal/jobs"
)
-// JobHandler is an autogenerated mock type for the JobHandler type
-type JobHandler struct {
+// JobsHandler is an autogenerated mock type for the JobsHandler type
+type JobsHandler struct {
mock.Mock
}
// AddJob provides a mock function with given fields: _a0
-func (_m *JobHandler) AddJobFromRESTCall(_a0 jobs.JobInfo) error {
+func (_m *JobsHandler) AddJobFromRESTCall(_a0 jobs.JobInfo) error {
ret := _m.Called(_a0)
var r0 error
}
// DeleteJob provides a mock function with given fields: jobId
-func (_m *JobHandler) DeleteJobFromRESTCall(jobId string) {
+func (_m *JobsHandler) DeleteJobFromRESTCall(jobId string) {
_m.Called(jobId)
}
+++ /dev/null
-<!--
- ============LICENSE_START=======================================================
- Copyright (C) 2021 Nordix Foundation.
- ================================================================================
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
- SPDX-License-Identifier: Apache-2.0
- ============LICENSE_END=========================================================
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
- <modelVersion>4.0.0</modelVersion>
-
- <groupId>oransc.org</groupId>
- <artifactId>dmaapmediatorproducer</artifactId>
- <version>1.0.0</version>
- <properties>
- <docker-maven-plugin.version>0.30.0</docker-maven-plugin.version>
- </properties>
-
- <build>
- <plugins>
- <plugin>
- <artifactId>exec-maven-plugin</artifactId>
- <groupId>org.codehaus.mojo</groupId>
- <executions>
- <execution>
- <id>Build Go binary</id>
- <phase>generate-sources</phase>
- <goals>
- <goal>exec</goal>
- </goals>
- <configuration>
- <executable>${basedir}/build_and_test.sh</executable>
- </configuration>
- </execution>
- </executions>
- </plugin>
- <plugin>
- <groupId>io.fabric8</groupId>
- <artifactId>docker-maven-plugin</artifactId>
- <version>${docker-maven-plugin.version}</version>
- <inherited>false</inherited>
- <executions>
- <execution>
- <id>generate-nonrtric-dmaap-mediator-producer-image</id>
- <phase>package</phase>
- <goals>
- <goal>build</goal>
- </goals>
- <configuration>
- <pullRegistry>${env.CONTAINER_PULL_REGISTRY}</pullRegistry>
- <images>
- <image>
- <name>o-ran-sc/nonrtric-dmaap-mediator-producer:${project.version}</name>
- <build>
- <cleanup>try</cleanup>
- <contextDir>${basedir}</contextDir>
- <dockerFile>Dockerfile</dockerFile>
- <args>
- <JAR>${project.build.finalName}.jar</JAR>
- </args>
- <tags>
- <tag>${project.version}</tag>
- </tags>
- </build>
- </image>
- </images>
- </configuration>
- </execution>
- <execution>
- <id>push-nonrtric-dmaap-mediator-producer-image</id>
- <goals>
- <goal>build</goal>
- <goal>push</goal>
- </goals>
- <configuration>
- <pullRegistry>${env.CONTAINER_PULL_REGISTRY}</pullRegistry>
- <pushRegistry>${env.CONTAINER_PUSH_REGISTRY}</pushRegistry>
- <images>
- <image>
- <name>o-ran-sc/nonrtric-dmaap-mediator-producer:${project.version}</name>
- <build>
- <contextDir>${basedir}</contextDir>
- <dockerFile>Dockerfile</dockerFile>
- <args>
- <JAR>${project.build.finalName}.jar</JAR>
- </args>
- <tags>
- <tag>${project.version}</tag>
- <tag>latest</tag>
- </tags>
- </build>
- </image>
- </images>
- </configuration>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
-</project>
}
fmt.Println("Registering consumer: ", jobInfo)
body, _ := json.Marshal(jobInfo)
- putErr := restclient.Put(fmt.Sprintf("http://localhost:8083/data-consumer/v1/info-jobs/job%v", port), body, &httpClient)
+ putErr := restclient.Put(fmt.Sprintf("https://localhost:8083/data-consumer/v1/info-jobs/job%v", port), body, &httpClient)
if putErr != nil {
fmt.Println("Unable to register consumer: ", putErr)
}
--- /dev/null
+// -
+// ========================LICENSE_START=================================
+// O-RAN-SC
+// %%
+// Copyright (C) 2021: Nordix Foundation
+// %%
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// ========================LICENSE_END===================================
+//
+
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/gorilla/mux"
+)
+
+func main() {
+ port := flag.Int("port", 8434, "The port this stub will listen on")
+ flag.Parse()
+ fmt.Println("Starting ICS stub on port ", *port)
+
+ r := mux.NewRouter()
+	r.HandleFunc("/data-producer/v1/info-types/{typeId}", handleTypeRegistration).Methods(http.MethodPut)
+	r.HandleFunc("/data-producer/v1/info-producers/{producerId}", handleProducerRegistration).Methods(http.MethodPut)
+ fmt.Println(http.ListenAndServe(fmt.Sprintf(":%v", *port), r))
+}
+
+func handleTypeRegistration(w http.ResponseWriter, r *http.Request) {
+ vars := mux.Vars(r)
+ id, ok := vars["typeId"]
+ if ok {
+ fmt.Printf("Registered type %v with schema: %v\n", id, readBody(r))
+ }
+}
+
+func handleProducerRegistration(w http.ResponseWriter, r *http.Request) {
+ vars := mux.Vars(r)
+ id, ok := vars["producerId"]
+ if ok {
+ fmt.Printf("Registered producer %v with data: %v\n", id, readBody(r))
+ }
+}
+
+func readBody(r *http.Request) string {
+ b, readErr := ioutil.ReadAll(r.Body)
+ if readErr != nil {
+ return fmt.Sprintf("Unable to read body due to: %v", readErr)
+ }
+ return string(b)
+}
#
#PMS
-PMS_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-policy-agent"
-PMS_IMAGE_TAG="2.2.0"
+PMS_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-a1-policy-management-service"
+PMS_IMAGE_TAG="2.3.1"
#A1_SIM
A1_SIM_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/a1-simulator"
-A1_SIM_IMAGE_TAG="2.1.0"
+A1_SIM_IMAGE_TAG="2.2.0"
#RAPP
RAPP_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-r-app-catalogue"
-RAPP_IMAGE_TAG="1.0.0"
+RAPP_IMAGE_TAG="1.0.2"
#CONTROL_PANEL
CONTROL_PANEL_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-controlpanel"
-CONTROL_PANEL_IMAGE_TAG="2.2.0"
+CONTROL_PANEL_IMAGE_TAG="2.3.0"
#GATEWAY
NONRTRIC_GATEWAY_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-gateway"
#ICS
ICS_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-information-coordinator-service"
-ICS_IMAGE_TAG="1.1.0"
+ICS_IMAGE_TAG="1.2.1"
#CONSUMER
CONSUMER_IMAGE_BASE="eexit/mirror-http-server"
#ORU
ORU_APP_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-o-ru-closed-loop-recovery"
-ORU_APP_IMAGE_TAG="1.0.0"
+ORU_APP_IMAGE_TAG="1.0.1"
#DB
-DB_IMAGE_BASE="mysql/mysql-server"
-DB_IMAGE_TAG="5.6"
+DB_IMAGE_BASE="mariadb"
+DB_IMAGE_TAG="10.5"
#A1CONTROLLER
A1CONTROLLER_IMAGE_BASE="nexus3.onap.org:10002/onap/sdnc-image"
#DMAAP_MEDIATOR_GO
DMAAP_MEDIATOR_GO_BASE="nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-dmaap-mediator-producer"
-DMAAP_MEDIATOR_GO_TAG="1.0.0"
+DMAAP_MEDIATOR_GO_TAG="1.0.1"
#DMAAP_MEDIATOR_JAVA
DMAAP_MEDIATOR_JAVA_BASE="nexus3.o-ran-sc.org:10003/o-ran-sc/nonrtric-dmaap-adaptor"
-DMAAP_MEDIATOR_JAVA_TAG="1.0.0-SNAPSHOT"
+DMAAP_MEDIATOR_JAVA_TAG="1.0.1-SNAPSHOT"
- 8433:8433
volumes:
- ./policy-service/config/application-policyagent.yaml:/opt/app/policy-agent/config/application.yaml:ro
- - ./policy-service/config/application_configuration.json:/opt/app/policy-agent/data/application_configuration.json:ro
+ - ./policy-service/config/application_configuration.json:/opt/app/policy-agent/data/application_configuration.json:rw
# For using own certs instead of the default ones (built into the container),
# place them in config/ directory, update the application-policyagent.yaml file, and uncomment the following lines
# - ./policy-service/config/keystore-policyagent.jks:/opt/app/policy-agent/etc/cert/keystore.jks:ro
services:
db:
- image: mysql/mysql-server:5.6
+ image: "${DB_IMAGE_BASE}:${DB_IMAGE_TAG}"
container_name: sdnc-db
networks:
- default
DMaaP Adaptor
=============
-The DMaaP Adaptor provides support for push delivery of any data received from DMaap or Kafka.
+The DMaaP Adaptor provides support for push delivery of any data received from DMaaP or Kafka.
See `DMaaP Adaptor API <./dmaap-adaptor-api.html>`_ for full details of the API.
"DMaaP Adaptor API", ":download:`link <../dmaap-adaptor-java/api/api.json>`", ":download:`link <../dmaap-adaptor-java/api/api.yaml>`"
+DMaaP Mediator Producer
+=======================
+
+The DMaaP Mediator Producer provides support for push delivery of any data received from DMaaP.
+
+See `DMaaP Mediator Producer API <./dmaap-mediator-producer-api.html>`_ for full details of the API.
+
+The API is also described in Swagger-JSON and YAML:
+
+
+.. csv-table::
+ :header: "API name", "|swagger-icon|", "|yaml-icon|"
+ :widths: 10,5, 5
+
+ "DMaaP Mediator Producer API", ":download:`link <../dmaap-mediator-producer/api/swagger.json>`", ":download:`link <../dmaap-mediator-producer/api/swagger.yaml>`"
+
Non-RT-RIC App Catalogue (Initial)
==================================
'./rac-api.html', #Generated file that doesn't exist at link check.
'./ics-api.html', #Generated file that doesn't exist at link check.
'./dmaap-adaptor-api.html' #Generated file that doesn't exist at link check.
+    ,'./dmaap-mediator-producer-api.html' #Generated file that doesn't exist at link check.
]
extensions = ['sphinxcontrib.redoc', 'sphinx.ext.intersphinx',]
'name': 'DMaaP Adaptor API',
'page': 'dmaap-adaptor-api',
'spec': '../dmaap-adaptor-java/api/api.json',
+ },
+ {
+ 'name': 'DMaaP Mediator Producer API',
+ 'page': 'dmaap-mediator-producer-api',
+ 'spec': '../dmaap-mediator-producer/api/swagger.json',
'embed': True,
}
]
This document provides a quickstart for developers of the Non-RT RIC parts.
-Additional developer guides are available on the `O-RAN SC NONRTRIC Developer wiki <https://wiki.o-ran-sc.org/display/RICNR/Release+E>`_
+Additional developer guides are available on the `O-RAN SC NONRTRIC Developer wiki <https://wiki.o-ran-sc.org/display/RICNR/Release+E>`_.
A1 Policy Management Service & SDNC/A1 Controller & A1 Adapter
--------------------------------------------------------------
-The A1 Policy Management Service is implemented in ONAP. For documentation see `ONAP CCSDK documentation <https://docs.onap.org/projects/onap-ccsdk-oran/en/latest/index.html>`_
+The A1 Policy Management Service is implemented in ONAP. For documentation see `ONAP CCSDK documentation <https://docs.onap.org/projects/onap-ccsdk-oran/en/latest/index.html>`_.
and `wiki <https://wiki.onap.org/pages/viewpage.action?pageId=84672221>`_.
Information Coordinator Service
DMaaP Adaptor Service
---------------------
-This is run in the same way as the Information Coordinator Service
+
+This Java implementation is run in the same way as the Information Coordinator Service.
The following properties in the application.yaml file have to be modified:
* server.ssl.key-store=./config/keystore.jks
* app.webclient.trust-store=./config/truststore.jks
* app.configuration-filepath=./src/test/resources/test_application_configuration.json
+DMaaP Mediator Producer
+-----------------------
+
+To build and run this Go implementation, see the README.md file under the folder "dmaap-mediator-producer" in the "nonrtric" repo.
+
O-DU & O-RU fronthaul recovery
------------------------------
-See the page in Wiki: `O-RU Fronthaul Recovery usecase <https://wiki.o-ran-sc.org/display/RICNR/O-RU+Fronthaul+Recovery+usecase>`_
+See the page in Wiki: `O-RU Fronthaul Recovery usecase <https://wiki.o-ran-sc.org/display/RICNR/O-RU+Fronthaul+Recovery+usecase>`_.
O-DU Slicing use cases
----------------------
-See the page in Wiki: `O-DU Slice Assurance usecase <https://wiki.o-ran-sc.org/display/RICNR/O-DU+Slice+Assurance+usecase>`_
+See the page in Wiki: `O-DU Slice Assurance usecase <https://wiki.o-ran-sc.org/display/RICNR/O-DU+Slice+Assurance+usecase>`_.
Helm Manager
------------
-See the page in Wiki: `Release E <https://wiki.o-ran-sc.org/display/RICNR/Release+E>`_
+See the page in Wiki: `Release E <https://wiki.o-ran-sc.org/display/RICNR/Release+E>`_.
Kubernetes deployment
=====================
-Non-RT RIC can be also deployed in a Kubernetes cluster, `it/dep repository <https://gerrit.o-ran-sc.org/r/admin/repos/it/dep>`_
+Non-RT RIC can be also deployed in a Kubernetes cluster, `it/dep repository <https://gerrit.o-ran-sc.org/r/admin/repos/it/dep>`_.
hosts deployment and integration artifacts. Instructions and helm charts to deploy the Non-RT-RIC functions in the
OSC NONRTRIC integrated test environment can be found in the *./nonrtric* directory.
-For more information on installation of NonRT-RIC in Kubernetes, see `Deploy NONRTRIC in Kubernetes <https://wiki.o-ran-sc.org/display/RICNR/Deploy+NONRTRIC+in+Kubernetes>`_
+For more information on installation of NonRT-RIC in Kubernetes, see `Deploy NONRTRIC in Kubernetes <https://wiki.o-ran-sc.org/display/RICNR/Deploy+NONRTRIC+in+Kubernetes>`_.
For more information see `Integration and Testing documentation on the O-RAN-SC wiki <https://docs.o-ran-sc.org/projects/o-ran-sc-it-dep/en/latest/index.html>`_.
-f policy-service/docker-compose.yaml
-f ics/docker-compose.yaml
-The example above is just an example to start some of the components.
-For more information on running and configuring the functions can be found in the README file in the "`docker-compose <https://gerrit.o-ran-sc.org/r/gitweb?p=nonrtric.git;a=tree;f=docker-compose>`__" folder, and on the `wiki page <https://wiki.o-ran-sc.org/display/RICNR/Release+E+-+Run>`_
+The example above is just an example to start some of the components.
+More information on running and configuring the functions can be found in the README file in the "`docker-compose <https://gerrit.o-ran-sc.org/r/gitweb?p=nonrtric.git;a=tree;f=docker-compose>`__" folder, and on the `wiki page <https://wiki.o-ran-sc.org/display/RICNR/Release+E+-+Run+in+Docker>`_.
Install with Helm
+++++++++++++++++
Summary
-------
-The Non-RealTime RIC (RAN Intelligent Controller) is an Orchestration and Automation function described by the O-RAN Alliance for non-real-time intelligent management of RAN (Radio Access Network) functions.
+The Non-RealTime RIC (RAN Intelligent Controller) is an Orchestration and Automation function described by the O-RAN Alliance for non-real-time intelligent management of RAN (Radio Access Network) functions.
-The primary goal of the Non-RealTime RIC is to support non-real-time radio resource management, higher layer procedure optimization, policy optimization in RAN, and providing guidance, parameters, policies and AI/ML models to support the operation of near-RealTime RIC functions in the RAN to achieve higher-level non-real-time objectives.
+The primary goal of the Non-RealTime RIC is to support non-real-time radio resource management, higher layer procedure optimization, policy optimization in RAN, and providing guidance, parameters, policies and AI/ML models to support the operation of near-RealTime RIC functions in the RAN to achieve higher-level non-real-time objectives.
-Non-RealTime RIC functions include service and policy management, RAN analytics and model-training for the near-RealTime RICs.
+Non-RealTime RIC functions include service and policy management, RAN analytics and model-training for the near-RealTime RICs.
The Non-RealTime RIC platform hosts and coordinates rApps (Non-RT RIC applications) to perform Non-RealTime RIC tasks.
-The Non-RealTime RIC also hosts the new R1 interface (between rApps and SMO/Non-RealTime-RIC services)
+The Non-RealTime RIC also hosts the new R1 interface (between rApps and SMO/Non-RealTime-RIC services).
The O-RAN-SC (OSC) NONRTRIC project provides concepts, architecture and reference implementations as defined and described by the `O-RAN Alliance <https://www.o-ran.org>`_ architecture.
The OSC NONRTRIC implementation communicates with near-RealTime RIC elements in the RAN via the A1 interface. Using the A1 interface the NONRTRIC will facilitate the provision of policies for individual UEs or groups of UEs; monitor and provide basic feedback on policy state from near-RealTime RICs; provide enrichment information as required by near-RealTime RICs; and facilitate ML model training, distribution and inference in cooperation with the near-RealTime RICs.
Non-RT-RIC Control Panel / NONRTRIC Dashboard
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Graphical user interface
+Graphical user interface.
* View and Manage A1 policies in the RAN (near-RT-RICs)
* Graphical A1 policy creation/editing is model-driven, based on policy type's JSON schema
Please refer the developer guide and the `Wiki <https://wiki.o-ran-sc.org/display/RICNR/>`_ to set up in your local environment.
-More details available at the `NONRTRIC-Portal documentation site <https://docs.o-ran-sc.org/projects/o-ran-sc-portal-nonrtric-controlpanel>`_
+More details available at the `NONRTRIC-Portal documentation site <https://docs.o-ran-sc.org/projects/o-ran-sc-portal-nonrtric-controlpanel>`_.
Information Coordination Service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A1 Controller Service above A1 Controller/Adaptor that provides:
-* Unified REST & DMaaP NBI APIs for managing A1 Policies in all near-RT-RICs
+* Unified REST & DMaaP NBI APIs for managing A1 Policies in all near-RT-RICs.
- + Query A1 Policy Types in near-RT-RICs
- + Create/Query/Update/Delete A1 Policy Instances in near-RT-RICs
- + Query Status for A1 Policy Instances
+ + Query A1 Policy Types in near-RT-RICs.
+ + Create/Query/Update/Delete A1 Policy Instances in near-RT-RICs.
+ + Query Status for A1 Policy Instances.
-* Maintains (persistent) cache of RAN's A1 Policy information
+* Maintains (persistent) cache of RAN's A1 Policy information.
- * Support RAN-wide view of A1 Policy information
- * Streamline A1 traffic
- * Enable (optional) re-synchronization after inconsistencies / near-RT-RIC restarts
- * Supports a large number of near-RT-RICs (& multi-version support)
+ * Support RAN-wide view of A1 Policy information.
+ * Streamline A1 traffic.
+ * Enable (optional) re-synchronization after inconsistencies / near-RT-RIC restarts.
+ * Supports a large number of near-RT-RICs (& multi-version support).
-* Converged ONAP & O-RAN-SC A1 Adapter/Controller functions in ONAP SDNC/CCSDK (Optionally deploy without A1 Adaptor to connect direct to near-RT-RICs)
-* Support for different Southbound connectors per near-RT-RIC - e.g. different A1 versions, different near-RT-RIC version, different A1 adapter/controllers supports different or proprietary A1 controllers/EMSs
+* Converged ONAP & O-RAN-SC A1 Adapter/Controller functions in ONAP SDNC/CCSDK (Optionally deploy without A1 Adaptor to connect direct to near-RT-RICs).
+* Support for different Southbound connectors per near-RT-RIC - e.g. different A1 versions, different near-RT-RIC version, different A1 adapter/controllers supports different or proprietary A1 controllers/EMSs.
-See also: `A1 Policy Management Service in ONAP <https://wiki.onap.org/pages/viewpage.action?pageId=84672221>`_
+See also: `A1 Policy Management Service in ONAP <https://wiki.onap.org/pages/viewpage.action?pageId=84672221>`_.
Implementation:
-* Implemented as a Java Spring Boot application
+* Implemented as a Java Spring Boot application.
A1/SDNC Controller & A1 Adapter (Controller plugin)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Mediation point for A1 interface termination in SMO/NONRTRIC
-* Implemented as CCSDK OSGI Feature/Bundles
-* A1 REST southbound
-* RESTCONF Northbound
-* NETCONF YANG > RESTCONF adapter
-* SLI Mapping logic supported
-* Can be included in an any controller based on ONAP CCSDK
+Mediation point for A1 interface termination in SMO/NONRTRIC.
+
+* Implemented as CCSDK OSGI Feature/Bundles.
+* A1 REST southbound.
+* RESTCONF Northbound.
+* NETCONF YANG > RESTCONF adapter.
+* SLI Mapping logic supported.
+* Can be included in an any controller based on ONAP CCSDK.
-See also: `A1 Adapter/Controller Functions in ONAP <https://wiki.onap.org/pages/viewpage.action?pageId=84672221>`_
+See also: `A1 Adapter/Controller Functions in ONAP <https://wiki.onap.org/pages/viewpage.action?pageId=84672221>`_.
A1 Interface / Near-RT-RIC Simulator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Stateful A1 test stub.
-* Used to create multiple stateful A1 providers (simulated near-rt-rics)
-* Supports A1-Policy and A1-Enrichment Information
-* Swagger-based northbound interface, so easy to change the A1 profile exposed (e.g. A1 version, A1 Policy Types, A1-E1 consumers, etc)
-* All A1-AP versions supported
+* Used to create multiple stateful A1 providers (simulated near-rt-rics).
+* Supports A1-Policy and A1-Enrichment Information.
+* Swagger-based northbound interface, so easy to change the A1 profile exposed (e.g. A1 version, A1 Policy Types, A1-E1 consumers, etc).
+* All A1-AP versions supported.
Implementation:
-* Implemented as a Python application
-* Repo: *sim/a1-interface*
+* Implemented as a Python application.
+* Repo: *sim/a1-interface*.
More details available at the `A1 Simulator documentation site <https://docs.o-ran-sc.org/projects/o-ran-sc-sim-a1-interface>`_
Non-RT-RIC (Spring Cloud) Service Gateway
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Support Apps to use A1 Services
+Support Apps to use A1 Services.
-* `Spring Cloud Gateway <https://cloud.spring.io/spring-cloud-gateway>`_ provides the library to build a basic API gateway
-* Exposes A1 Policy Management Service & Information Coordinator Service.
+* `Spring Cloud Gateway <https://cloud.spring.io/spring-cloud-gateway>`_ provides the library to build a basic API gateway.
+* Exposes A1 Policy Management Service & Information Coordinator Service.
* Additional predicates can be added in code or preferably in the Gateway yaml configuration.
Implementation:
-* Implemented as a Java Spring Cloud application
-* Repo: *portal/nonrtric-controlpanel*
+* Implemented as a Java Spring Cloud application.
+* Repo: *portal/nonrtric-controlpanel*.
Non-RT-RIC (Kong) Service Exposure Prototyping
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Support Apps to use NONRTRIC, SMO and other App interfaces
-A building block for coming releases as the R1 Interface concept matures
+Support Apps to use NONRTRIC, SMO and other App interfaces.
+A building block for coming releases as the R1 Interface concept matures.
-* Support dynamic registration and exposure of service interfaces to Non-RT-RIC applications (& NONRTRIC Control panel)
-* Extends a static gateway function specifically for NONRTRIC Control panel (described above)
-* Initial version based on `Kong API Gateway <https://docs.konghq.com/gateway-oss>`_ function
-* Initial exposure candidates include A1 (NONRTRIC) services & O1 (OAM/SMO) services
+* Support dynamic registration and exposure of service interfaces to Non-RT-RIC applications (& NONRTRIC Control panel).
+* Extends a static gateway function specifically for NONRTRIC Control panel (described above).
+* Initial version based on `Kong API Gateway <https://docs.konghq.com/gateway-oss>`_ function.
+* Initial exposure candidates include A1 (NONRTRIC) services & O1 (OAM/SMO) services.
-NONRTRIC Kubernetes deployment - including Kong configurations can be found in the OSC `it/dep <https://gerrit.o-ran-sc.org/r/gitweb?p=it/dep.git;a=tree;f=nonrtric/helm/nonrtric>`_ Gerrit repo.
+NONRTRIC Kubernetes deployment - including Kong configurations can be found in the OSC `it/dep <https://gerrit.o-ran-sc.org/r/gitweb?p=it/dep.git;a=tree;f=nonrtric/helm/nonrtric>`_ Gerrit repo.
DMaaP/Kafka Information Producer Adapters
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Configurable mediators to take information from DMaaP (& Kafka) and present it as a coordinated Information Producer
-
-These mediators/adapters are generic information producers, which registers themselves as an information producers of defined information types (in Information Coordination Service).
-The information types are defined in a configuration file.
-Information jobs defined using Information Coordination Service (ICS) then allow information consumers to retrieve data from DMaaP MR or Kafka topics (accessing the ICS API).
+Configurable mediators to take information from DMaaP and Kafka and present it as a coordinated Information Producer.
-Two alternative implementations to allow Information Consumers to consume DMaaP or Kafka events as coordinated Information Jobs.
+These mediators/adapters are generic information producers, which register themselves as information producers of defined information types in Information Coordination Service (ICS).
+The information types are defined in a configuration file.
+Information jobs defined using ICS then allow information consumers to retrieve data from DMaaP MR or Kafka topics (accessing the ICS API).
-Implementations:
+There are two alternative implementations to allow Information Consumers to consume DMaaP or Kafka events as coordinated Information Jobs.
-1. A version implemented in Java (Spring) - Supporting DMaaP and Kafka mediation
-2. A version implemented in Go - Supporting DMaaP mediation
+1. A version implemented in Java Spring (DMaaP Adaptor Service).
+2. A version implemented in Go (DMaaP Mediator Producer).
Initial Non-RT-RIC App Catalogue
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Register for Non-RT-RIC Apps.
-* Non-RT-RIC Apps can be registered / queried
-* Limited functionality/integration for now
-* *More work required in coming releases as the rApp concept matures*
+* Non-RT-RIC Apps can be registered / queried.
+* Limited functionality/integration for now.
+* *More work required in coming releases as the rApp concept matures*.
Initial K8S Helm Chart LCM Manager
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Onboard, start, stop, and modify Non-RT-RIC App µServices as Helm Charts
-*A building block for coming releases as the R-APP concept matures*
+Onboard, start, stop, and modify Non-RT-RIC App µServices as Helm Charts.
+*A building block for coming releases as the R-APP concept matures*.
-* Interfaces that accepts Non-RT-RIC App µServices Helm Charts
-* Support basic LCM operations
-* Onboard, Start, Stop, Modify, Monitor
-* Initial version co-developed with v. similar functions in ONAP
-* *Limited functionality/integration for now*
+* Interfaces that accepts Non-RT-RIC App µServices Helm Charts.
+* Support basic LCM operations.
+* Onboard, Start, Stop, Modify, Monitor.
+* Initial version co-developed with v. similar functions in ONAP.
+* *Limited functionality/integration for now*.
Test Framework
~~~~~~~~~~~~~~
-A full test environment with extensive test cases/scripts can be found in the ``test`` directory in the *nonrtric* source code
+A full test environment with extensive test cases/scripts can be found in the ``test`` directory in the *nonrtric* source code.
Use Cases
~~~~~~~~~
"Helloworld" O-RU Fronthaul Recovery use case
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A very simplified closed-loop rApp use case to re-establish front-haul connections between O-DUs and O-RUs if they fail. Not intended to to be 'real-world'
+A very simplified closed-loop rApp use case to re-establish front-haul connections between O-DUs and O-RUs if they fail. Not intended to be 'real-world'.
"Helloworld" O-DU Slice Assurance use case
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-A very simplified closed-loop rApp use case to re-prioritize a RAN slice's radio resource allocation priority if sufficient throughput cannot be maintained. Not intended to to be 'real-world'
+A very simplified closed-loop rApp use case to re-prioritize a RAN slice's radio resource allocation priority if sufficient throughput cannot be maintained. Not intended to be 'real-world'.
WORKDIR /opt/app/helm-manager
COPY target/app.jar app.jar
+ARG user=nonrtric
+ARG group=nonrtric
+
+RUN groupadd $group && \
+ useradd -r -g $group $user
+RUN chown -R $user:$group /opt/app/helm-manager
+RUN chown -R $user:$group /etc/app/helm-manager
+
+RUN mkdir /var/helm-manager-service
+RUN chown -R $user:$group /var/helm-manager-service
+
+RUN mkdir /home/$user
+RUN chown -R $user:$group /home/$user
+
+USER $user
+
CMD [ "java", "-jar", "app.jar", "--spring.config.location=optional:file:/etc/app/helm-manager/"]
--name helmmanagerservice \
--network nonrtric-docker-net \
-v $(pwd)/mnt/database:/var/helm-manager/database \
- -v ~/.kube:/root/.kube \
- -v ~/.helm:/root/.helm \
- -v ~/.config/helm:/root/.config/helm \
- -v ~/.cache/helm:/root/.cache/helm \
+ -v ~/.kube:/home/nonrtric/.kube \
+ -v ~/.helm:/home/nonrtric/.helm \
+ -v ~/.config/helm:/home/nonrtric/.config/helm \
+ -v ~/.cache/helm:/home/nonrtric/.cache/helm \
-v $(pwd)/config/KubernetesParticipantConfig.json:/opt/app/helm-manager/src/main/resources/config/KubernetesParticipantConfig.json \
-v $(pwd)/config/application.yaml:/opt/app/helm-manager/src/main/resources/config/application.yaml \
nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-helm-manager:1.1.0-SNAPSHOT
- name: helm-manager-service-pv
persistentVolumeClaim:
claimName: helm-manager-service-pvc
-
+ initContainers:
+ - name: change-ownership-container
+ image: busybox:latest
+ command: ["sh","-c","chown -R 999:1000 /var/helm-manager-service"]
+ resources: {}
+ volumeMounts:
+ - mountPath: /var/helm-manager-service
+ name: helm-manager-service-pv
---
apiVersion: v1
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
- <version>2.3.8.RELEASE</version>
+ <version>2.6.2</version>
<relativePath />
</parent>
<groupId>org.o-ran-sc.nonrtric</groupId>
<artifactId>helm-manager</artifactId>
<!-- Update this when stepping version of the helm-manager -->
- <version>1.1.0-SNAPSHOT</version>
+ <version>1.1.1-SNAPSHOT</version>
<licenses>
<license>
<name>The Apache Software License, Version 2.0</name>
</properties>
<dependencies>
<dependency>
- <groupId>org.onap.policy.clamp.participant</groupId>
- <artifactId>policy-clamp-participant-impl-kubernetes</artifactId>
- <version>${policy-clamp-participant-impl-kubernetes.version}</version>
+ <groupId>org.onap.policy.clamp.participant</groupId>
+ <artifactId>policy-clamp-participant-impl-kubernetes</artifactId>
+ <version>${policy-clamp-participant-impl-kubernetes.version}</version>
</dependency>
</dependencies>
<build>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<executions>
- <execution>
- <id>copy</id>
- <phase>package</phase>
- <goals>
- <goal>copy</goal>
- </goals>
- <configuration>
- <artifactItems>
- <artifactItem>
- <groupId>org.onap.policy.clamp.participant</groupId>
- <artifactId>policy-clamp-participant-impl-kubernetes</artifactId>
- <version>${policy-clamp-participant-impl-kubernetes.version}</version>
- <type>jar</type>
- <overWrite>true</overWrite>
- <outputDirectory>${basedir}/target</outputDirectory>
- <destFileName>app.jar</destFileName>
- </artifactItem>
- </artifactItems>
- </configuration>
- </execution>
+ <execution>
+ <id>copy</id>
+ <phase>package</phase>
+ <goals>
+ <goal>copy</goal>
+ </goals>
+ <configuration>
+ <artifactItems>
+ <artifactItem>
+ <groupId>org.onap.policy.clamp.participant</groupId>
+ <artifactId>policy-clamp-participant-impl-kubernetes</artifactId>
+ <version>${policy-clamp-participant-impl-kubernetes.version}</version>
+ <type>jar</type>
+ <overWrite>true</overWrite>
+ <outputDirectory>${basedir}/target</outputDirectory>
+ <destFileName>app.jar</destFileName>
+ </artifactItem>
+ </artifactItems>
+ </configuration>
+ </execution>
</executions>
</plugin>
<plugin>
<system>JIRA</system>
<url>https://jira.o-ran-sc.org/</url>
</issueManagement>
-</project>
\ No newline at end of file
+</project>
RUN mkdir -p /var/log/information-coordinator-service
RUN mkdir -p /opt/app/information-coordinator-service/etc/cert/
RUN mkdir -p /var/information-coordinator-service
-RUN chmod -R 777 /var/information-coordinator-service
EXPOSE 8083 8434
ADD /config/keystore.jks /opt/app/information-coordinator-service/etc/cert/keystore.jks
ADD /config/truststore.jks /opt/app/information-coordinator-service/etc/cert/truststore.jks
+ARG user=nonrtric
+ARG group=nonrtric
-RUN chmod -R 777 /opt/app/information-coordinator-service/config/
+RUN groupadd $group && \
+    useradd -r -g $group $user
+RUN chown -R $user:$group /opt/app/information-coordinator-service
+RUN chown -R $user:$group /var/log/information-coordinator-service
+RUN chown -R $user:$group /var/information-coordinator-service
+
+USER ${user}
CMD ["java", "-jar", "/opt/app/information-coordinator-service/information-coordinator-service.jar"]
}},
"/actuator/threaddump": {"get": {
"summary": "Actuator web endpoint 'threaddump'",
- "operationId": "handle_2_1_3",
+ "operationId": "threaddump_4",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
},
"/actuator/loggers": {"get": {
"summary": "Actuator web endpoint 'loggers'",
- "operationId": "handle_6",
+ "operationId": "loggers_2",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
}},
"/actuator/health/**": {"get": {
"summary": "Actuator web endpoint 'health-path'",
- "operationId": "handle_12",
+ "operationId": "health-path_2",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
}},
"/actuator/metrics/{requiredMetricName}": {"get": {
"summary": "Actuator web endpoint 'metrics-requiredMetricName'",
- "operationId": "handle_5",
+ "operationId": "metrics-requiredMetricName_2",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
"/actuator/loggers/{name}": {
"post": {
"summary": "Actuator web endpoint 'loggers-name'",
- "operationId": "handle_0",
+ "operationId": "loggers-name_3",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
},
"get": {
"summary": "Actuator web endpoint 'loggers-name'",
- "operationId": "handle_7",
+ "operationId": "loggers-name_4",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
}},
"/actuator/metrics": {"get": {
"summary": "Actuator web endpoint 'metrics'",
- "operationId": "handle_4",
+ "operationId": "metrics_2",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
}},
"/actuator/info": {"get": {
"summary": "Actuator web endpoint 'info'",
- "operationId": "handle_9",
+ "operationId": "info_2",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
},
"/actuator/logfile": {"get": {
"summary": "Actuator web endpoint 'logfile'",
- "operationId": "handle_8",
+ "operationId": "logfile_2",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
}},
"/actuator/health": {"get": {
"summary": "Actuator web endpoint 'health'",
- "operationId": "handle_11",
+ "operationId": "health_2",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
}},
"/actuator/heapdump": {"get": {
"summary": "Actuator web endpoint 'heapdump'",
- "operationId": "handle_10",
+ "operationId": "heapdump_2",
"responses": {"200": {
"description": "OK",
"content": {"*/*": {"schema": {"type": "object"}}}
"version": "1.0"
},
"tags": [
+ {"name": "A1-EI (callbacks)"},
+ {
+ "name": "Data producer (callbacks)",
+ "description": "API implemented by data producers"
+ },
+ {"name": "Data consumer"},
+ {"name": "Data consumer (callbacks)"},
{
"name": "A1-EI (registration)",
"description": "Data consumer EI job registration"
"name": "A1-EI (callbacks)",
"description": "Data consumer EI job status callbacks"
},
- {
- "name": "Data producer (callbacks)",
- "description": "API implemented by data producers"
- },
+ {"name": "Service status"},
+ {"name": "A1-EI (registration)"},
+ {"name": "Data producer (registration)"},
+ {"name": "Data producer (callbacks)"},
{
"name": "Data producer (registration)",
"description": "API for data producers"
},
- {
- "name": "Service status",
- "description": "API for monitoring of the service"
- },
{
"name": "Data consumer",
"description": "API for data consumers"
},
+ {
+ "name": "Service status",
+ "description": "API for monitoring of the service"
+ },
{
"name": "Actuator",
"description": "Monitor and interact",
servers:
- url: /
tags:
+- name: A1-EI (callbacks)
+- name: Data producer (callbacks)
+ description: API implemented by data producers
+- name: Data consumer
+- name: Data consumer (callbacks)
- name: A1-EI (registration)
description: Data consumer EI job registration
- name: A1-EI (callbacks)
description: Data consumer EI job status callbacks
+- name: Service status
+- name: A1-EI (registration)
+- name: Data producer (registration)
- name: Data producer (callbacks)
- description: API implemented by data producers
- name: Data producer (registration)
description: API for data producers
-- name: Service status
- description: API for monitoring of the service
- name: Data consumer
description: API for data consumers
+- name: Service status
+ description: API for monitoring of the service
- name: Actuator
description: Monitor and interact
externalDocs:
tags:
- Actuator
summary: Actuator web endpoint 'threaddump'
- operationId: handle_2_1_3
+ operationId: threaddump_4
responses:
200:
description: OK
tags:
- Actuator
summary: Actuator web endpoint 'loggers'
- operationId: handle_6
+ operationId: loggers_2
responses:
200:
description: OK
tags:
- Actuator
summary: Actuator web endpoint 'health-path'
- operationId: handle_12
+ operationId: health-path_2
responses:
200:
description: OK
tags:
- Actuator
summary: Actuator web endpoint 'metrics-requiredMetricName'
- operationId: handle_5
+ operationId: metrics-requiredMetricName_2
parameters:
- name: requiredMetricName
in: path
tags:
- Actuator
summary: Actuator web endpoint 'loggers-name'
- operationId: handle_7
+ operationId: loggers-name_4
parameters:
- name: name
in: path
tags:
- Actuator
summary: Actuator web endpoint 'loggers-name'
- operationId: handle_0
+ operationId: loggers-name_3
parameters:
- name: name
in: path
tags:
- Actuator
summary: Actuator web endpoint 'metrics'
- operationId: handle_4
+ operationId: metrics_2
responses:
200:
description: OK
tags:
- Actuator
summary: Actuator web endpoint 'info'
- operationId: handle_9
+ operationId: info_2
responses:
200:
description: OK
tags:
- Actuator
summary: Actuator web endpoint 'logfile'
- operationId: handle_8
+ operationId: logfile_2
responses:
200:
description: OK
tags:
- Actuator
summary: Actuator web endpoint 'health'
- operationId: handle_11
+ operationId: health_2
responses:
200:
description: OK
tags:
- Actuator
summary: Actuator web endpoint 'heapdump'
- operationId: handle_10
+ operationId: heapdump_2
responses:
200:
description: OK
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
- <version>2.5.3</version>
+ <version>2.6.2</version>
<relativePath />
</parent>
<groupId>org.o-ran-sc.nonrtric</groupId>
<artifactId>information-coordinator-service</artifactId>
- <version>1.2.0-SNAPSHOT</version>
+ <version>1.2.1-SNAPSHOT</version>
<licenses>
<license>
<name>The Apache Software License, Version 2.0</name>
<java.version>11</java.version>
<springfox.version>3.0.0</springfox.version>
<immutable.version>2.8.2</immutable.version>
- <sdk.version>1.1.6</sdk.version>
<swagger.version>2.1.6</swagger.version>
- <json.version>20190722</json.version>
- <commons-net.version>3.6</commons-net.version>
+ <json.version>20211205</json.version>
<maven-compiler-plugin.version>3.8.0</maven-compiler-plugin.version>
<formatter-maven-plugin.version>2.12.2</formatter-maven-plugin.version>
<spotless-maven-plugin.version>1.24.3</spotless-maven-plugin.version>
<swagger-codegen-maven-plugin.version>3.0.11</swagger-codegen-maven-plugin.version>
<docker-maven-plugin>0.30.0</docker-maven-plugin>
- <version.dmaap>1.1.11</version.dmaap>
- <javax.ws.rs-api.version>2.1.1</javax.ws.rs-api.version>
<sonar-maven-plugin.version>3.7.0.1746</sonar-maven-plugin.version>
<jacoco-maven-plugin.version>0.8.5</jacoco-maven-plugin.version>
<exec.skip>true</exec.skip>
<dependency>
<groupId>org.springdoc</groupId>
<artifactId>springdoc-openapi-ui</artifactId>
- <version>1.5.4</version>
+ <version>1.6.3</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>swagger-jaxrs2-servlet-initializer</artifactId>
<version>${swagger.version}</version>
</dependency>
- <dependency>
- <groupId>javax.xml.bind</groupId>
- <artifactId>jaxb-api</artifactId>
- </dependency>
<dependency>
<groupId>org.immutables</groupId>
<artifactId>value</artifactId>
<artifactId>json</artifactId>
<version>${json.version}</version>
</dependency>
- <dependency>
- <groupId>commons-net</groupId>
- <artifactId>commons-net</artifactId>
- <version>${commons-net.version}</version>
- </dependency>
- <dependency>
- <groupId>org.onap.dcaegen2.services.sdk.rest.services</groupId>
- <artifactId>cbs-client</artifactId>
- <version>${sdk.version}</version>
- </dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<scope>provided</scope>
</dependency>
- <dependency>
- <groupId>javax.ws.rs</groupId>
- <artifactId>javax.ws.rs-api</artifactId>
- <version>${javax.ws.rs-api.version}</version>
- </dependency>
<!-- https://mvnrepository.com/artifact/com.github.erosb/everit-json-schema -->
<dependency>
<groupId>com.github.erosb</groupId>
<java>
<removeUnusedImports />
<importOrder>
- <order>com,java,javax,org</order>
+ <order>com,java,org</order>
</importOrder>
</java>
</configuration>
<system>JIRA</system>
<url>https://jira.o-ran-sc.org/</url>
</issueManagement>
-</project>
\ No newline at end of file
+</project>
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
-
import javax.net.ssl.KeyManagerFactory;
import org.oransc.ics.configuration.WebClientConfig;
private final AsyncRestClient restClient;
private final InfoJobs eiJobs;
- private final InfoProducers eiProducers;
@Autowired
- public A1eCallbacks(ApplicationConfig config, InfoJobs eiJobs, InfoProducers eiProducers) {
+ public A1eCallbacks(ApplicationConfig config, InfoJobs eiJobs) {
AsyncRestClientFactory restClientFactory = new AsyncRestClientFactory(config.getWebClientConfig());
this.restClient = restClientFactory.createRestClientUseHttpProxy("");
this.eiJobs = eiJobs;
- this.eiProducers = eiProducers;
}
- public Flux<String> notifyJobStatus(Collection<InfoType> eiTypes) {
+ public Flux<String> notifyJobStatus(Collection<InfoType> eiTypes, InfoProducers eiProducers) {
return Flux.fromIterable(eiTypes) //
.flatMap(eiType -> Flux.fromIterable(this.eiJobs.getJobsForType(eiType))) //
.filter(eiJob -> !eiJob.getJobStatusUrl().isEmpty()) //
- .filter(eiJob -> this.eiProducers.isJobEnabled(eiJob) != eiJob.isLastStatusReportedEnabled())
- .flatMap(this::noifyStatusToJobOwner);
+ .filter(eiJob -> eiProducers.isJobEnabled(eiJob) != eiJob.isLastStatusReportedEnabled())
+ .flatMap(eiJob -> noifyStatusToJobOwner(eiJob, eiProducers));
}
- private Mono<String> noifyStatusToJobOwner(InfoJob job) {
- boolean isJobEnabled = this.eiProducers.isJobEnabled(job);
+ private Mono<String> noifyStatusToJobOwner(InfoJob job, InfoProducers eiProducers) {
+ boolean isJobEnabled = eiProducers.isJobEnabled(job);
A1eEiJobStatus status = isJobEnabled ? new A1eEiJobStatus(A1eEiJobStatus.EiJobStatusValues.ENABLED)
: new A1eEiJobStatus(A1eEiJobStatus.EiJobStatusValues.DISABLED);
String body = gson.toJson(status);
producerCallbacks.startInfoJobs(producer, this.infoJobs) //
.collectList() //
- .flatMapMany(list -> consumerCallbacks.notifyJobStatus(producer.getInfoTypes())) //
+ .flatMapMany(list -> consumerCallbacks.notifyJobStatus(producer.getInfoTypes(), this)) //
.collectList() //
- .flatMapMany(list -> consumerCallbacks.notifyJobStatus(previousTypes)) //
+ .flatMapMany(list -> consumerCallbacks.notifyJobStatus(previousTypes, this)) //
.subscribe();
return producer;
this.logger.error("Bug, no producer found");
}
}
- this.consumerCallbacks.notifyJobStatus(producer.getInfoTypes()) //
+ this.consumerCallbacks.notifyJobStatus(producer.getInfoTypes(), this) //
.subscribe();
}
.filter(infoJob -> !producer.isJobEnabled(infoJob)) //
.flatMap(infoJob -> producerCallbacks.startInfoJob(producer, infoJob, Retry.max(1)), MAX_CONCURRENCY) //
.collectList() //
- .flatMapMany(startedJobs -> consumerCallbacks.notifyJobStatus(producer.getInfoTypes())) //
+ .flatMapMany(startedJobs -> consumerCallbacks.notifyJobStatus(producer.getInfoTypes(), infoProducers)) //
.collectList();
}
-Subproject commit 6e31874958b44f45c5dd78aef5c783916b16c6ee
+Subproject commit 3d2a09b1bc7d6798c8083bfc3dc04c69a1b709c7
<module>r-app-catalogue</module>
<module>helm-manager</module>
<module>dmaap-adaptor-java</module>
- <module>dmaap-mediator-producer</module>
- <module>test/usecases/oruclosedlooprecovery/goversion</module>
- <module>test/usecases/odusliceassurance/goversion</module>
</modules>
<build>
<plugins>
ADD /config/r-app-catalogue-keystore.jks /opt/app/r-app-catalogue/etc/cert/keystore.jks
ADD target/${JAR} /opt/app/r-app-catalogue/r-app-catalogue.jar
+ARG user=nonrtric
+ARG group=nonrtric
-RUN chmod -R 777 /opt/app/r-app-catalogue/config/
+RUN groupadd $user && \
+ useradd -r -g $group $user
+RUN chown -R $user:$group /opt/app/r-app-catalogue
+RUN chown -R $user:$group /var/log/r-app-catalogue
+
+USER ${user}
CMD ["java", "-jar", "/opt/app/r-app-catalogue/r-app-catalogue.jar"]
summary: Services
operationId: getServices
responses:
- 200:
+ "200":
description: Services
content:
application/json:
type: string
example: DroneIdentifier
responses:
- 200:
+ "200":
description: Service
content:
application/json:
schema:
$ref: '#/components/schemas/service'
- 404:
+ "404":
description: Service is not found
content:
application/json:
$ref: '#/components/schemas/inputService'
required: true
responses:
- 200:
+ "200":
description: Service updated
- 201:
+ "201":
description: Service created
headers:
Location:
explode: false
schema:
type: string
- 400:
+ "400":
description: Provided service is not correct
content:
application/json:
schema:
$ref: '#/components/schemas/error_information'
example:
- detail: 'Service is missing required property: version'
+ detail: "Service is missing required property: version"
status: 400
deprecated: false
delete:
type: string
example: DroneIdentifier
responses:
- 204:
+ "204":
description: Service deleted
deprecated: false
components:
<parent>\r
<groupId>org.springframework.boot</groupId>\r
<artifactId>spring-boot-starter-parent</artifactId>\r
- <version>2.3.4.RELEASE</version>\r
+ <version>2.6.2</version>\r
<relativePath />\r
</parent>\r
<groupId>org.o-ran-sc.nonrtric</groupId>\r
<artifactId>r-app-catalogue</artifactId>\r
- <version>1.1.0-SNAPSHOT</version>\r
- <licenses>\r
+ <version>1.0.2-SNAPSHOT</version>\r
+ <licenses>\r
<license>\r
<name>The Apache Software License, Version 2.0</name>\r
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>\r
<swagger-annotations.version>1.5.22</swagger-annotations.version>\r
<springfox.version>2.9.2</springfox.version>\r
<jackson-databind-nullable.version>0.2.1</jackson-databind-nullable.version>\r
- <openapi-generator-maven-plugin.version>4.3.1</openapi-generator-maven-plugin.version>\r
- <swagger-codegen-maven-plugin.version>3.0.11</swagger-codegen-maven-plugin.version>\r
+ <openapi-generator-maven-plugin.version>5.3.1</openapi-generator-maven-plugin.version>\r
+ <swagger-codegen-maven-plugin.version>3.0.31</swagger-codegen-maven-plugin.version>\r
<formatter-maven-plugin.version>2.12.2</formatter-maven-plugin.version>\r
<spotless-maven-plugin.version>1.24.3</spotless-maven-plugin.version>\r
<jacoco-maven-plugin.version>0.8.6</jacoco-maven-plugin.version>\r
</plugin>\r
</plugins>\r
</build>\r
-</project>
\ No newline at end of file
+</project>\r
package org.oransc.rappcatalogue;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThrows;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import javax.net.ssl.SSLContext;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.ssl.SSLContextBuilder;
-import org.junit.Test;
-import org.junit.runner.RunWith;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.http.ResponseEntity;
import org.springframework.http.client.HttpComponentsClientHttpRequestFactory;
import org.springframework.test.context.TestPropertySource;
-import org.springframework.test.context.junit4.SpringRunner;
+import org.springframework.test.context.junit.jupiter.SpringExtension;
import org.springframework.util.ResourceUtils;
import org.springframework.web.client.ResourceAccessException;
-@RunWith(SpringRunner.class)
+@ExtendWith(SpringExtension.class)
@SpringBootTest(webEnvironment = WebEnvironment.RANDOM_PORT)
@TestPropertySource(
properties = { //
#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
#the image is not configured in the supplied env_file
#Used for images not applicable to all supported profile
-CONDITIONALLY_IGNORED_IMAGES="NGW"
+CONDITIONALLY_IGNORED_IMAGES="NGW CBS CONSUL"
#Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
start_gateway $SIM_GROUP/$NRT_GATEWAY_COMPOSE_DIR/$NRT_GATEWAY_CONFIG_FILE
fi
- if [ $RUNMODE == "DOCKER" ]; then
- start_consul_cbs
- fi
-
if [[ $interface = *"SDNC"* ]]; then
start_sdnc
prepare_consul_config SDNC ".consul_config.json"
if [ $RUNMODE == "KUBE" ]; then
agent_load_config ".consul_config.json"
else
- consul_config_app ".consul_config.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"NOCONSUL"* ]]; then
+ #Temporarily switch to http/https if dmaap is used. Otherwise it is not possible to push config
+ if [ $__httpx == "HTTPS" ]; then
+ use_agent_rest_https
+ else
+ use_agent_rest_http
+ fi
+ api_put_configuration 200 ".consul_config.json"
+ if [ $__httpx == "HTTPS" ]; then
+ if [[ $interface = *"DMAAP"* ]]; then
+ use_agent_dmaap_https
+ else
+ use_agent_rest_https
+ fi
+ else
+ if [[ $interface = *"DMAAP"* ]]; then
+ use_agent_dmaap_http
+ else
+ use_agent_rest_http
+ fi
+ fi
+ else
+ start_consul_cbs
+ consul_config_app ".consul_config.json"
+ fi
fi
mr_equal requests_submitted 0
#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
#the image is not configured in the supplied env_file
#Used for images not applicable to all supported profile
-CONDITIONALLY_IGNORED_IMAGES="NGW"
+CONDITIONALLY_IGNORED_IMAGES="CBS CONSUL NGW"
#Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
start_cr 1
if [ $RUNMODE == "DOCKER" ]; then
- start_consul_cbs
+ if [[ "$PMS_FEATURE_LEVEL" != *"NOCONSUL"* ]]; then
+ start_consul_cbs
+ fi
fi
start_control_panel $SIM_GROUP/$CONTROL_PANEL_COMPOSE_DIR/$CONTROL_PANEL_CONFIG_FILE
if [ $RUNMODE == "KUBE" ]; then
agent_load_config ".consul_config.json"
else
- consul_config_app ".consul_config.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"NOCONSUL"* ]]; then
+ api_put_configuration 200 ".consul_config.json"
+ else
+ consul_config_app ".consul_config.json"
+ fi
fi
api_get_status 200
#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
#the image is not configured in the supplied env_file
#Used for images not applicable to all supported profile
-CONDITIONALLY_IGNORED_IMAGES="NGW"
+CONDITIONALLY_IGNORED_IMAGES="CBS CONSUL NGW"
#Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
start_ric_simulators ricsim_g1 1 OSC_2.1.0
start_ric_simulators ricsim_g2 1 STD_1.1.3
+
+ sim_put_policy_type 201 ricsim_g1_1 1 testdata/OSC/sim_1.json
+ sim_put_policy_type 201 ricsim_g1_1 2 testdata/OSC/sim_2.json
+
if [ "$PMS_VERSION" == "V2" ]; then
start_ric_simulators ricsim_g3 1 STD_2.0.0
+ sim_put_policy_type 201 ricsim_g3_1 STD_QOS_0_2_0 testdata/STD2/sim_qos.json
+ sim_put_policy_type 201 ricsim_g3_1 STD_QOS2_0.1.0 testdata/STD2/sim_qos2.json
fi
start_mr
start_gateway $SIM_GROUP/$NRT_GATEWAY_COMPOSE_DIR/$NRT_GATEWAY_CONFIG_FILE
fi
- if [ $RUNMODE == "DOCKER" ]; then
- start_consul_cbs
- fi
-
if [[ $interface = *"SDNC"* ]]; then
start_sdnc
prepare_consul_config SDNC ".consul_config.json"
if [ $RUNMODE == "KUBE" ]; then
agent_load_config ".consul_config.json"
else
- consul_config_app ".consul_config.json"
- fi
+ if [[ "$PMS_FEATURE_LEVEL" == *"NOCONSUL"* ]]; then
+ #Temporarily switch to http/https if dmaap is used. Otherwise it is not possible to push config
+ if [ $__httpx == "HTTPS" ]; then
+ use_agent_rest_https
+ else
+ use_agent_rest_http
+ fi
+
+ if [[ $interface != *"DMAAP"* ]]; then
+ echo "{}" > ".consul_config_incorrect.json"
+ api_put_configuration 400 ".consul_config_incorrect.json"
+ fi
+
+ api_put_configuration 200 ".consul_config.json"
+ api_get_configuration 200 ".consul_config.json"
+ if [ $__httpx == "HTTPS" ]; then
+ if [[ $interface = *"DMAAP"* ]]; then
+ use_agent_dmaap_https
+ else
+ use_agent_rest_https
+ fi
+ else
+ if [[ $interface = *"DMAAP"* ]]; then
+ use_agent_dmaap_http
+ else
+ use_agent_rest_http
+ fi
+ fi
- sim_put_policy_type 201 ricsim_g1_1 1 testdata/OSC/sim_1.json
- sim_put_policy_type 201 ricsim_g1_1 2 testdata/OSC/sim_2.json
+ else
+ start_consul_cbs
+ consul_config_app ".consul_config.json"
+ fi
+ fi
if [ "$PMS_VERSION" == "V2" ]; then
- sim_put_policy_type 201 ricsim_g3_1 STD_QOS_0_2_0 testdata/STD2/sim_qos.json
- sim_put_policy_type 201 ricsim_g3_1 STD_QOS2_0.1.0 testdata/STD2/sim_qos2.json
-
api_equal json:rics 3 300
api_equal json:policy-types 5 120
api_get_status 200
+ api_get_status_root 200
+
echo "############################################"
echo "##### Service registry and supervision #####"
echo "############################################"
else
notificationurl=""
fi
+ if [[ $interface != *"DMAAP"* ]]; then
+ # Badly formatted json is not possible to send via dmaap
+ api_put_policy 400 "unregistered-service" ricsim_g1_1 1 2000 NOTRANSIENT $notificationurl testdata/OSC/pi_bad_template.json
+ fi
deviation "TR10 - agent allows policy creation on unregistered service (orig problem) - test combo $interface and $__httpx"
#Kept until decision
#api_put_policy 400 "unregistered-service" ricsim_g1_1 1 2000 NOTRANSIENT testdata/OSC/pi1_template.json
api_put_policy 200 "service10" ricsim_g3_1 STD_QOS2_0.1.0 5200 false $notificationurl testdata/STD2/pi_qos2_template.json
fi
+ api_get_policy_status 404 1
+ api_get_policy_status 404 2
VAL='NOT IN EFFECT'
api_get_policy_status 200 5000 OSC "$VAL" "false"
api_get_policy_status 200 5100 STD "UNDEFINED"
#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
#the image is not configured in the supplied env_file
#Used for images not applicable to all supported profile
-CONDITIONALLY_IGNORED_IMAGES="NGW"
+CONDITIONALLY_IGNORED_IMAGES="CBS CONSUL NGW"
#Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
start_cr 1
-if [ $RUNMODE == "DOCKER" ]; then
- start_consul_cbs
-fi
-
start_control_panel $SIM_GROUP/$CONTROL_PANEL_COMPOSE_DIR/$CONTROL_PANEL_CONFIG_FILE
if [ ! -z "$NRT_GATEWAY_APP_NAME" ]; then
if [ $RUNMODE == "KUBE" ]; then
agent_load_config ".consul_config.json"
else
- consul_config_app ".consul_config.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"NOCONSUL"* ]]; then
+ api_put_configuration 200 ".consul_config.json"
+ else
+ start_consul_cbs
+ consul_config_app ".consul_config.json"
+ fi
fi
set_agent_debug
CONDITIONALLY_IGNORED_IMAGES="NGW"
#Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
KUBE_PRESTARTED_IMAGES=" "
+#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
+#the image is not configured in the supplied env_file
+#Used for images not applicable to all supported profile
+CONDITIONALLY_IGNORED_IMAGES=""
+
#Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
#App names to include in the test when running kubernetes, space separated list
KUBE_INCLUDED_IMAGES="ICS PRODSTUB CP CR KUBEPROXY NGW"
#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
-KUBE_PRESTARTED_IMAGES=""
+KUBE_PRESTARTED_IMAGES="NGW"
#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
#the image is not configured in the supplied env_file
CONDITIONALLY_IGNORED_IMAGES="NGW"
#Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
#the image is not configured in the supplied env_file
#Used for images not applicable to all supported profile
-CONDITIONALLY_IGNORED_IMAGES="NGW"
+CONDITIONALLY_IGNORED_IMAGES="CBS CONSUL NGW"
#Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
start_policy_agent PROXY $SIM_GROUP/$POLICY_AGENT_COMPOSE_DIR/$POLICY_AGENT_CONFIG_FILE
-if [ $RUNMODE == "DOCKER" ]; then
- start_consul_cbs
-fi
-
prepare_consul_config NOSDNC ".consul_config.json"
if [ $RUNMODE == "KUBE" ]; then
agent_load_config ".consul_config.json"
else
- consul_config_app ".consul_config.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"NOCONSUL"* ]]; then
+ api_put_configuration 200 ".consul_config.json"
+ else
+ start_consul_cbs
+ consul_config_app ".consul_config.json"
+ fi
fi
start_cr 1
#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
KUBE_PRESTARTED_IMAGES=" "
+#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
+#the image is not configured in the supplied env_file
+#Used for images not applicable to all supported profile
+CONDITIONALLY_IGNORED_IMAGES=""
+
#Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-ISTANBUL"
+SUPPORTED_PROFILES="ONAP-ISTANBUL ONAP-JAKARTA"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER"
CONDITIONALLY_IGNORED_IMAGES=""
#Supported test environment profiles
-SUPPORTED_PROFILES="ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
#the image is not configured in the supplied env_file
#Used for images not applicable to all supported profile
-CONDITIONALLY_IGNORED_IMAGES="NGW"
+CONDITIONALLY_IGNORED_IMAGES="CBS CONSUL NGW"
#Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
start_cr 1
- if [ $RUNMODE == "DOCKER" ]; then
- start_consul_cbs
- fi
-
start_control_panel $SIM_GROUP/$CONTROL_PANEL_COMPOSE_DIR/$CONTROL_PANEL_CONFIG_FILE
if [ ! -z "$NRT_GATEWAY_APP_NAME" ]; then
if [ $RUNMODE == "KUBE" ]; then
agent_load_config ".consul_config.json"
else
- consul_config_app ".consul_config.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"NOCONSUL"* ]]; then
+ #Temporarily switch to http/https if dmaap is used. Otherwise it is not possible to push config
+ if [ $__httpx == "HTTPS" ]; then
+ use_agent_rest_https
+ else
+ use_agent_rest_http
+ fi
+ api_put_configuration 200 ".consul_config.json"
+ if [ $__httpx == "HTTPS" ]; then
+ if [[ $interface = *"DMAAP"* ]]; then
+ use_agent_dmaap_https
+ else
+ use_agent_rest_https
+ fi
+ else
+ if [[ $interface = *"DMAAP"* ]]; then
+ use_agent_dmaap_http
+ else
+ use_agent_rest_http
+ fi
+ fi
+ else
+ start_consul_cbs
+ consul_config_app ".consul_config.json"
+ fi
fi
api_get_status 200
sim_equal ricsim_g1_1 num_instances 0
- sim_equal ricsim_g1_1 num_instances $NUM_POLICIES 300
+ if [[ $interface = *"SDNC"* ]]; then
+ deviation "Sync over SDNC seem to be slower from Jakarta version..."
+ sim_equal ricsim_g1_1 num_instances $NUM_POLICIES 2000
+ else
+ sim_equal ricsim_g1_1 num_instances $NUM_POLICIES 300
+ fi
START_ID2=$(($START_ID+$NUM_POLICIES))
sim_post_delete_instances 200 ricsim_g2_1
sim_equal ricsim_g2_1 num_instances 0
-
- sim_equal ricsim_g2_1 num_instances $NUM_POLICIES 300
+ if [[ $interface = *"SDNC"* ]]; then
+ deviation "Sync over SDNC seem to be slower from Jakarta version..."
+ sim_equal ricsim_g2_1 num_instances $NUM_POLICIES 2000
+ else
+ sim_equal ricsim_g2_1 num_instances $NUM_POLICIES 300
+ fi
api_delete_policy 204 $(($START_ID+47))
sim_post_delete_instances 200 ricsim_g1_1
- sim_equal ricsim_g1_1 num_instances $(($NUM_POLICIES-2)) 300
+ if [[ $interface = *"SDNC"* ]]; then
+ deviation "Sync over SDNC seem to be slower from Jakarta version..."
+ sim_equal ricsim_g1_1 num_instances $(($NUM_POLICIES-2)) 2000
+ else
+ sim_equal ricsim_g1_1 num_instances $(($NUM_POLICIES-2)) 300
+ fi
api_delete_policy 204 $(($START_ID2+37))
sim_post_delete_instances 200 ricsim_g2_1
- sim_equal ricsim_g1_1 num_instances $(($NUM_POLICIES-2)) 300
+ if [[ $interface = *"SDNC"* ]]; then
+ deviation "Sync over SDNC seem to be slower from Jakarta version..."
+ sim_equal ricsim_g1_1 num_instances $(($NUM_POLICIES-2)) 2000
+
+ sim_equal ricsim_g2_1 num_instances $(($NUM_POLICIES-3)) 2000
+ else
+ sim_equal ricsim_g1_1 num_instances $(($NUM_POLICIES-2)) 300
- sim_equal ricsim_g2_1 num_instances $(($NUM_POLICIES-3)) 300
+ sim_equal ricsim_g2_1 num_instances $(($NUM_POLICIES-3)) 300
+ fi
api_equal json:policies $(($NUM_POLICIES-2+$NUM_POLICIES-3))
CONDITIONALLY_IGNORED_IMAGES=""
#Supported test environment profiles
-SUPPORTED_PROFILES="ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
NUM_CR=10 # Number of callback receivers, divide all callbacks to this number of servers - for load sharing
## Note: The number of jobs must be a multiple of the number of CRs in order to calculate the number of expected events in each CR
NUM_JOBS=200 # Mediator and adapter gets same number of jobs for every type
-
if [ $NUM_JOBS -lt $NUM_CR ]; then
__log_conf_fail_general "Number of jobs: $NUM_JOBS must be greater then the number of CRs: $NUM_CR"
fi
set_dmaapadp_trace
-start_dmaapmed NOPROXY $SIM_GROUP/$DMAAP_MED_COMPOSE_DIR/$DMAAP_MED_DATA_FILE
+if [[ "$DMAAP_MED_FEATURE_LEVEL" == *"KAFKATYPES"* ]]; then
+ kafkapc_api_create_topic 201 "unauthenticated.dmaapmed_kafka.text" "text/plain"
+
+ kafkapc_api_start_sending 200 "unauthenticated.dmaapmed_kafka.text"
+fi
+
+start_dmaapmed NOPROXY $SIM_GROUP/$DMAAP_MED_COMPOSE_DIR/$DMAAP_MED_HOST_DATA_FILE
ics_equal json:data-producer/v1/info-producers 2 60
# Check producers
ics_api_idc_get_job_ids 200 NOTYPE NOWNER EMPTY
-ics_api_idc_get_type_ids 200 ExampleInformationType STD_Fault_Messages ExampleInformationTypeKafka
ics_api_edp_get_producer_ids_2 200 NOTYPE DmaapGenericInfoProducer DMaaP_Mediator_Producer
+if [[ "$DMAAP_MED_FEATURE_LEVEL" != *"KAFKATYPES"* ]]; then
+ ics_api_idc_get_type_ids 200 ExampleInformationType STD_Fault_Messages ExampleInformationTypeKafka
+else
+ ics_api_idc_get_type_ids 200 ExampleInformationType STD_Fault_Messages ExampleInformationTypeKafka Kafka_TestTopic
+fi
# Create jobs for adapter - CR stores data as MD5 hash
cr_index=$(($i%$NUM_CR))
service_mr="CR_SERVICE_MR_PATH_"$cr_index
service_app="CR_SERVICE_APP_PATH_"$cr_index
- ics_api_idc_put_job 201 job-med-$i STD_Fault_Messages ${!service_mr}/job-med-data$i"?storeas=md5" info-owner-med-$i ${!service_app}/job_status_info-owner-med-$i testdata/dmaap-adapter/job-template.json
+ ics_api_idc_put_job 201 job-med-$i STD_Fault_Messages ${!service_mr}/job-med-data$i"?storeas=md5" info-owner-med-$i ${!service_app}/job_status_info-owner-med-$i testdata/dmaap-mediator/job-template.json
done
print_timer
+if [[ "$DMAAP_MED_FEATURE_LEVEL" == *"KAFKATYPES"* ]]; then
+ # Create jobs for mediator kafka - CR stores data as MD5 hash
+ start_timer "Create mediator (kafka) jobs: $NUM_JOBS"
+ for ((i=1; i<=$NUM_JOBS; i++))
+ do
+ cr_index=$(($i%$NUM_CR))
+ service_text="CR_SERVICE_TEXT_PATH_"$cr_index
+ service_app="CR_SERVICE_APP_PATH_"$cr_index
+ ics_api_idc_put_job 201 job-med-kafka-$i Kafka_TestTopic ${!service_text}/job-med-kafka-data$i"?storeas=md5" info-owner-med-kafka-$i ${!service_app}/job_status_info-owner-med-kafka-$i testdata/dmaap-mediator/job-template-1-kafka.json
+ done
+ print_timer
+fi
+
# Check job status
for ((i=1; i<=$NUM_JOBS; i++))
do
ics_api_a1_get_job_status 200 job-med-$i ENABLED 30
ics_api_a1_get_job_status 200 job-adp-$i ENABLED 30
ics_api_a1_get_job_status 200 job-adp-kafka-$i ENABLED 30
+ if [[ "$DMAAP_MED_FEATURE_LEVEL" == *"KAFKATYPES"* ]]; then
+ ics_api_a1_get_job_status 200 job-med-kafka-$i ENABLED 30
+ fi
done
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
done
EXPECTED_DATA_DELIV=$(($NUM_JOBS/$NUM_CR+$EXPECTED_DATA_DELIV))
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
done
EXPECTED_DATA_DELIV=$(($NUM_JOBS/$NUM_CR+$EXPECTED_DATA_DELIV))
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
done
EXPECTED_DATA_DELIV=$(($NUM_JOBS/$NUM_CR+$EXPECTED_DATA_DELIV))
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
done
EXPECTED_DATA_DELIV=$(($NUM_JOBS/$NUM_CR+$EXPECTED_DATA_DELIV))
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
done
# Check received data callbacks from adapter
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
done
EXPECTED_DATA_DELIV=$(($NUM_JOBS/$NUM_CR+$EXPECTED_DATA_DELIV))
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
done
EXPECTED_DATA_DELIV=$(($NUM_JOBS/$NUM_CR+$EXPECTED_DATA_DELIV))
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
done
EXPECTED_DATA_DELIV=$(($NUM_JOBS/$NUM_CR+$EXPECTED_DATA_DELIV))
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
done
EXPECTED_DATA_DELIV=$(($NUM_JOBS/$NUM_CR+$EXPECTED_DATA_DELIV))
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
done
# Check received data callbacks from adapter kafka
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
done
EXPECTED_DATA_DELIV=$(($NUM_JOBS/$NUM_CR+$EXPECTED_DATA_DELIV))
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
done
EXPECTED_DATA_DELIV=$(($NUM_JOBS/$NUM_CR+$EXPECTED_DATA_DELIV))
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
done
EXPECTED_DATA_DELIV=$(($NUM_JOBS/$NUM_CR+$EXPECTED_DATA_DELIV))
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
done
EXPECTED_DATA_DELIV=$(($NUM_JOBS/$NUM_CR+$EXPECTED_DATA_DELIV))
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
done
# Check received data callbacks from mediator
cr_api_check_single_genric_event_md5_file 200 $cr_index job-med-data$i ./tmp/data_for_dmaap_test.json
done
+if [[ "$DMAAP_MED_FEATURE_LEVEL" == *"KAFKATYPES"* ]]; then
+ ## Send text file via message-router to mediator kafka
+
+ EXPECTED_DATA_DELIV=$(($NUM_JOBS/$NUM_CR+$EXPECTED_DATA_DELIV))
+ kafkapc_api_post_msg_from_file 200 "unauthenticated.dmaapmed_kafka.text" "text/plain" ./tmp/data_for_dmaap_test.txt
+ kafkapc_equal topics/unauthenticated.dmaapmed_kafka.text/counters/sent 1 30
+ for ((i=0; i<$NUM_CR; i++))
+ do
+ cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
+ done
+
+ EXPECTED_DATA_DELIV=$(($NUM_JOBS/$NUM_CR+$EXPECTED_DATA_DELIV))
+ kafkapc_api_post_msg_from_file 200 "unauthenticated.dmaapmed_kafka.text" "text/plain" ./tmp/data_for_dmaap_test.txt
+ kafkapc_equal topics/unauthenticated.dmaapmed_kafka.text/counters/sent 2 30
+ for ((i=0; i<$NUM_CR; i++))
+ do
+ cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
+ done
+
+ EXPECTED_DATA_DELIV=$(($NUM_JOBS/$NUM_CR+$EXPECTED_DATA_DELIV))
+ kafkapc_api_post_msg_from_file 200 "unauthenticated.dmaapmed_kafka.text" "text/plain" ./tmp/data_for_dmaap_test.txt
+ kafkapc_equal topics/unauthenticated.dmaapmed_kafka.text/counters/sent 3 30
+ for ((i=0; i<$NUM_CR; i++))
+ do
+ cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
+ done
+
+ EXPECTED_DATA_DELIV=$(($NUM_JOBS/$NUM_CR+$EXPECTED_DATA_DELIV))
+ kafkapc_api_post_msg_from_file 200 "unauthenticated.dmaapmed_kafka.text" "text/plain" ./tmp/data_for_dmaap_test.txt
+ kafkapc_equal topics/unauthenticated.dmaapmed_kafka.text/counters/sent 4 30
+ for ((i=0; i<$NUM_CR; i++))
+ do
+ cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
+ done
+
+ EXPECTED_DATA_DELIV=$(($NUM_JOBS/$NUM_CR+$EXPECTED_DATA_DELIV))
+ kafkapc_api_post_msg_from_file 200 "unauthenticated.dmaapmed_kafka.text" "text/plain" ./tmp/data_for_dmaap_test.txt
+ kafkapc_equal topics/unauthenticated.dmaapmed_kafka.text/counters/sent 5 30
+ for ((i=0; i<$NUM_CR; i++))
+ do
+ cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
+ done
+
+ # Check received data callbacks from mediator kafka
+ for ((i=1; i<=$NUM_JOBS; i++))
+ do
+ cr_index=$(($i%$NUM_CR))
+ cr_api_check_single_genric_event_md5_file 200 $cr_index job-med-kafka-data$i ./tmp/data_for_dmaap_test.txt
+ cr_api_check_single_genric_event_md5_file 200 $cr_index job-med-kafka-data$i ./tmp/data_for_dmaap_test.txt
+ cr_api_check_single_genric_event_md5_file 200 $cr_index job-med-kafka-data$i ./tmp/data_for_dmaap_test.txt
+ cr_api_check_single_genric_event_md5_file 200 $cr_index job-med-kafka-data$i ./tmp/data_for_dmaap_test.txt
+ cr_api_check_single_genric_event_md5_file 200 $cr_index job-med-kafka-data$i ./tmp/data_for_dmaap_test.txt
+ done
+fi
# Send small json via message-router to adapter
mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-1"}'
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
done
print_timer
-# Send small text via message-routere to adapter
+# Send small text via message-router to adapter
kafkapc_api_post_msg 200 "unauthenticated.dmaapadp_kafka.text" "text/plain" 'Message-------1'
kafkapc_api_post_msg 200 "unauthenticated.dmaapadp_kafka.text" "text/plain" 'Message-------3'
kafkapc_equal topics/unauthenticated.dmaapadp_kafka.text/counters/sent 7 30
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
done
print_timer
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 100
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 100
done
print_timer
+if [[ "$DMAAP_MED_FEATURE_LEVEL" == *"KAFKATYPES"* ]]; then
+ # Send small text via message-router to mediator
+ kafkapc_api_post_msg 200 "unauthenticated.dmaapmed_kafka.text" "text/plain" 'Message-------0'
+ kafkapc_api_post_msg 200 "unauthenticated.dmaapmed_kafka.text" "text/plain" 'Message-------2'
+ kafkapc_equal topics/unauthenticated.dmaapmed_kafka.text/counters/sent 7 30
+
+    # Wait for data reception, mediator kafka
+ EXPECTED_DATA_DELIV=$(($NUM_JOBS*2/$NUM_CR+$EXPECTED_DATA_DELIV))
+ start_timer "Data delivery mediator kafka, 2 strings per job"
+ for ((i=0; i<$NUM_CR; i++))
+ do
+ cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 60
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 60
+ done
+ print_timer
+fi
+
# Check received number of messages for mediator and adapter callbacks
for ((i=1; i<=$NUM_JOBS; i++))
do
cr_index=$(($i%$NUM_CR))
cr_equal $cr_index received_callbacks?id=job-med-data$i $DATA_DELIV_JOBS
+ cr_equal $cr_index received_callback_batches?id=job-med-data$i $DATA_DELIV_JOBS
cr_equal $cr_index received_callbacks?id=job-adp-data$i $DATA_DELIV_JOBS
+ cr_equal $cr_index received_callback_batches?id=job-adp-data$i $DATA_DELIV_JOBS
cr_equal $cr_index received_callbacks?id=job-adp-kafka-data$i $DATA_DELIV_JOBS
+ cr_equal $cr_index received_callback_batches?id=job-adp-kafka-data$i $DATA_DELIV_JOBS
+ if [[ "$DMAAP_MED_FEATURE_LEVEL" == *"KAFKATYPES"* ]]; then
+ cr_equal $cr_index received_callbacks?id=job-med-kafka-data$i $DATA_DELIV_JOBS
+ cr_equal $cr_index received_callback_batches?id=job-med-kafka-data$i $DATA_DELIV_JOBS
+ fi
done
# Check received data and order for mediator and adapter callbacks
cr_api_check_single_genric_event_md5 200 $cr_index job-adp-data$i '{"msg":"msg-3"}'
cr_api_check_single_genric_event_md5 200 $cr_index job-adp-kafka-data$i 'Message-------1'
cr_api_check_single_genric_event_md5 200 $cr_index job-adp-kafka-data$i 'Message-------3'
+ if [[ "$DMAAP_MED_FEATURE_LEVEL" == *"KAFKATYPES"* ]]; then
+ cr_api_check_single_genric_event_md5 200 $cr_index job-med-kafka-data$i 'Message-------0'
+ cr_api_check_single_genric_event_md5 200 $cr_index job-med-kafka-data$i 'Message-------2'
+ fi
done
# Set delay in the callback receiver to slow down callbacks
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 100
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 100
done
print_timer
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 100
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 100
done
print_timer
for ((i=0; i<$NUM_CR; i++))
do
cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 100
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 100
done
print_timer
+if [[ "$DMAAP_MED_FEATURE_LEVEL" == *"KAFKATYPES"* ]]; then
+ # Send small text via message-router to mediator kafka
+ kafkapc_api_post_msg 200 "unauthenticated.dmaapmed_kafka.text" "text/plain" 'Message-------4'
+ kafkapc_api_post_msg 200 "unauthenticated.dmaapmed_kafka.text" "text/plain" 'Message-------6'
+ kafkapc_equal topics/unauthenticated.dmaapmed_kafka.text/counters/sent 9 30
+
+    # Wait for data reception, mediator kafka
+ EXPECTED_DATA_DELIV=$(($NUM_JOBS*2/$NUM_CR+$EXPECTED_DATA_DELIV))
+ start_timer "Data delivery mediator kafka with $SEC_DELAY seconds delay in consumer, 2 strings per job"
+ for ((i=0; i<$NUM_CR; i++))
+ do
+ cr_equal $i received_callbacks $EXPECTED_DATA_DELIV 100
+ cr_equal $i received_callback_batches $EXPECTED_DATA_DELIV 100
+ done
+ print_timer
+fi
+
# Check received number of messages for mediator and adapter callbacks
for ((i=1; i<=$NUM_JOBS; i++))
do
cr_index=$(($i%$NUM_CR))
cr_equal $cr_index received_callbacks?id=job-med-data$i 9
+ cr_equal $cr_index received_callback_batches?id=job-med-data$i 9
cr_equal $cr_index received_callbacks?id=job-adp-data$i 9
+ cr_equal $cr_index received_callback_batches?id=job-adp-data$i 9
cr_equal $cr_index received_callbacks?id=job-adp-kafka-data$i 9
+ cr_equal $cr_index received_callback_batches?id=job-adp-kafka-data$i 9
+ if [[ "$DMAAP_MED_FEATURE_LEVEL" == *"KAFKATYPES"* ]]; then
+ cr_equal $cr_index received_callbacks?id=job-med-kafka-data$i 9
+ cr_equal $cr_index received_callback_batches?id=job-med-kafka-data$i 9
+ fi
done
# Check received data and order for mediator and adapter callbacks
cr_api_check_single_genric_event_md5 200 $cr_index job-adp-data$i '{"msg":"msg-7"}'
cr_api_check_single_genric_event_md5 200 $cr_index job-adp-kafka-data$i 'Message-------5'
cr_api_check_single_genric_event_md5 200 $cr_index job-adp-kafka-data$i 'Message-------7'
+ if [[ "$DMAAP_MED_FEATURE_LEVEL" == *"KAFKATYPES"* ]]; then
+ cr_api_check_single_genric_event_md5 200 $cr_index job-med-kafka-data$i 'Message-------4'
+ cr_api_check_single_genric_event_md5 200 $cr_index job-med-kafka-data$i 'Message-------6'
+ fi
done
#### TEST COMPLETE ####
--- /dev/null
+#!/usr/bin/env bash
+
+# ============LICENSE_START===============================================
+# Copyright (C) 2020 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+TC_ONELINE_DESCR="App test DMAAP Meditor and DMAAP Adapter with 100 jobs,types and topics"
+
+#App names to include in the test when running docker, space separated list
+DOCKER_INCLUDED_IMAGES="ICS DMAAPMED DMAAPADP KUBEPROXY MR DMAAPMR CR KAFKAPC HTTPPROXY"
+
+#App names to include in the test when running kubernetes, space separated list
+KUBE_INCLUDED_IMAGES=" ICS DMAAPMED DMAAPADP KUBEPROXY MR DMAAPMR CR KAFKAPC HTTPPROXY"
+
+#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
+KUBE_PRESTARTED_IMAGES=""
+
+#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
+#the image is not configured in the supplied env_file
+#Used for images not applicable to all supported profile
+CONDITIONALLY_IGNORED_IMAGES=""
+
+#Supported test environment profiles
+SUPPORTED_PROFILES="ORAN-E-RELEASE ORAN-F-RELEASE"
+#Supported run modes
+SUPPORTED_RUNMODES="DOCKER KUBE"
+
+. ../common/testcase_common.sh $@
+
+setup_testenvironment
+
+#### TEST BEGIN ####
+
+#Local vars in test script
+##########################
+FLAT_A1_EI="1"
+NUM_CR=1 # Number of callback receivers, max 1
+## Note: The number of jobs must be a multiple of the number of CRs in order to calculate the number of expected event in each CR
+NUM_JOBS=100 # Mediator and adapter gets same number of jobs for every type
+if [ $NUM_CR -gt 1 ]; then
+ __log_conf_fail_general "Max number of callback receivers is one in this test"
+fi
+
+clean_environment
+
+#use_cr_https
+use_cr_http
+use_ics_rest_https
+use_mr_https
+use_dmaapadp_https
+use_dmaapmed_https
+
+start_kube_proxy
+
+start_cr $NUM_CR
+
+start_ics NOPROXY $SIM_GROUP/$ICS_COMPOSE_DIR/$ICS_CONFIG_FILE
+
+set_ics_trace
+
+start_mr
+
+start_kafkapc
+
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+ kafkapc_api_create_topic 201 "unauthenticated.dmaapadp_kafka.text$i" "text/plain"
+
+ kafkapc_api_start_sending 200 "unauthenticated.dmaapadp_kafka.text$i"
+done
+
+adp_med_type_list=""
+adp_config_data='{"types": ['
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+ if [ $i -ne 1 ]; then
+ adp_config_data=$adp_config_data','
+ fi
+ adp_config_data=$adp_config_data'{"id": "ADPKafkaType'$i'","kafkaInputTopic": "unauthenticated.dmaapadp_kafka.text'$i'","useHttpProxy": false}'
+ adp_med_type_list="$adp_med_type_list ADPKafkaType$i "
+done
+adp_config_data=$adp_config_data']}'
+echo $adp_config_data > tmp/adp_config_data.json
+
+start_dmaapadp NOPROXY $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_CONFIG_FILE tmp/adp_config_data.json
+
+set_dmaapadp_trace
+
+if [[ "$DMAAP_MED_FEATURE_LEVEL" == *"KAFKATYPES"* ]]; then
+ for ((i=1; i<=$NUM_JOBS; i++))
+ do
+ kafkapc_api_create_topic 201 "unauthenticated.dmaapmed_kafka.text$i" "text/plain"
+
+ kafkapc_api_start_sending 200 "unauthenticated.dmaapmed_kafka.text$i"
+ done
+fi
+
+med_config_data='{"types": ['
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+ if [ $i -ne 1 ]; then
+ med_config_data=$med_config_data','
+ fi
+ med_config_data=$med_config_data'{"id": "MEDKafkaType'$i'","kafkaInputTopic": "unauthenticated.dmaapmed_kafka.text'$i'"}'
+ adp_med_type_list="$adp_med_type_list MEDKafkaType$i "
+done
+med_config_data=$med_config_data']}'
+echo $med_config_data > tmp/med_config_data.json
+
+start_dmaapmed NOPROXY tmp/med_config_data.json
+
+ics_equal json:data-producer/v1/info-producers 2 60
+
+# Check producers
+ics_api_idc_get_job_ids 200 NOTYPE NOWNER EMPTY
+ics_api_edp_get_producer_ids_2 200 NOTYPE DmaapGenericInfoProducer DMaaP_Mediator_Producer
+ics_api_idc_get_type_ids 200 $adp_med_type_list
+
+
+# Create jobs for adapter kafka - CR stores data as MD5 hash
+start_timer "Create adapter (kafka) jobs: $NUM_JOBS"
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    # Max buffer timeout is about 160 sec for adapter jobs
+ adp_timeout=$(($i*1000))
+ adp_config_data='{"filter":"Message*","maxConcurrency": 1,"bufferTimeout": {"maxSize": 100,"maxTimeMiliseconds": '$adp_timeout'}}'
+ echo $adp_config_data > tmp/adp_config_data.json
+
+ cr_index=$(($i%$NUM_CR))
+ service_text="CR_SERVICE_TEXT_PATH_"$cr_index
+ service_app="CR_SERVICE_APP_PATH_"$cr_index
+ ics_api_idc_put_job 201 job-adp-kafka-$i "ADPKafkaType$i" ${!service_text}/job-adp-kafka-data$i"?storeas=md5" info-owner-adp-kafka-$i ${!service_app}/callbacks-null tmp/adp_config_data.json
+
+done
+print_timer
+
+if [[ "$DMAAP_MED_FEATURE_LEVEL" == *"KAFKATYPES"* ]]; then
+ # Create jobs for mediator kafka - CR stores data as MD5 hash
+ start_timer "Create mediator (kafka) jobs: $NUM_JOBS"
+ for ((i=1; i<=$NUM_JOBS; i++))
+ do
+ med_timeout=$(($i*5000))
+ med_config_data='{"bufferTimeout": {"maxSize": 100,"maxTimeMiliseconds": '$med_timeout'}}'
+ echo $med_config_data > tmp/med_config_data.json
+ cr_index=$(($i%$NUM_CR))
+ service_text="CR_SERVICE_TEXT_PATH_"$cr_index
+ service_app="CR_SERVICE_APP_PATH_"$cr_index
+ ics_api_idc_put_job 201 job-med-kafka-$i "MEDKafkaType$i" ${!service_text}/job-med-kafka-data$i"?storeas=md5" info-owner-med-kafka-$i ${!service_app}/callbacks-null tmp/med_config_data.json
+ done
+ print_timer
+fi
+
+# Check job status
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+ ics_api_a1_get_job_status 200 job-adp-kafka-$i ENABLED 30
+ if [[ "$DMAAP_MED_FEATURE_LEVEL" == *"KAFKATYPES"* ]]; then
+ ics_api_a1_get_job_status 200 job-med-kafka-$i ENABLED 30
+ fi
+done
+
+
+EXPECTED_DATA_DELIV=0 #Total delivered msg per CR
+EXPECTED_BATCHES_DELIV=0 #Total delivered batches per CR
+DATA_DELIV_JOBS=0 #Total delivered msg per job per CR
+
+sleep_wait 60
+
+start_timer "Data delivery adapter kafka, 2 strings per job (short buffer timeouts)"
+# Send small text via message-router to adapter
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+ kafkapc_api_post_msg 200 "unauthenticated.dmaapadp_kafka.text"$i "text/plain" 'Message-------1'$i
+ kafkapc_api_post_msg 200 "unauthenticated.dmaapadp_kafka.text"$i "text/plain" 'Discard-------3'$i #Should be filtered out
+ kafkapc_api_post_msg 200 "unauthenticated.dmaapadp_kafka.text"$i "text/plain" 'Message-------3'$i
+done
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+ kafkapc_equal topics/unauthenticated.dmaapadp_kafka.text$i/counters/sent 3 30
+done
+
+# Wait for data reception, adapter kafka
+EXPECTED_DATA_DELIV=$(($NUM_JOBS*2/$NUM_CR+$EXPECTED_DATA_DELIV))
+EXPECTED_BATCHES_DELIV=$(($NUM_JOBS/$NUM_CR+$EXPECTED_BATCHES_DELIV))
+
+adp_timeout=$(($NUM_JOBS*1*2+60)) #NUM_JOBS*MIN_BUFFERTIMEOUT*2+60_SEC_DELAY
+for ((i=0; i<$NUM_CR; i++))
+do
+ #tmp_receptio
+ cr_equal $i received_callbacks $EXPECTED_DATA_DELIV $adp_timeout
+ cr_greater_or_equal $i received_callback_batches $EXPECTED_BATCHES_DELIV
+done
+print_timer
+
+# Check received data callbacks from adapter
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+ cr_index=$(($i%$NUM_CR))
+ cr_api_check_single_genric_event_md5 200 $cr_index job-adp-kafka-data$i 'Message-------1'$i
+ cr_api_check_single_genric_event_md5 200 $cr_index job-adp-kafka-data$i 'Message-------3'$i
+done
+
+if [[ "$DMAAP_MED_FEATURE_LEVEL" == *"KAFKATYPES"* ]]; then
+
+ PREV_DATA_DELIV=$(cr_read 0 received_callbacks)
+ PREV_BATCHES_DELIV=$(cr_read 0 received_callback_batches)
+ start_timer "Data delivery mediator kafka, 2 strings per job (long buffer timeouts)"
+ # Send small text via message-router to mediator
+ for ((i=1; i<=$NUM_JOBS; i++))
+ do
+ kafkapc_api_post_msg 200 "unauthenticated.dmaapmed_kafka.text$i" "text/plain" 'Message-------0'$i
+ kafkapc_api_post_msg 200 "unauthenticated.dmaapmed_kafka.text$i" "text/plain" 'Message-------2'$i
+ done
+ for ((i=1; i<=$NUM_JOBS; i++))
+ do
+ kafkapc_equal topics/unauthenticated.dmaapmed_kafka.text$i/counters/sent 2 30
+ done
+
+    # Wait for data reception, mediator kafka
+
+ EXPECTED_DATA_DELIV=$(($NUM_JOBS*2/$NUM_CR+$PREV_DATA_DELIV))
+ EXPECTED_BATCHES_DELIV=$(($NUM_JOBS/$NUM_CR+$PREV_BATCHES_DELIV))
+
+ med_timeout=$(($NUM_JOBS*5*2+60)) #NUM_JOBS*MIN_BUFFERTIMEOUT*2+60_SEC_DELAY
+ for ((i=0; i<$NUM_CR; i++))
+ do
+ cr_equal $i received_callbacks $EXPECTED_DATA_DELIV $med_timeout
+ cr_greater_or_equal $i received_callback_batches $EXPECTED_BATCHES_DELIV
+ done
+
+ print_timer
+
+ # Check received data callbacks from mediator
+ for ((i=1; i<=$NUM_JOBS; i++))
+ do
+ cr_index=$(($i%$NUM_CR))
+ cr_api_check_single_genric_event_md5 200 $cr_index job-med-kafka-data$i 'Message-------0'$i
+ cr_api_check_single_genric_event_md5 200 $cr_index job-med-kafka-data$i 'Message-------2'$i
+ done
+fi
+
+#### TEST COMPLETE ####
+
+store_logs END
+
+print_result
+
+auto_clean_environment
TC_ONELINE_DESCR="Resync of RIC via changes in the consul config or pushed config"
#App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM NGW KUBEPROXY"
+DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM KUBEPROXY"
+
+#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
+#the image is not configured in the supplied env_file
+#Used for images not applicable to all supported profile
+CONDITIONALLY_IGNORED_IMAGES="CBS CONSUL"
#Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER"
if [ "$PMS_VERSION" == "V2" ]; then
TESTED_VARIANTS="CONSUL NOCONSUL"
+ if [[ "$PMS_FEATURE_LEVEL" == *"NOCONSUL"* ]]; then
+ TESTED_VARIANTS="NOCONSUL"
+ fi
else
TESTED_VARIANTS="CONSUL"
fi
fi
check_policy_agent_logs
- check_sdnc_logs
store_logs END_$consul_conf
done
TC_ONELINE_DESCR="Change supported policy types and reconfigure rics"
#App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM SDNC NGW KUBEPROXY"
+DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR PA RICSIM SDNC KUBEPROXY"
#App names to include in the test when running kubernetes, space separated list
-KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
+KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY"
#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
KUBE_PRESTARTED_IMAGES=""
+#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
+#the image is not configured in the supplied env_file
+#Used for images not applicable to all supported profile
+CONDITIONALLY_IGNORED_IMAGES="CBS CONSUL"
+
#Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
start_mr
if [ $RUNMODE == "DOCKER" ]; then
- start_consul_cbs
+ if [[ "$PMS_FEATURE_LEVEL" != *"NOCONSUL"* ]]; then
+ start_consul_cbs
+ fi
fi
# Create first config
prepare_consul_config NOSDNC ".consul_config_all.json"
fi
- start_policy_agent NORPOXY $SIM_GROUP/$POLICY_AGENT_COMPOSE_DIR/$POLICY_AGENT_CONFIG_FILE
+ if [ $RUNMODE == "KUBE" ] && [[ "$PMS_FEATURE_LEVEL" == *"INITIALCONFIGMAP"* ]]; then
+ start_policy_agent NORPOXY $SIM_GROUP/$POLICY_AGENT_COMPOSE_DIR/application2.yaml
+ else
+ start_policy_agent NORPOXY $SIM_GROUP/$POLICY_AGENT_COMPOSE_DIR/$POLICY_AGENT_CONFIG_FILE
+ fi
set_agent_trace
#Load first config
if [ $RUNMODE == "KUBE" ]; then
- agent_load_config ".consul_config_initial.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"INITIALCONFIGMAP"* ]]; then
+ api_put_configuration 200 ".consul_config_initial.json"
+ api_get_configuration 200 ".consul_config_initial.json"
+ else
+ agent_load_config ".consul_config_initial.json"
+ fi
else
- consul_config_app ".consul_config_initial.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"NOCONSUL"* ]]; then
+ api_put_configuration 200 ".consul_config_initial.json"
+ api_get_configuration 200 ".consul_config_initial.json"
+ else
+ consul_config_app ".consul_config_initial.json"
+ fi
fi
for ((i=1; i<=${NUM_RICS}; i++))
#Load config with all rics
if [ $RUNMODE == "KUBE" ]; then
- agent_load_config ".consul_config_all.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"INITIALCONFIGMAP"* ]]; then
+ api_put_configuration 200 ".consul_config_all.json"
+ api_get_configuration 200 ".consul_config_all.json"
+ else
+ agent_load_config ".consul_config_all.json"
+ fi
else
- consul_config_app ".consul_config_all.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"NOCONSUL"* ]]; then
+ api_put_configuration 200 ".consul_config_all.json"
+ api_get_configuration 200 ".consul_config_all.json"
+ else
+ consul_config_app ".consul_config_all.json"
+ fi
fi
api_equal json:rics 10 120
# Load config with reduced number of rics
if [ $RUNMODE == "KUBE" ]; then
- agent_load_config ".consul_config_initial.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"INITIALCONFIGMAP"* ]]; then
+ api_put_configuration 200 ".consul_config_initial.json"
+ api_get_configuration 200 ".consul_config_initial.json"
+ else
+ agent_load_config ".consul_config_initial.json"
+ fi
else
- consul_config_app ".consul_config_initial.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"NOCONSUL"* ]]; then
+ api_put_configuration 200 ".consul_config_initial.json"
+ api_get_configuration 200 ".consul_config_initial.json"
+ else
+ consul_config_app ".consul_config_initial.json"
+ fi
fi
api_equal json:rics 8 120
# Load config with all rics
if [ $RUNMODE == "KUBE" ]; then
- agent_load_config ".consul_config_all.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"INITIALCONFIGMAP"* ]]; then
+ api_put_configuration 200 ".consul_config_all.json"
+ api_get_configuration 200 ".consul_config_all.json"
+ else
+ agent_load_config ".consul_config_all.json"
+ fi
else
- consul_config_app ".consul_config_all.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"NOCONSUL"* ]]; then
+ api_put_configuration 200 ".consul_config_all.json"
+ api_get_configuration 200 ".consul_config_all.json"
+ else
+ consul_config_app ".consul_config_all.json"
+ fi
fi
api_equal json:rics 10 120
--- /dev/null
+#!/bin/bash
+
+# ============LICENSE_START===============================================
+# Copyright (C) 2021 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+TC_ONELINE_DESCR="Test of Helm Manager"
+
+#App names to include in the test when running docker, space separated list
+DOCKER_INCLUDED_IMAGES="KUBEPROXY CHARTMUS LOCALHELM HELMMANAGER"
+
+#App names to include in the test when running kubernetes, space separated list
+KUBE_INCLUDED_IMAGES="KUBEPROXY CHARTMUS LOCALHELM HELMMANAGER"
+#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
+KUBE_PRESTARTED_IMAGES=""
+
+#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
+#the image is not configured in the supplied env_file
+#Used for images not applicable to all supported profile
+CONDITIONALLY_IGNORED_IMAGES=""
+
+#Supported test environment profiles
+SUPPORTED_PROFILES="ORAN-E-RELEASE ORAN-F-RELEASE"
+#Supported run modes
+SUPPORTED_RUNMODES="DOCKER KUBE"
+
+. ../common/testcase_common.sh $@
+
+setup_testenvironment
+
+#### TEST BEGIN ####
+
+clean_environment
+
+start_kube_proxy
+
+start_chart_museum
+
+localhelm_create_test_chart dummy
+
+localhelm_package_test_chart dummy
+
+chartmus_upload_test_chart dummy
+
+clean_and_create_namespace test-ns
+
+localhelm_installed_chart_release NOTINSTALLED test-release test-ns
+
+start_helm_manager
+
+helm_manager_api_get_charts 200 EMPTY
+
+helm_manager_api_exec_add_repo cm $CHART_MUS_SERVICE_PATH
+
+helm_manager_api_post_repo 201 cm $CHART_MUS_SERVICE_HTTPX $CHART_MUS_SERVICE_HOST $CHART_MUS_SERVICE_PORT
+
+helm_manager_api_post_onboard_chart 200 cm dummy DEFAULT-VERSION test-release test-ns
+
+helm_manager_api_get_charts 200 cm dummy DEFAULT-VERSION test-release test-ns
+
+helm_manager_api_post_install_chart 201 dummy DEFAULT-VERSION
+
+localhelm_installed_chart_release INSTALLED test-release test-ns
+
+helm_manager_api_get_charts 200 cm dummy DEFAULT-VERSION test-release test-ns
+
+helm_manager_api_uninstall_chart 204 dummy DEFAULT-VERSION
+
+helm_manager_api_get_charts 200 cm dummy DEFAULT-VERSION test-release test-ns
+
+helm_manager_api_delete_chart 204 dummy DEFAULT-VERSION
+
+helm_manager_api_get_charts 200 EMPTY
+
+localhelm_installed_chart_release NOTINSTALLED test-release test-ns
+
+#### TEST COMPLETE ####
+
+store_logs END
+
+print_result
+
+auto_clean_environment
+
+
+
#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
#the image is not configured in the supplied env_file
#Used for images not applicable to all supported profile
-CONDITIONALLY_IGNORED_IMAGES="NGW"
+CONDITIONALLY_IGNORED_IMAGES="CBS CONSUL NGW"
#Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
mr_equal requests_submitted 0
+ sim_put_policy_type 201 ricsim_g1_1 1 testdata/OSC/sim_1.json
+ if [ "$PMS_VERSION" == "V2" ]; then
+ sim_put_policy_type 201 ricsim_g3_1 STD_QOS2_0.1.0 testdata/STD2/sim_qos2.json
+ fi
if [[ $interface == "SDNC" ]]; then
start_sdnc
prepare_consul_config SDNC ".consul_config.json"
prepare_consul_config NOSDNC ".consul_config.json"
fi
- if [ $RUNMODE == "DOCKER" ]; then
- start_consul_cbs
- fi
-
if [ $RUNMODE == "KUBE" ]; then
agent_load_config ".consul_config.json"
else
- consul_config_app ".consul_config.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"NOCONSUL"* ]]; then
+ api_put_configuration 200 ".consul_config.json"
+ else
+ start_consul_cbs
+ consul_config_app ".consul_config.json"
+ fi
fi
-
api_get_status 200
sim_print ricsim_g1_1 interface
sim_print ricsim_g3_1 interface
fi
- sim_put_policy_type 201 ricsim_g1_1 1 testdata/OSC/sim_1.json
-
if [ "$PMS_VERSION" == "V2" ]; then
- sim_put_policy_type 201 ricsim_g3_1 STD_QOS2_0.1.0 testdata/STD2/sim_qos2.json
-
api_equal json:policy-types 3 300 #Wait for the agent to refresh types from the simulators
else
api_equal json:policy_types 2 300 #Wait for the agent to refresh types from the simulators
#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
#the image is not configured in the supplied env_file
#Used for images not applicable to all supported profile
-CONDITIONALLY_IGNORED_IMAGES="NGW"
+CONDITIONALLY_IGNORED_IMAGES="CBS CONSUL NGW"
#Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-ISTANBUL ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-ISTANBUL ONAP-JAKARTA ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
set_agent_debug
- if [ $RUNMODE == "DOCKER" ]; then
- start_consul_cbs
- fi
-
if [[ $interface = *"SDNC"* ]]; then
start_sdnc
prepare_consul_config SDNC ".consul_config.json"
if [ $RUNMODE == "KUBE" ]; then
agent_load_config ".consul_config.json"
else
- consul_config_app ".consul_config.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"NOCONSUL"* ]]; then
+ api_put_configuration 200 ".consul_config.json"
+ else
+ start_consul_cbs
+ consul_config_app ".consul_config.json"
+ fi
fi
start_cr 1
#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
#the image is not configured in the supplied env_file
#Used for images not applicable to all supported profile
-CONDITIONALLY_IGNORED_IMAGES="NGW"
+CONDITIONALLY_IGNORED_IMAGES="CBS CONSUL NGW"
#Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
start_policy_agent NORPOXY $SIM_GROUP/$POLICY_AGENT_COMPOSE_DIR/$POLICY_AGENT_CONFIG_FILE
-if [ $RUNMODE == "DOCKER" ]; then
- start_consul_cbs
-fi
-
prepare_consul_config SDNC ".consul_config.json"
if [ $RUNMODE == "KUBE" ]; then
- agent_load_config ".consul_config.json"
+ agent_load_config ".consul_config.json"
else
- consul_config_app ".consul_config.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"NOCONSUL"* ]]; then
+ api_put_configuration 200 ".consul_config.json"
+ else
+ start_consul_cbs
+ consul_config_app ".consul_config.json"
+ fi
fi
start_sdnc
#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
#the image is not configured in the supplied env_file
#Used for images not applicable to all supported profile
-CONDITIONALLY_IGNORED_IMAGES="NGW"
+CONDITIONALLY_IGNORED_IMAGES="CBS CONSUL NGW"
#Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
set_agent_debug
- if [ $RUNMODE == "DOCKER" ]; then
- start_consul_cbs
- fi
-
if [[ $interface = *"SDNC"* ]]; then
start_sdnc
prepare_consul_config SDNC ".consul_config.json"
if [ $RUNMODE == "KUBE" ]; then
agent_load_config ".consul_config.json"
else
- consul_config_app ".consul_config.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"NOCONSUL"* ]]; then
+ api_put_configuration 200 ".consul_config.json"
+ else
+ start_consul_cbs
+ consul_config_app ".consul_config.json"
+ fi
fi
start_mr # Not used, but removes error messages from the agent log
#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
#the image is not configured in the supplied env_file
#Used for images not applicable to all supported profile
-CONDITIONALLY_IGNORED_IMAGES="NGW"
+CONDITIONALLY_IGNORED_IMAGES="CBS CONSUL NGW"
#Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
use_agent_rest_http
-if [ $RUNMODE == "DOCKER" ]; then
- start_consul_cbs
-fi
-
prepare_consul_config NOSDNC ".consul_config.json"
if [ $RUNMODE == "KUBE" ]; then
agent_load_config ".consul_config.json"
else
- consul_config_app ".consul_config.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"NOCONSUL"* ]]; then
+ api_put_configuration 200 ".consul_config.json"
+ else
+ start_consul_cbs
+ consul_config_app ".consul_config.json"
+ fi
fi
api_get_status 200
#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
KUBE_PRESTARTED_IMAGES=" PA RICSIM CP ICS RC SDNC DMAAPMED DMAAPADP"
+#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
+#the image is not configured in the supplied env_file
+#Used for images not applicable to all supported profile
+CONDITIONALLY_IGNORED_IMAGES=""
+
#Supported test environment profiles
SUPPORTED_PROFILES="ORAN-E-RELEASE"
#Supported run modes
else
ics_api_edp_put_type_2 201 type1 testdata/ics/ei-type-1.json
ics_api_edp_get_type_2 200 type1
- ics_api_edp_get_type_ids 200 STD_Fault_Messages ExampleInformationTypeKafka ExampleInformationType type1
+
+ ics_api_edp_get_type_ids 200 type1
ics_api_edp_put_producer_2 201 prod-a $CB_JOB/prod-a $CB_SV/prod-a type1
ics_api_edp_put_producer_2 200 prod-a $CB_JOB/prod-a $CB_SV/prod-a type1
# Dmaap mediator and adapter
start_dmaapadp NOPROXY $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_CONFIG_FILE $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_DATA_FILE
-start_dmaapmed NOPROXY $SIM_GROUP/$DMAAP_MED_COMPOSE_DIR/$DMAAP_MED_DATA_FILE
+start_dmaapmed NOPROXY $SIM_GROUP/$DMAAP_MED_COMPOSE_DIR/$DMAAP_MED_HOST_DATA_FILE
ics_equal json:data-producer/v1/info-producers 3 120
-ics_api_idc_get_type_ids 200 ExampleInformationType ExampleInformationTypeKafka STD_Fault_Messages type-1
+ics_equal json:data-producer/v1/info-types 4 30
+
+ics_api_idc_get_type_ids 200 ExampleInformationType ExampleInformationTypeKafka STD_Fault_Messages type1
ics_api_edp_get_producer_ids_2 200 NOTYPE prod-a DmaapGenericInfoProducer DMaaP_Mediator_Producer
ics_api_a1_get_job_status 200 jobz$i ENABLED 30
done
+sleep_wait 30 # Wait for mediator to start listening to kafka
+
mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-0"}'
mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-1"}'
mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-2"}'
#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
#the image is not configured in the supplied env_file
#Used for images not applicable to all supported profile
-CONDITIONALLY_IGNORED_IMAGES="NGW"
+CONDITIONALLY_IGNORED_IMAGES="CBS CONSUL NGW"
#Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL"
+SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
set_agent_trace
- if [ $RUNMODE == "DOCKER" ]; then
- start_consul_cbs
- fi
-
if [ $RUNMODE == "KUBE" ]; then
agent_load_config ".consul_config.json"
else
- consul_config_app ".consul_config.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"NOCONSUL"* ]]; then
+            #Temporary switch to http/https if dmaap is used. Otherwise it is not possible to push config
+ if [ $__httpx == "HTTPS" ]; then
+ use_agent_rest_https
+ else
+ use_agent_rest_http
+ fi
+ api_put_configuration 200 ".consul_config.json"
+ if [ $__httpx == "HTTPS" ]; then
+ if [[ $interface = *"DMAAP"* ]]; then
+ use_agent_dmaap_https
+ else
+ use_agent_rest_https
+ fi
+ else
+ if [[ $interface = *"DMAAP"* ]]; then
+ use_agent_dmaap_http
+ else
+ use_agent_rest_http
+ fi
+ fi
+ else
+ start_consul_cbs
+ consul_config_app ".consul_config.json"
+ fi
fi
# Check that all rics are synced in
#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
#the image is not configured in the supplied env_file
#Used for images not applicable to all supported profile
-CONDITIONALLY_IGNORED_IMAGES="NGW"
+CONDITIONALLY_IGNORED_IMAGES="CBS CONSUL NGW"
#Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-GUILIN ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
set_agent_trace
-if [ $RUNMODE == "DOCKER" ]; then
- start_consul_cbs
-fi
-
prepare_consul_config SDNC ".consul_config.json"
if [ $RUNMODE == "KUBE" ]; then
agent_load_config ".consul_config.json"
else
- consul_config_app ".consul_config.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"NOCONSUL"* ]]; then
+ api_put_configuration 200 ".consul_config.json"
+ else
+ start_consul_cbs
+ consul_config_app ".consul_config.json"
+ fi
fi
api_get_status 200
#Ignore image in DOCKER_INCLUDED_IMAGES, KUBE_INCLUDED_IMAGES if
#the image is not configured in the supplied env_file
#Used for images not applicable to all supported profile
-CONDITIONALLY_IGNORED_IMAGES="NGW"
+CONDITIONALLY_IGNORED_IMAGES="CBS CONSUL NGW"
#Supported test environment profiles
-SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ONAP-HONOLULU ONAP-ISTANBUL ONAP-JAKARTA ORAN-CHERRY ORAN-D-RELEASE ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
start_policy_agent PROXY $SIM_GROUP/$POLICY_AGENT_COMPOSE_DIR/$POLICY_AGENT_CONFIG_FILE
-if [ $RUNMODE == "DOCKER" ]; then
- start_consul_cbs
-fi
-
prepare_consul_config SDNC ".consul_config.json" #Change to NOSDNC if running PMS with proxy
if [ $RUNMODE == "KUBE" ]; then
agent_load_config ".consul_config.json"
else
- consul_config_app ".consul_config.json"
+ if [[ "$PMS_FEATURE_LEVEL" == *"NOCONSUL"* ]]; then
+ api_put_configuration 200 ".consul_config.json"
+ else
+ start_consul_cbs
+ consul_config_app ".consul_config.json"
+ fi
fi
start_cr 1
# Overview
The bash scripts in this dir are intended for function test of the Non-RT RIC in different configurations, using simulators when needed for the external interfaces.
-A few of the bash scripts are so called 'suites', These suite scripts calls a sequence of the other bash scripts.
+A few of the bash scripts are so called 'suites'. These suite scripts call a sequence of the other bash test scripts.
## Automated test scripts
FTC is short for Function Test Case. In addition, there are also other test scripts with other naming format used for demo setup etc (e.g PM_DEMO.sh).
The requirements, in terms of the execution environment, to run a script or a suite is to have docker, docker-compose and python3 installed (the scripts warn if not installed). As an option, the scripts can also be executed in a Minikube or Kubernetes installation. The additional requirement is to have a clean minikube/kubernetes installation, preferably with the kube dashboard installed.
-The scripts have been tested to work on both MacOS and Ubuntu using docker. They should work also in git-bash on windows (for docker) but only partly verified. Running using minikube has only been verified on Ubuntu and running on kubernetes has only been verified on MacOS.
+The scripts have been tested to work on both MacOS and Ubuntu using docker. They should work also in git-bash on windows (for docker) but only partly verified. Running using minikube has only been verified on Ubuntu and running on kubernetes has been verified on MacOS and Ubuntu. Successful sample tests has been made on google cloud.
## Configuration
-The test scripts uses configuration from a single file, found in `../common/test_env.sh`, which contains all needed configuration in terms of image names, image tags, ports, file paths, passwords etc. This file can be modified if needed. See the README.md in `../common/` for all details of the config file.
+The test scripts use configuration from a single profile file, found in `../common/test_env-*.sh`, which contains all needed configuration in terms of image names, image tags, ports, file paths, passwords etc. There is one profile file per system (ORAN/ONAP) and release.
+If temporary changes are needed to the settings in a profile file, use an override file containing only the variable to override.
## How to run
The test scripts produce quite a number of logs; all container logs, a log of all http/https calls from the test scripts including the payload, some configuration created during test and also a test case log (same as what is printed on the screen during execution). All these logs are stored in `logs/FTCXXX/`. So each test script is using its own log directory.
To test all components on a very basic level, run the demo test script(s) for the desired release.
-Note that oran tests only include components from oran.
+Note that oran tests only include components from oran (exception is the onap sdnc).
Note that onap test uses components from onap combined with released oran components available at that onap release (e.g. Honolulu contains onap images from honolulu and oran images from cherry)
The test scripts are numbered using these basic categories where 0-999 are related to the policy management and 1000-1999 are related to information management. 2000-2999 are for southbound http proxy. There are also demo test cases that test more or less all components. These test scripts do not use the numbering scheme below.
The numbering in each series corresponds to the following groupings
-1-99 - Basic sanity tests
+1-99 - Basic sanity tests, PMS
-100-199 - API tests
+100-199 - API tests, PMS
-300-399 - Config changes and sync
+300-399 - Config changes and sync, PMS
-800-899 - Stability and capacity test
+800-899 - Stability and capacity test, PMS
-900-999 - Misc test
+900-999 - Misc test, PMS
11XX - ICS API Tests
18XX - ICS Stability and capacity test
-2000 - Southbound http proxy tests
+20XX - Southbound http proxy tests
30XX - rApp tests
+40XX - Helm Manager tests
+
Suites
To get an overview of the available test scripts, use the following command to print the test script description:
DOCKER_INCLUDED_IMAGES=<list of used apps in this test case - for docker>
KUBE_INCLUDED_IMAGES=<list of used apps (started by the script) in this test case - for kube>
-KUBE_PRESTARTED_IMAGES=<list of used apps (prestartedd - i.e. not started by the script) in this test case - for kube>
+KUBE_PRESTARTED_IMAGES=<list of used apps (prestarted - i.e. not started by the script) in this test case - for kube>
SUPPORTED_PROFILES=<list of supported profile names>
#### TEST COMPLETE ####
+print_result
+
store_logs END
```
--- /dev/null
+#!/bin/bash
+
+# ============LICENSE_START===============================================
+# Copyright (C) 2020 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+TS_ONELINE_DESCR="Test suite - PMS endpoint aegis image testing. Agent REST, DMAAP and SNDC controller resconf"
+
+. ../common/testsuite_common.sh
+
+suite_setup
+
+############# TEST CASES #################
+
+./FTC1.sh $@
+./FTC10.sh $@
+./FTC100.sh $@
+./FTC110.sh $@
+./FTC300.sh $@
+./FTC310.sh $@
+./FTC350.sh $@
+./FTC800.sh $@
+./FTC805.sh $@
+./FTC850.sh $@
+./FTC2001.sh $@
+
+##########################################
+
+suite_complete
\ No newline at end of file
--- /dev/null
+#!/bin/bash
+################################################################################
+# Copyright (c) 2021 Nordix Foundation. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+
+# Override file for running the e-release helm recipe including all components
+
+
+POLICY_AGENT_IMAGE_BASE="aegis-onap-docker-local.artifactory.est.tech/onap/ccsdk-oran-a1policymanagementservice"
RAPP_CAT_EXTERNAL_PORT=9085
RAPP_CAT_EXTERNAL_SECURE_PORT=9086
+
+HELM_MANAGER_APP_NAME="helmmanager"
CONDITIONALLY_IGNORED_IMAGES=""
#Supported test environment profiles
-SUPPORTED_PROFILES="ORAN-E-RELEASE"
+SUPPORTED_PROFILES="ORAN-E-RELEASE ORAN-F-RELEASE"
#Supported run modes
SUPPORTED_RUNMODES="DOCKER KUBE"
--- /dev/null
+{
+ "scope": {
+ "ueId": "ueXXX",
+ "qosId": "qosXXX"
+ }
\ No newline at end of file
--- /dev/null
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type": "object",
+ "properties": {
+ "filter": {
+ "type": "string"
+ },
+ "maxConcurrency": {
+ "type": "integer"
+ },
+ "bufferTimeout": {
+ "type": "object",
+ "properties": {
+ "maxSize": {
+ "type": "integer"
+ },
+ "maxTimeMiliseconds": {
+ "type": "integer"
+ }
+ },
+ "required": [
+ "maxSize",
+ "maxTimeMiliseconds"
+ ]
+ }
+ },
+ "required": []
+}
\ No newline at end of file
--- /dev/null
+{
+ "bufferTimeout": {
+ "maxSize": 1,
+ "maxTimeMiliseconds": 0
+ }
+}
\ No newline at end of file
--- /dev/null
+{}
\ No newline at end of file
# Introduction #
-This dir contains most scripts needed for the auto-test environment. There are scripts with functions to adapt to the apis of the components of the Non-RT RIC; Policy Agent, A1 Controller and Ric (A1) simulator. The test environment supports both test with docker and kubernetes(still experimental)
+This dir contains most scripts needed for the auto-test environment. There are scripts with functions to adapt to the apis of the components of the Non-RT RIC; Policy Management Service, Information Coordinator Service, A1 simulator as well as other components and simulators. The test environment supports both test with docker and kubernetes.
Some of the scripts can also be used for other kinds of tests, for example basic tests.
## Overview for common test scripts and files ##
-`agent_api_functions.sh` \
-Contains functions for adapting towards the Policy Management Service (PMS) API, also via dmaap (using a message-router stub interface)
-
`api_curl.sh` \
A common curl based function for the agent and ics apis. Also partly used for the Callback receiver and RAPP Catalogue apis.
+`cbs_api_function.sh` \
+All functions are implemented in `consul_api_function.sh`.
+
+`chartmus_api_functions.sh` \
+Contains functions for managing a Chartmuseum instance.
+
+`clean-docker.sh` \
+Cleans all containers started by the test environment in docker.
+
`clean-kube.sh` \
Cleans all services, deployments, pods, replica set etc started by the test environment in kubernetes.
`compare_json.py` \
A python script to compare two json objects for equality. Note that the comparison always sorts json-arrays before comparing (that is, it does not care about the order of items within the array). In addition, the target json object may specify individual parameter values where equality is 'dont care'.
-`consul_cbs_function.sh` \
-Contains functions for managing Consul and CBS as well as create the configuration for the PMS.
-
-`control_panel_api_function.sh` \
-Contains functions for managing Control Panel.
+`consul_api_function.sh` \
+Contains functions for managing Consul and CBS.
-`controller_api_functions.sh` \
-Contains functions for adaping towards the A1-controller API.
+`count_json_elements.py` \
+A python script that calculates the length of a json array or the size of a json dictionary.
`count_json_elements.py` \
A python script returning the number of items in a json array.
+`cp_api_function.sh` \
+Contains functions for managing the Control Panel.
+
`cr_api_functions.sh` \
-Contains functions for adapting towards the Callback receiver for checking received callback event.
+Contains functions for adapting towards the Callback receiver for checking received callback events.
`create_policies_process.py` \
A python script to create a batch of policies. The script is intended to run in a number of processes to create policies in parallel.
`delete_policies_process.py` \
A python script to delete a batch of policies. The script is intended to run in a number of processes to delete policies in parallel.
+`dmaapadp_api_function.sh` \
+Contains functions for managing the Dmaap Adaptor Service.
+
+`dmaapmed_api_function.sh` \
+Contains functions for managing the Dmaap Mediator Service.
+
+`dmaapmr_api_function.sh` \
+All functions are implemented in `mr_api_functions.sh`.
+
`do_curl_function.sh`
A script for executing a curl call with a specific url and optional payload. It also compare the response with an expected result in terms of response code and optional returned payload. Intended to be used by test script (for example basic test scripts of other components)
-`ics_api_functions.sh` \
-Contains functions for adapting towards the ICS API
-
`extract_sdnc_reply.py` \
A python script to extract the information from an sdnc (A1 Controller) reply json. Helper for the test environment.
-`gateway_api_functions.sh` \
-Contains functions for managing the Non-RT RIC Gateway
+`genstat.sh` \
+This script collects container statistics to a file. Works both in docker and kubernetes (only for docker runtime).
+
+`helmmanager_api_functions.sh` \
+Contains functions for managing and testing of the Helm Manager.
`http_proxy_api_functions.sh` \
-Contains functions for managing the Http Proxy
+Contains functions for managing the Http Proxy.
+
+`ics_api_functions.sh` \
+Contains functions for adapting towards the Information Coordinator Service API.
+
+`kafkapc_api_functions.sh` \
+Contains functions for managing the kafka producer/consumer. Kafka is started by the dmaap message router component.
`kube_proxy_api_functions.sh` \
-Contains functions for managing the Kube Proxy - to gain access to all services pod inside a kube cluster.
+Contains functions for managing the Kube Proxy - to gain access to all services pod inside a kube cluster or all containers in a private docker network.
+
+`localhelm_api_functions.sh` \
+Contains functions for helm access on localhost.
`mr_api_functions.sh` \
Contains functions for managing the MR Stub and the Dmaap Message Router
+`ngw_api_functions.sh` \
+Contains functions for managing the Non-RT RIC Gateway
+
+`pa_api_functions.sh` \
+Contains functions for adapting towards the Policy Management Service (PMS) API, also via dmaap (using a message-router stub interface)
+
`prodstub_api_functions.sh` \
Contains functions for adapting towards the Producer stub interface - simulates a producer.
-`rapp_catalogue_api_functions.sh` \
+`pvccleaner_api_functions.sh` \
+Contains functions for managing the PVC Cleaner (used for reset mounted volumes in kubernetes).
+
+`rc_api_functions.sh` \
Contains functions for adapting towards the RAPP Catalogue.
-`ricsimulator_api_functions.sh` \
+`ricsim_api_functions.sh` \
Contains functions for adapting towards the RIC (A1) simulator admin API.
+`sdnc_api_functions.sh` \
+Contains functions for adapting towards the SDNC (used as an A1 controller).
+
`test_env*.sh` \
Common env variables for test in the auto-test dir. All configuration of port numbers, image names and version etc shall be made in this file.
Used by the auto test scripts/suites but could be used for other test script as well. The test cases shall be started with the file for the intended target using command line argument '--env-file'.
Common functions for auto test cases in the auto-test dir. This script is the foundation of the test auto environment which sets up images and environment variables needed by this script as well as the script adapting to the APIs.
The included functions are described in detail further below.
+`testengine_config.sh` \
+Configuration file to setup the applications (components and simulators) the test environment handles.
+
`testsuite_common.sh` \
Common functions for running two or more auto test scripts as a suite.
| __<app-short_name>_kube_scale_zero |
| __<app-short_name>_kube_scale_zero_and_wait |
| __<app-short_name>_kube_delete_all |
+| __<app-short_name>_store_docker_logs |
+| __<app-short_name>_initial_setup |
+| __<app-short_name>_statisics_setup |
+| __<app-short_name>_test_requirements |
In addition, all other functions used for testing of the application shall also be added to the file. For example functions to start the application, setting interface parameters as well as functions to send rest call towards the api of the application and validating the result.
| `--use-staging-image` | The script will use images from the nexus staging repo for the supplied apps, space separated list of app short names |
| `--use-release-image` | The script will use images from the nexus release repo for the supplied apps, space separated list of app short names |
| `--image-repo` | Url to optional image repo. Only locally built images will be re-tagged and pushed to this repo |
-| `-repo-policy` | Policy controlling which images to re-tag and push to image repo in param --image-repo. Can be set to 'local' (push on locally built images) or 'remote' (push locally built images and images from nexus repo). Default is 'local' |
+| `-repo-policy` | Policy controlling which images to re-tag and push to image repo in param --image-repo. Can be set to 'local' (push only locally built images) or 'remote' (push locally built images and images from nexus repo). Default is 'local' |
| `--cluster-timeout` | Optional timeout for cluster where it takes time to obtain external ip/host-name. Timeout in seconds |
| `--print-stats` | Prints the number of tests, failed tests, failed configuration and deviations after each individual test or config |
| `--override <file>` | Override setting from the file supplied by --env-file |
| `--pre-clean` | Clean kube resouces when running docker and vice versa |
| `--gen-stats` | Collect container/pod runtime statistics |
+| `--delete-namespaces` | Delete kubernetes namespaces before starting tests - but only those created by the test scripts. Kube mode only. Ignored if running with prestarted apps. |
+| `--delete-containers` | Delete docker containers before starting tests - but only those created by the test scripts. Docker mode only. |
+| `--endpoint-stats` | Collect http endpoint statistics |
| `help` | Print this info along with the test script description and the list of app short names supported |
## Function: setup_testenvironment ##
|--|
| None |
-## Function: indent1 ##
-
-Indent every line of a command output with one space char.
-| arg list |
-|--|
-| None |
-
-## Function: indent2 ##
-
-Indent every line of a command output with two space chars.
-| arg list |
-|--|
-| None |
-
## Function: print_result ##
Print a test report of an auto-test script.
Start a timer for time measurement. Only one timer can be running.
| arg list |
|--|
+| `<timer-message-to-print>` |
| None - but any args will be printed (It is good practice to use same args for this function as for the `print_timer`) |
## Function: print_timer ##
-Print the value of the timer (in seconds) previously started by 'start_timer'. (Note that timer is still running after this function). The result of the timer as well as the args to the function will also be printed in the test report.
+Print the value of the timer (in seconds) previously started by 'start_timer'. (Note that timer is still running after this function). The result of the timer as well as the arg to 'start_timer' will also be printed in the test report.
| arg list |
|--|
-| `<timer-message-to-print>` |
-
-| parameter | description |
-| --------- | ----------- |
-| `<timer-message-to-print>` | Any text message to be printed along with the timer result.(It is good practice to use same args for this function as for the `start_timer`) |
+| None |
## Function: deviation ##
| `<sleep-time-in-sec>` | Number of seconds to sleep |
| `<any-text-in-quotes-to-be-printed>` | Optional. The text will be printed, if present |
-## Function: check_control_panel_logs ##
-
-Check the Control Panel log for any warnings and errors and print the count of each.
-| arg list |
-|--|
-| None |
-
## Function: store_logs ##
-Take a snap-shot of all logs for all running containers and stores them in `./logs/<ATC-id>`. All logs will get the specified prefix in the file name. In general, one of the last steps in an auto-test script shall be to call this function. If logs shall be taken several times during a test script, different prefixes shall be used each time.
+Take a snap-shot of all logs for all running containers/pods and stores them in `./logs/<ATC-id>`. All logs will get the specified prefix in the file name. In general, one of the last steps in an auto-test script shall be to call this function. If logs shall be taken several times during a test script, different prefixes shall be used each time.
| arg list |
|--|
| `<logfile-prefix>` |
|--|
| None |
-# Description of functions in agent_api_functions.sh #
-
-## General ##
-
-Both PMS version 1 and 2 are supported. The version is controlled by the env variable `$PMS_VERSION` set in the test env file.
-For api function in version 2, an url prefix is added if configured.
+# Description of functions in chartmus_api_function.sh #
-## Function: use_agent_rest_http ##
+## Function: start_chart_museum ##
-Use http for all API calls to the Policy Agent. This is the default.
+Start the Chart Museum
| arg list |
|--|
| None |
-## Function: use_agent_rest_https ##
+## Function: chartmus_upload_test_chart ##
-Use https for all API calls to the Policy Agent.
+Upload a packaged chart to chartmuseum
| arg list |
|--|
-| None |
+| `<chart-name>` |
-## Function: use_agent_dmaap_http ##
+| parameter | description |
+| --------- | ----------- |
+| `<chart-name>` | Name of the chart to upload |
-Send and recieve all API calls to the Policy Agent over Dmaap via the MR over http.
+## Function: chartmus_delete_test_chart ##
+
+Delete a chart in chartmuseum
| arg list |
|--|
-| None |
+| `<chart-name> [<version>]` |
-## Function: use_agent_dmaap_https ##
+| parameter | description |
+| --------- | ----------- |
+| `<chart-name>` | Name of the chart to delete |
+| `<version>` | Chart version, default is 0.1.0 |
-Send and recieve all API calls to the Policy Agent over Dmaap via the MR over https.
-| arg list |
-|--|
-| None |
-## Function: start_policy_agent ##
+# Description of functions in consul_api_function.sh #
+
+## Function: consul_config_app ##
+
+Function to load a json config from a file into consul for the Policy Agent
-Start the Policy Agent container or corresponding kube resources depending on docker/kube mode.
| arg list |
|--|
-| `<logfile-prefix>` |
-| (docker) `PROXY\|NOPROXY <config-file>` |
-| (kube) `PROXY\|NOPROXY <config-file> [ <data-file> ]` |
+| `<json-config-file>` |
| parameter | description |
| --------- | ----------- |
-| `PROXY` | Configure with http proxy, if proxy is started |
-| `NOPROXY` | Configure without http proxy |
-| `<config-file>`| Path to application.yaml |
-| `<data-file>` | Optional path to application_configuration.json |
+| `<json-config-file>` | The path to the json file to be loaded to Consul/CBS |
-## Function: agent_load_config ##
+## Function: start_consul_cbs ##
-Load the config into a config map (kubernetes only).
+Start the Consul and CBS containers
| arg list |
|--|
-| `<data-file>` |
+| None |
-| parameter | description |
-| --------- | ----------- |
-| `<data-file>` | Path to application_configuration.json |
+# Description of functions in cp_api_function.sh #
-## Function: set_agent_debug ##
+## Function: use_control_panel_http ##
-Configure the Policy Agent log on debug level. The Policy Agent must be running.
+Set http as the protocol to use for all communication to the Control Panel
| arg list |
|--|
| None |
-## Function: set_agent_trace ##
+## Function: use_control_panel_https ##
-Configure the Policy Agent log on trace level. The Policy Agent must be running.
+Set https as the protocol to use for all communication to the Control Panel
| arg list |
|--|
| None |
-## Function: use_agent_retries ##
+## Function: start_control_panel ##
-Configure the Policy Agent to make upto 5 retries if an API calls return any of the specified http return codes.
+Start the Control Panel container
| arg list |
|--|
-| `[<response-code>]*` |
+| None |
-## Function: check_policy_agent_logs ##
+# Description of functions in cr_api_functions.sh #
-Check the Policy Agent log for any warnings and errors and print the count of each.
+## Function: use_cr_http ##
+
+Use http for getting event from CR. The admin API is not affected. This is the default.
| arg list |
|--|
| None |
-## Function: api_equal() ##
-
-Tests if the array length of a json array in the Policy Agent simulator is equal to a target value.
-Without the timeout, the test sets pass or fail immediately depending on if the array length is equal to the target or not.
-With the timeout, the test waits up to the timeout seconds before setting pass or fail depending on if the array length becomes equal to the target value or not.
-See the 'cr' dir for more details.
+## Function: use_cr_https ##
+Use https for getting event from CR. The admin API is not affected.
+Note: Not yet used as callback event is not fully implemented/decided.
| arg list |
|--|
-| `<variable-name> <target-value> [ <timeout-in-sec> ]` |
+| None |
-| parameter | description |
-| --------- | ----------- |
-| `<variable-name>` | Relative url. Example 'json:policy_types' - checks the json array length of the url /policy_types |
-| `<target-value>` | Target value for the length |
-| `<timeout-in-sec>` | Max time to wait for the length to reach the target value |
+## Function: start_cr ##
-## Function: api_get_policies() ##
+Start the Callback Receiver container in docker or kube depending on start mode.
+| arg list |
+|--|
+| None |
-Test of GET '/policies' or V2 GET '/v2/policy-instances' and optional check of the array of returned policies.
-To test the response code only, provide the response code parameter as well as the following three parameters.
-To also test the response payload add the 'NOID' for an expected empty array or repeat the last five/seven parameters for each expected policy.
+## Function: cr_equal ##
+Tests if a variable value in the Callback Receiver (CR) simulator is equal to a target value.
+Without the timeout, the test sets pass or fail immediately depending on if the variable is equal to the target or not.
+With the timeout, the test waits up to the timeout seconds before setting pass or fail depending on if the variable value becomes equal to the target value or not.
+See the 'cr' dir for more details.
| arg list |
|--|
-| `<response-code> <ric-id>\|NORIC <service-id>\|NOSERVICE <policy-type-id>\|NOTYPE [ NOID \| [<policy-id> <ric-id> <service-id> EMPTY\|<policy-type-id> <template-file>]*]` |
+| `<cr-path-id> <variable-name> <target-value> [ <timeout-in-sec> ]` |
-| arg list V2 |
+| parameter | description |
+| --------- | ----------- |
+| `<cr-path-id>` | Variable index to CR |
+| `<variable-name>` | Variable name in the CR |
+| `<target-value>` | Target value for the variable |
+| `<timeout-in-sec>` | Max time to wait for the variable to reach the target value |
+
+## Function: cr_greater_or_equal ##
+Tests if a variable value in the Callback Receiver (CR) simulator is equal to or greater than a target value.
+Without the timeout, the test sets pass or fail immediately depending on if the variable is equal to or greater than the target or not.
+With the timeout, the test waits up to the timeout seconds before setting pass or fail depending on if the variable value becomes equal to the target value or not.
+See the 'cr' dir for more details.
+| arg list |
|--|
-| `<response-code> <ric-id>\|NORIC <service-id>\|NOSERVICE <policy-type-id>\|NOTYPE [ NOID \| [<policy-id> <ric-id> <service-id> EMPTY\|<policy-type-id> <transient> <notification-url> <template-file>]*]` |
+| `<cr-path-id> <variable-name> <target-value> [ <timeout-in-sec> ]` |
| parameter | description |
| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `<ric-id>` | Id of the ric |
-| `NORIC` | Indicator that no ric is provided |
-| `<service-id>` | Id of the service |
-| `NOSERVICE` | Indicator that no service id is provided |
-| `<policy-type-id>` | Id of the policy type |
-| `NOTYPE` | Indicator that no type id is provided |
-| `NOID` | Indicator that no policy id is provided - indicate empty list of policies|
-| `<policy-id>` | Id of the policy |
-| `EMPTY` | Indicate for the special empty policy type |
-| `transient` | Transient, true or false |
-| `notification-url` | Url for notifications |
-| `<template-file>` | Path to the template file for the policy (same template used when creating the policy) |
+| `<cr-path-id>` | Variable index to CR |
+| `<variable-name>` | Variable name in the CR |
+| `<target-value>` | Target value for the variable |
+| `<timeout-in-sec>` | Max time to wait for the variable to reach the target value |
-## Function: api_get_policy() ##
+## Function: cr_contains_str ##
-Test of GET '/policy' or V2 GET '/v2/policies/{policy_id}' and optional check of the returned json payload.
-To test the the response code only, provide the expected response code and policy id.
-To test the contents of the returned json payload, add a path to the template file used when creating the policy.
+Tests if a variable value in the CR contains a target string.
+Without the timeout, the test sets pass or fail immediately depending on if the variable contains the target string or not.
+With the timeout, the test waits up to the timeout seconds before setting pass or fail depending on if the variable value contains the target string or not.
+See the 'a1-interface' repo for more details.
| arg list |
|--|
-| `<response-code> <policy-id> [<template-file>]` |
+| `<cr-path-id> <variable-name> <target-value> [ <timeout-in-sec> ]` |
-| arg list V2|
-|--|
-| `<response-code> <policy-id> [ <template-file> <service-name> <ric-id> <policytype-id>\|NOTYPE <transient> <notification-url>\|NOURL ]` |
| parameter | description |
| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `<policy-id>` | Id of the policy |
-| `<template-file>` | Path to the template file for the policy (same template used when creating the policy) |
-| `<service-id>` | Id of the service |
-| `<ric-id>` | Id of the ric |
-| `<policy-type-id>` | Id of the policy type |
-| `NOTYPE` | Indicator that no type id is provided |
-| `transient` | Transient, true or false |
-| `notification-url` | Url for notifications |
-
-## Function: api_put_policy() ##
+| `<cr-path-id>` | Variable index to CR |
+| `<variable-name>` | Variable name in the CR |
+| `<target-value>` | Target substring for the variable |
+| `<timeout-in-sec>` | Max time to wait for the variable to reach the target value |
-Test of PUT '/policy' or V2 PUT '/policies'.
-If more than one policy shall be created, add a count value to indicate the number of policies to create. Note that if more than one policy shall be created the provided policy-id must be numerical (will be used as the starting id).
+## Function: cr_read ##
+Reads the value of a variable in the CR simulator. The value is intended to be passed to an env variable in the test script.
+See the 'mrstub' dir for more details.
| arg list |
|--|
-| `<response-code> <service-name> <ric-id> <policytype-id> <policy-id> <transient> <template-file> [<count>]` |
-
-| arg list V2 |
-|--|
-| `<response-code> <service-name> <ric-id> <policytype-id>\|NOTYPE <policy-id> <transient>\|NOTRANSIENT <notification-url>\|NOURL <template-file> [<count>]` |
+| `<cr-path-id> <variable-name>` |
| parameter | description |
| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `<service-id>` | Id of the service |
-| `<ric-id>` | Id of the ric |
-| `<policy-type-id>` | Id of the policy type |
-| `<policy-id>` | Id of the policy. This value shall be a numeric value if more than one policy shall be created |
-| `transient>` | Transient 'true' or 'false'. 'NOTRANSIENT' can be used to indicate using the default value (no transient value provided) |
-| `notification-url` | Url for notifications |
-|`NOURL`| Indicator for no url |
-| `<template-file>` | Path to the template file for the policy |
-| `<count>` | An optional count (default is 1). If a value greater than 1 is given, the policy ids will use the given policy id as the first id and add 1 to that id for each new policy |
-
-## Function: api_put_policy_batch() ##
-
-This tests the same as function 'api_put_policy' except that all put requests are sent to dmaap in one go and then the responses are polled one by one.
-If the agent api is not configured to use dmaap (see 'use_agent_dmaap', 'use_agent_rest_http' and 'use_agent_rest_https'), an error message is printed.
-For arg list and parameters, see 'api_put_policy'.
+| `<cr-path-id>` | Variable index to CR |
+| `<variable-name>` | Variable name in the CR |
-## Function: api_put_policy_parallel() ##
+## Function: cr_delay_callback ##
-This tests the same as function 'api_put_policy' except that the policy create is spread out over a number of processes and it only uses the agent rest API. The total number of policies created is determined by the product of the parameters 'number-of-rics' and 'count'. The parameter 'number-of-threads' shall be selected to be not evenly divisible by the product of the parameters 'number-of-rics' and 'count' - this is to ensure that one process does not handle the creation of all the policies in one ric.
+Function to configure write delay on callbacks. Delay given in seconds. Setting remains until removed.
| arg list |
|--|
-| `<response-code> <service-name> <ric-id-base> <number-of-rics> <policytype-id> <policy-start-id> <transient> <template-file> <count-per-ric> <number-of-threads>`
+| `<response-code> <cr-path-id> [<delay-in-seconds>]`|
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `<cr-path-id>` | Variable index to CR |
+| `<delay-in-seconds>` | Delay in seconds. If omitted, the delay is removed |
+
+## Function: cr_api_check_all_sync_events ##
+
+Check the contents of all ric events received for a callback id.
| arg list |
|--|
-| `<response-code> <service-name> <ric-id-base> <number-of-rics> <policytype-id> <policy-start-id> <transient> <notification-url>\|NOURL <template-file> <count-per-ric> <number-of-threads>`
+| `<response-code> <cr-path-id> <id> [ EMPTY \| ( <ric-id> )+ ]` |
| parameter | description |
| --------- | ----------- |
| `<response-code>` | Expected http response code |
-| `<service-id>` | Id of the service |
-| `<ric-id-base>` | The base id of the rics, ie ric id without the sequence number. The sequence number is added during processing |
-| `<number-of-rics>` | The number of rics, assuming the first index is '1'. The index is added to the 'ric-id-base' id |
-| `<policy-type-id>` | Id of the policy type |
-| `<policy-start-id>` | Id of the policy. This value shall be a numeric value and will be the id of the first policy |
-| `transient>` | Transient 'true' or 'false'. 'NOTRANSIENT' can be used to indicate using the default value (no transient value provide) |
-| `notification-url` | Url for notifications |
-| `<template-file>` | Path to the template file for the policy |
-| `<count-per-ric>` | Number of policies per ric |
-| `<number-of-threads>` | Number of threads (processes) to run in parallel |
+| `<cr-path-id>` | Variable index for CR |
+| `<id>` | Id of the callback destination |
+| `EMPTY` | Indicator for an empty list |
+| `<ric-id>` | Id of the ric |
-## Function: api_delete_policy() ##
+## Function: cr_api_check_all_ics_events ##
-This tests the DELETE '/policy' or V2 DELETE '/v2/policies/{policy_id}'. Removes the indicated policy or a 'count' number of policies starting with 'policy-id' as the first id.
+Check the contents of all current status events for one id from ICS
| arg list |
|--|
-| `<response-code> <policy-id> [<count>]`
+| `<response-code> <cr-path-id> <id> [ EMPTY \| ( <status> )+ ]` |
| parameter | description |
| --------- | ----------- |
| `<response-code>` | Expected http response code |
-| `<policy-id>` | Id of the policy |
-| `<count>` | An optional count of policies to delete. The 'policy-id' will be the first id to be deleted. |
-
-## Function: api_delete_policy_batch() ##
-
-This tests the same as function 'api_delete_policy' except that all delete requests are sent to dmaap in one go and then the responses are polled one by one.
-If the agent api is not configured to used dmaap (see 'use_agent_dmaap', 'use_agent_rest_http' and 'use_agent_rest_https'), an error message is printed.
-For arg list and parameters, see 'api_delete_policy'.
+| `<cr-path-id>` | Variable index for CR |
+| `<id>` | Id of the callback destination |
+| `EMPTY` | Indicator for an empty list |
+| `<status>` | Status string |
-## Function: api_delete_policy_parallel() ##
+## Function: cr_api_check_all_ics_subscription_events ##
-This tests the same as function 'api_delete_policy' except that the policy delete is spread out over a number of processes and it only uses the agent rest API. The total number of policies deleted is determined by the product of the parameters 'number-of-rics' and 'count'. The parameter 'number-of-threads' shall be selected to be not evenly divisible by the product of the parameters 'number-of-rics' and 'count' - this is to ensure that one process does not handle the deletion of all the policies in one ric.
+Check the contents of all current subscription events for one id from ICS
| arg list |
|--|
-| `<response-code> <ric-id-base> <number-of-rics> <policy-start-id> <count-per-ric> <number-of-threads>`
+| `<response-code> <cr-path-id> <id> [ EMPTY | ( <type-id> <schema> <registration-status> )+ ]` |
| parameter | description |
| --------- | ----------- |
| `<response-code>` | Expected http response code |
-| `<ric-id-base>` | The base id of the rics, ie ric id without the sequence number. The sequence number is added during processing |
-| `<number-of-rics>` | The number of rics, assuming the first index is '1' |
-| `<policy-start-id>` | Id of the policy. This value shall be a numeric value and will be the id of the first policy |
-| `<count-per-ric>` | Number of policies per ric |
-| `<number-of-threads>` | Number of threads (processes) to run in parallel |
+| `<cr-path-id>` | Variable index for CR |
+| `<id>` | Id of the callback destination |
+| `EMPTY` | Indicator for an empty list |
+| `<type-id>` | Id of the data type |
+| `<schema>` | Path to typeschema file |
+| `<registration-status>` | Status string |
-## Function: api_get_policy_ids() ##
+## Function: cr_api_reset ##
-Test of GET '/policy_ids' or V2 GET '/v2/policies'.
-To test response code only, provide the response code parameter as well as the following three parameters.
-To also test the response payload add the 'NOID' for an expected empty array or repeat the 'policy-instance-id' for each expected policy id.
+Reset the callback receiver
| arg list |
|--|
-| `<response-code> <ric-id>\|NORIC <service-id>\|NOSERVICE <type-id>\|NOTYPE ([<policy-instance-id]*\|NOID)` |
+| `<cr-path-id>` |
| parameter | description |
| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `<ric-id>` | Id of the ric |
-| `NORIC` | Indicator that no ric is provided |
-| `<service-id>` | Id of the service |
-| `NOSERVICE` | Indicator that no service id is provided |
-| `type-id>` | Id of the policy type |
-| `NOTYPE` | Indicator that no type id is provided |
-| `NOID` | Indicator that no policy id is provided - indicate empty list of policies|
-| `<policy-instance-id>` | Id of the policy |
+| `<cr-path-id>` | Variable index for CR |
-## Function: api_get_policy_schema() ##
+## Function: cr_api_check_all_genric_json_events ##
-Test of V2 GET '/v2/policy-types/{policyTypeId}' and optional check of the returned json schema.
-To test the response code only, provide the expected response code and policy type id.
-To test the contents of the returned json schema, add a path to a schema file to compare with.
+Check the contents of all json events for path
| arg list |
|--|
-| `<response-code> <policy-type-id> [<schema-file>]` |
+| `<response-code> <cr-path-id> <topic-url> (EMPTY | <json-msg>+ )` |
| parameter | description |
| --------- | ----------- |
| `<response-code>` | Expected http response code |
-| `<policy-type-id>` | Id of the policy type |
-| `<schema-file>` | Path to the schema file for the policy type |
+| `<cr-path-id>` | Variable index for CR |
+| `<topic-url>` | Topic url |
+| `EMPTY` | Indicator for an empty list |
+| `json-msg` | Json msg string to compare with |
-## Function: api_get_policy_schema() ##
+## Function: cr_api_check_single_genric_json_event ##
-Test of GET '/policy_schema' and optional check of the returned json schema.
-To test the response code only, provide the expected response code and policy type id.
-To test the contents of the returned json schema, add a path to a schema file to compare with.
+Check a single (oldest) json event (or none if empty) for path
| arg list |
|--|
-| `<response-code> <policy-type-id> [<schema-file>]` |
+| `<response-code> <cr-path-id> <topic-url> (EMPTY | <json-msg> )` |
| parameter | description |
| --------- | ----------- |
| `<response-code>` | Expected http response code |
-| `<policy-type-id>` | Id of the policy type |
-| `<schema-file>` | Path to the schema file for the policy type |
+| `<cr-path-id>` | Variable index for CR |
+| `<topic-url>` | Topic url |
+| `EMPTY` | Indicator for no msg |
+| `json-msg` | Json msg string to compare with |
-## Function: api_get_policy_schemas() ##
+## Function: cr_api_check_single_genric_event_md5 ##
-Test of GET '/policy_schemas' and optional check of the returned json schemas.
-To test the response code only, provide the expected response code and ric id (or NORIC if no ric is given).
-To test the contents of the returned json schema, add a path to a schema file to compare with (or NOFILE to represent an empty '{}' type)
+Check a single (oldest) json in md5 format (or none if empty) for path.
+Note that if a json message is given, it shall be compact, with no whitespace except inside strings.
+The MD5 will generate a different hash if whitespace is present or not in otherwise equivalent json.
| arg list |
|--|
-| `<response-code> <ric-id>\|NORIC [<schema-file>\|NOFILE]*` |
+| `<response-code> <cr-path-id> <topic-url> (EMPTY | <data-msg> )` |
| parameter | description |
| --------- | ----------- |
| `<response-code>` | Expected http response code |
-| `<ric-id>` | Id of the ric |
-| `NORIC` | No ric id given |
-| `<schema-file>` | Path to the schema file for the policy type |
-| `NOFILE` | Indicate the template for an empty type |
+| `<cr-path-id>` | Variable index for CR |
+| `<topic-url>` | Topic url |
+| `EMPTY` | Indicator for no msg |
+| `data-msg` | msg string to compare with |
-## Function: api_get_policy_status() ##
+## Function: cr_api_check_single_genric_event_md5_file ##
-Test of GET '/policy_status' or V2 GET '/policies/{policy_id}/status'.
+Check a single (oldest) event in md5 format (or none if empty) for path.
+Note that if a file with a json message is given, the json shall be compact, with no whitespace except inside strings, and no newlines.
+The MD5 will generate a different hash if whitespace/newlines are present or not in otherwise equivalent json.
| arg list |
|--|
-| `<response-code> <policy-id> (STD\|STD2 <enforce-status>\|EMPTY [<reason>\|EMPTY])\|(OSC <instance-status> <has-been-deleted>)` |
+| `<response-code> <cr-path-id> <topic-url> (EMPTY | <data-file> )` |
| parameter | description |
| --------- | ----------- |
| `<response-code>` | Expected http response code |
-| `<policy-id>` | Id of the policy |
-| `STD` | Indicator of status of Standarized A1 |
-| `STD2` | Indicator of status of Standarized A1 version 2 |
-| `<enforce-status>` | Enforcement status |
-| `<reason>` | Optional reason |
-| `EMPTY` | Indicator of empty string status or reason |
-| `OSC` | Indicator of status of Non-Standarized OSC A1 |
-| `<instance-status>` | Instance status |
-| `<has-been-deleted>` | Deleted status, true or false |
+| `<cr-path-id>` | Variable index for CR |
+| `<topic-url>` | Topic url |
+| `EMPTY` | Indicator for no msg |
+| `data-file` | path to file to compare with |
-## Function: api_get_policy_types() ##
+# Description of functions in dmaapadp_api_functions.sh #
-Test of GET '/policy_types' or V2 GET '/v2/policy-types' and optional check of the returned ids.
-To test the response code only, provide the expected response code and ric id (or NORIC if no ric is given).
-To test the contents of the returned json payload, add the list of expected policy type id (or 'EMPTY' for the '{}' type)
+## Function: use_dmaapadp_http ##
+
+Use http for all proxy requests. Note that this is only applicable to the actual proxy request; the proxied protocol can still be http or https.
| arg list |
|--|
-| `<response-code> [<ric-id>\|NORIC [<policy-type-id>\|EMPTY [<policy-type-id>]*]]` |
+| None |
-| parameter | description |
-| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `<ric-id>` | Id of the ric |
-| `NORIC` | No ric id given |
-| `<policy-type-id>` | Id of the policy type |
-| `EMPTY` | Indicate the empty type |
+## Function: use_dmaapadp_https ##
-## Function: api_get_status() ##
+Use https for all proxy requests. Note that this is only applicable to the actual proxy request; the proxied protocol can still be http or https.
-Test of GET /status or V2 GET /status
+| arg list |
+|--|
+| None |
+
+## Function: start_dmaapadp ##
+
+Start the dmaap adaptor service container in docker or kube depending on running mode.
| arg list |
|--|
-| `<response-code>` |
+| (kube) `PROXY\|NOPROXY <config-file> [ <data-file> ]` |
| parameter | description |
| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-
-## Function: api_get_ric() ##
+| `PROXY` | Configure with http proxy, if proxy is started |
+| `NOPROXY` | Configure without http proxy |
+| `<config-file>`| Path to application.yaml |
+| `<data-file>` | Optional path to application_configuration.json |
-Test of GET '/ric' or V2 GET '/v2/rics/ric'
-To test the response code only, provide the expected response code and managed element id.
-To test the returned ric id, provide the expected ric id.
+## Function: set_dmaapadp_trace ##
+Configure the dmaap adaptor service log on trace level. The app must be running.
| arg list |
|--|
-| `<reponse-code> <managed-element-id> [<ric-id>]` |
+| None |
-| arg list V2 |
+# Description of functions in dmaapmed_api_functions.sh #
+
+## Function: use_dmaapmed_http ##
+
+Use http for all proxy requests. Note that this is only applicable to the actual proxy request; the proxied protocol can still be http or https.
+
+| arg list |
|--|
-| `<reponse-code> <management-element-id>\|NOME <ric-id>\|<NORIC> [<string-of-ricinfo>]` |
+| None |
-| parameter | description |
-| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `<managed-element-id>` | Id of the managed element |
-| `NOME` | Indicator for no ME |
-| `ric-id` | Id of the ric |
-| `NORIC` | Indicator no RIC |
-| `string-of-ricinfo` | String of ric info |
+## Function: use_dmaapmed_https ##
-## Function: api_get_rics() ##
+Use https for all proxy requests. Note that this is only applicable to the actual proxy request; the proxied protocol can still be http or https.
-Test of GET '/rics' or V2 GET '/v2/rics' and optional check of the returned json payload (ricinfo).
-To test the response code only, provide the expected response code and policy type id (or NOTYPE if no type is given).
-To test also the returned payload, add the formatted string of info in the returned payload.
-Format of ricinfo: <br>`<ric-id>:<list-of-mes>:<list-of-policy-type-ids>`<br>
-Example <br>`<space-separate-string-of-ricinfo> = "ricsim_g1_1:me1_ricsim_g1_1,me2_ricsim_g1_1:1,2,4 ricsim_g1_1:me2_........."`
+| arg list |
+|--|
+| None |
+
+## Function: start_dmaapmed ##
+
+Start the dmaap mediator service container in docker or kube depending on running mode.
| arg list |
|--|
-| `<reponse-code> <policy-type-id>\|NOTYPE [<space-separate-string-of-ricinfo>]` |
+| None |
-| parameter | description |
-| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `<policy-type-id>` | Policy type id of the ric |
-| `NOTYPE>` | No type given |
-| `<space-separate-string-of-ricinfo>` | A space separated string of ric info - needs to be quoted |
+# Description of functions in httpproxy_api_functions.sh #
-## Function: api_put_service() ##
+## Function: use_http_proxy_http ##
-Test of PUT '/service' or V2 PUT '/service'.
+Use http for all proxy requests. Note that this is only applicable to the actual proxy request; the proxied protocol can still be http or https.
| arg list |
|--|
-| `<response-code> <service-name> <keepalive-timeout> <callbackurl>` |
+| None |
-| parameter | description |
-| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `<service-name>` | Service name |
-| `<keepalive-timeout>` | Timeout value |
-| `<callbackurl>` | Callback url |
+## Function: use_http_proxy_https ##
-## Function: api_get_services() ##
+Use https for all proxy requests. Note that this is only applicable to the actual proxy request; the proxied protocol can still be http or https.
+| arg list |
+|--|
+| None |
-Test of GET '/service' or V2 GET '/v2/services' and optional check of the returned json payload.
-To test only the response code, omit all parameters except the expected response code.
-To test the returned json, provide the parameters after the response code.
+## Function: start_http_proxy ##
+Start the http proxy container in docker or kube depending on running mode.
| arg list |
|--|
-| `<response-code> [ (<query-service-name> <target-service-name> <keepalive-timeout> <callbackurl>) \| (NOSERVICE <target-service-name> <keepalive-timeout> <callbackurl> [<target-service-name> <keepalive-timeout> <callbackurl>]* )]` |
+| None |
-| parameter | description |
-| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `<query-service-name>` | Service name for the query |
-| `<target-service-name>` | Target service name|
-| `<keepalive-timeout>` | Timeout value |
-| `<callbackurl>` | Callback url |
-| `NOSERVICE` | Indicator of no target service name |
+# Description of functions in helmmanager_api_functions.sh #
-## Function: api_get_service_ids() ##
+## Function: use_helm_manager_http ##
-Test of GET '/services' or V2 GET /'v2/services'. Only check of service ids.
+Use http for all API calls to the Helm Manager. This is the default protocol.
+| arg list |
+|--|
+| None |
+## Function: use_helm_manager_https ##
+
+Use https for all API calls to the Helm Manager.
| arg list |
|--|
-| `<response-code> [<service-name>]*` |
+| None |
-| parameter | description |
-| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `<service-name>` | Service name |
+## Function: start_helm_manager ##
-## Function: api_delete_services() ##
+Start the Helm Manager container in docker or kube depending on running mode.
+| arg list |
+|--|
+| None |
-Test of DELETE '/services' or V2 DELETE '/v2/services/{serviceId}'
+## Function: helm_manager_api_get_charts ##
+Get all charts and compare the expected contents.
| arg list |
|--|
-| `<response-code> [<service-name>]*` |
+| `<response-code> [ EMPTY | ( <chart> <version> <namespace> <release> <repo> )+ ]` |
| parameter | description |
| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `<service-name>` | Service name |
+| `<response-code>` | Expected response code |
+| `EMPTY` | Indicator for empty list |
+| `<chart>`| Name of the chart |
+| `<version>`| Version of the chart |
| `<namespace>`| Namespace of the chart |
+| `<release>`| Release name of the chart |
+| `<repo>`| Repository of the chart |
-## Function: api_put_services_keepalive() ##
-
-Test of PUT '/services/keepalive' or V2 PUT '/v2/services/{service_id}/keepalive'
+## Function: helm_manager_api_post_repo ##
+Add repo to the helm manager.
| arg list |
|--|
-| `<response-code> <service-name>` |
+| `<response-code> <repo-name> <repo-protocol> <repo-address> <repo-port>` |
| parameter | description |
| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `<service-name>` | Service name |
-
-## Function: api_put_configuration() ##
+| `<response-code>` | Expected response code |
+| `<repo-name>` | Name of the repo |
+| `<repo-protocol>`| Protocol http or https |
+| `<repo-address>`| Host name of the repo |
+| `<repo-port>`| Host port of the repo |
-Test of PUT '/v2/configuration'
+## Function: helm_manager_api_post_onboard_chart ##
+Onboard a chart to the helm manager.
| arg list |
|--|
-| `<response-code> <config-file>` |
+| `<response-code> <repo> <chart> <version> <release> <namespace>` |
| parameter | description |
| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `<config-file>` | Path json config file |
+| `<response-code>` | Expected response code |
+| `<repo>`| Target repo of the chart |
+| `<chart>`| Name of the chart |
+| `<version>`| Version of the chart |
+| `<namespace>`| Namespace of the chart |
+| `<release>`| Release name of the chart |
-## Function: api_get_configuration() ##
-
-Test of GET '/v2/configuration'
+## Function: helm_manager_api_post_install_chart ##
+Install an onboarded chart.
| arg list |
|--|
-| `<response-code> [<config-file>]` |
+| `<response-code> <chart> <version>` |
| parameter | description |
| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `<config-file>` | Path json config file to compare the retrieved config with |
+| `<response-code>` | Expected response code |
+| `<chart>`| Name of the chart |
+| `<version>`| Version of the chart |
-# Description of functions in consul_cbs_function.sh #
+## Function: helm_manager_api_uninstall_chart ##
-## Function: consul_config_app ##
+Uninstall a chart.
+| arg list |
+|--|
+| `<response-code> <chart> <version>` |
-Function to load a json config from a file into consul for the Policy Agent
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected response code |
+| `<chart>`| Name of the chart |
+| `<version>`| Version of the chart |
+
+## Function: helm_manager_api_delete_chart ##
+Delete a chart.
| arg list |
|--|
-| `<json-config-file>` |
+| `<response-code> <chart> <version>` |
| parameter | description |
| --------- | ----------- |
-| `<json-config-file>` | The path to the json file to be loaded to Consul/CBS |
+| `<response-code>` | Expected response code |
+| `<chart>`| Name of the chart |
+| `<version>`| Version of the chart |
-## Function: prepare_consul_config ##
+## Function: helm_manager_api_exec_add_repo ##
-Function to prepare a Consul config based on the previously configured (and started simulators). Note that all simulator must be running and the test script has to configure if http or https shall be used for the components (this is done by the functions 'use_simulator_http', 'use_simulator_https', 'use_sdnc_http', 'use_sdnc_https', 'use_mr_http', 'use_mr_https')
+Add a repo to the helm manager via the helm command using exec.
| arg list |
|--|
-| `<deviation-message-to-print>` |
+| `<repo-name> <repo-url>` |
| parameter | description |
| --------- | ----------- |
-| `SDNC\|NOSDNC` | Configure based on a1-controller (SNDC) or without a controller/adapter (NOSDNC) |
-| `<output-file>` | The path to the json output file containing the prepared config. This file is used in 'consul_config_app' |
+| `<response-code>` | Expected response code |
+| `<repo-name>`| Name of the repo |
+| `<repo-url>`| Full url to the repo. Url must be accessible by the container |
-## Function: start_consul_cbs ##
-Start the Consul and CBS containers
+# Description of functions in ics_api_functions.sh #
+
+## Function: use_ics_rest_http ##
+
+Use http for all API calls to the ICS. This is the default protocol.
| arg list |
|--|
| None |
-# Description of functions in control_panel_api_function.sh #
-
-## Function: use_control_panel_http ##
+## Function: use_ics_rest_https ##
-Set http as the protocol to use for all communication to the Control Panel
+Use https for all API calls to the ICS.
| arg list |
|--|
| None |
-## Function: use_control_panel_https ##
+## Function: use_ics_dmaap_http ##
-Set https as the protocol to use for all communication to the Control Panel
+Send and receive all API calls to the ICS over Dmaap via the MR using http.
| arg list |
|--|
| None |
-## Function: start_control_panel ##
+## Function: use_ics_dmaap_https ##
-Start the Control Panel container
+Send and receive all API calls to the ICS over Dmaap via the MR using https.
| arg list |
|--|
| None |
-# Description of functions in controller_api_functions.sh #
+## Function: start_ics ##
+
+Start the ICS container in docker or kube depending on running mode.
+| arg list |
+|--|
+| `PROXY|NOPROXY <config-file>` |
-The file contains a selection of the possible API tests towards the a1-controller
+| parameter | description |
+| --------- | ----------- |
+| `PROXY` | Configure with http proxy, if proxy is started |
+| `NOPROXY` | Configure without http proxy |
+| `<config-file>`| Path to application.yaml |
-## Function: use_sdnc_http ##
+## Function: stop_ics ##
-Use http for all API calls towards the SDNC A1 Controller. This is the default. Note that this function shall be called before preparing the config for Consul.
+Stop the ICS container.
| arg list |
|--|
| None |
-## Function: use_sdnc_https ##
+## Function: start_stopped_ics ##
-Use https for all API calls towards the SDNC A1 Controller. Note that this function shall be called before preparing the config for Consul.
+Start a previously stopped ics.
| arg list |
|--|
| None |
-## Function: start_sdnc ##
+## Function: set_ics_debug ##
-Start the SDNC A1 Controller container and its database container
+Configure the ICS log on debug level. The ICS must be running.
| arg list |
|--|
| None |
-## Function: check_sdnc_logs ##
+## Function: set_ics_trace ##
-Check the SDNC log for any warnings and errors and print the count of each.
+Configure the ICS log on trace level. The ICS must be running.
| arg list |
|--|
| None |
-## Function: controller_api_get_A1_policy_ids ##
-
-Test of GET policy ids towards OSC or STD type simulator.
-To test response code only, provide the response code, 'OSC' + policy type or 'STD'
-To test the response payload, include the ids of the expexted response.
+## Function: use_ics_retries ##
+Perform curl retries when making direct calls to ICS for the specified http response codes.
+Space separated list of http response codes.
| arg list |
|--|
-| `<response-code> (OSC <ric-id> <policy-type-id> [ <policy-id> [<policy-id>]* ]) \| ( STD <ric-id> [ <policy-id> [<policy-id>]* ]` |
+| `[<response-code>]*` |
| parameter | description |
| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `OSC` | Indicator of status of Non-Standarized OSC A1 |
-| `<ric-id>` | Id of the ric |
-| `policy-type-id>` | Id of the policy type |
-| `<policy-id>` | Id of the policy |
-| `STD` | Indicator of status of Standarized A1 |
-
-## Function: controller_api_get_A1_policy_type ##
-
-Test of GET a policy type (OSC only)
-
-| arg list |
-|--|
-| `<response-code> OSC <ric-id> <policy-type-id> [<policy-type-file>]` |
-
-| parameter | description |
-| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `OSC` | Indicator of status of Non-Standarized OSC A1 |
-| `<ric-id>` | Id of the ric |
-| `policy-type-id>` | Id of the policy type |
-| `policy-type-file>` | Optional schema file to compare the returned type with |
-
-## Function: controller_api_delete_A1_policy ##
-
-Deletes a policy instance
-
-| arg list |
-|--|
-| `(STD <ric-id> <policy-id>) \| (OSC <ric-id> <policy-type-id> <policy-id>)` |
-
-| parameter | description |
-| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `STD` | Indicator of status of Standarized A1 |
-| `<ric-id>` | Id of the ric |
-| `<policy-id>` | Id of the policy |
-| `policy-type-id>` | Id of the policy type |
-| `OSC` | Indicator of status of Non-Standarized OSC A1 |
-| `policy-type-file>` | Optional schema file to compare the returned type with |
-
-## Function: controller_api_put_A1_policy ##
-
-Creates a policy instance
-
-| arg list |
-|--|
-| `<response-code> (STD <ric-id> <policy-id> <template-file> ) \| (OSC <ric-id> <policy-type-id> <policy-id> <template-file>)` |
-
-| parameter | description |
-| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `STD` | Indicator of status of Standarized A1 |
-| `<ric-id>` | Id of the ric |
-| `<policy-id>` | Id of the policy |
-| `<template-file>` | Path to the template file of the policy|
-| `OSC` | Indicator of status of Non-Standarized OSC A1 |
-| `<policy-type-id>` | Id of the policy type |
-
-## Function: controller_api_get_A1_policy_status ##
-
-Checks the status of a policy
-
- arg list |
-|--|
-| `<response-code> (STD <ric-id> <policy-id> <enforce-status> [<reason>]) \| (OSC <ric-id> <policy-type-id> <policy-id> <instance-status> <has-been-deleted>)` |
-
-| parameter | description |
-| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `STD` | Indicator of status of Standarized A1 |
-| `<ric-id>` | Id of the ric |
-| `<policy-id>` | Id of the policy |
-| `<enforce-status>` | Enforcement status |
-| `<reason>` | Optional reason |
-| `OSC` | Indicator of status of Non-Standarized OSC A1 |
-| `<policy-type-id>` | Id of the policy type |
-| `<instance-status>` | Instance status |
-| `<has-been-deleted>` | Deleted status, true or false |
-
-# Description of functions in cr_api_functions.sh #
-
-## Function: use_cr_http ##
-
-Use http for getting event from CR. The admin API is not affected. This is the default.
-| arg list |
-|--|
-| None |
-
-## Function: use_cr_https ##
-
-Use https for getting event from CR. The admin API is not affected.
-Note: Not yet used as callback event is not fully implemented/deciced.
-| arg list |
-|--|
-| None |
-
-## Function: start_cr ##
-
-Start the Callback Receiver container in docker or kube depending on start mode.
-| arg list |
-|--|
-| None |
-
-## Function: cr_equal ##
-
-Tests if a variable value in the Callback Receiver (CR) simulator is equal to a target value.
-Without the timeout, the test sets pass or fail immediately depending on if the variable is equal to the target or not.
-With the timeout, the test waits up to the timeout seconds before setting pass or fail depending on if the variable value becomes equal to the target value or not.
-See the 'cr' dir for more details.
-| arg list |
-|--|
-| `<variable-name> <target-value> [ <timeout-in-sec> ]` |
-
-| parameter | description |
-| --------- | ----------- |
-| `<variable-name>` | Variable name in the CR |
-| `<target-value>` | Target value for the variable |
-| `<timeout-in-sec>` | Max time to wait for the variable to reach the target value |
-
-## Function: cr_api_check_all_sync_events() ##
-
-Check the contents of all ric events received for a callback id.
-
-| arg list |
-|--|
-| `<response-code> <id> [ EMPTY \| ( <ric-id> )+ ]` |
-
-| parameter | description |
-| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `<id>` | Id of the callback destination |
-| `EMPTY` | Indicator for an empty list |
-| `<ric-id>` | Id of the ric |
-
-## Function: cr_api_check_all_ics_events() ##
-
-Check the contents of all current status events for one id from ICS
-
-| arg list |
-|--|
-| `<response-code> <id> [ EMPTY \| ( <status> )+ ]` |
-
-| parameter | description |
-| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `<id>` | Id of the callback destination |
-| `EMPTY` | Indicator for an empty list |
-| `<status>` | Status string |
-
-## Function: cr_api_check_all_ics_subscription_events() ##
-
-Check the contents of all current subscription events for one id from ICS
-
-| arg list |
-|--|
-| `<response-code> <id> [ EMPTY | ( <type-id> <schema> <registration-status> )+ ]` |
-
-| parameter | description |
-| --------- | ----------- |
-| `<response-code>` | Expected http response code |
-| `<id>` | Id of the callback destination |
-| `EMPTY` | Indicator for an empty list |
-| `<type-id>` | Id of the data type |
-| `<schema>` | Path to typeschema file |
-| `<registration-status>` | Status string |
-
-
-## Function: cr_api_reset() ##
-
-Reset the callback receiver
-
-| arg list |
-|--|
-| - |
-
-
-# Description of functions in ics_api_functions.sh #
-
-## Function: use_ics_rest_http ##
-
-Use http for all API calls to the ICS. This is the default protocol.
-| arg list |
-|--|
-| None |
-
-## Function: use_ics_rest_https ##
-
-Use https for all API calls to the ICS.
-| arg list |
-|--|
-| None |
-
-## Function: use_ics_dmaap_http ##
-
-Send and recieve all API calls to the ICS over Dmaap via the MR using http.
-| arg list |
-|--|
-| None |
-
-## Function: use_ics_dmaap_https ##
-
-Send and recieve all API calls to the ICS over Dmaap via the MR using https.
-| arg list |
-|--|
-| None |
-
-## Function: start_ics ##
-
-Start the ICS container in docker or kube depending on running mode.
-| arg list |
-|--|
-| None |
-
-## Function: stop_ics ##
-
-Stop the ICS container.
-| arg list |
-|--|
-| None |
-
-## Function: start_stopped_ics ##
-
-Start a previously stopped ics.
-| arg list |
-|--|
-| None |
-
-## Function: set_ics_debug ##
-
-Configure the ICS log on debug level. The ICS must be running.
-| arg list |
-|--|
-| None |
-
-## Function: set_ics_trace ##
-
-Configure the ICS log on trace level. The ICS must be running.
-| arg list |
-|--|
-| None |
+| `<response-code>` | Http response code to make retry for |
## Function: check_ics_logs ##
| `<target-value>` | Target value for the variable |
| `<timeout-in-sec>` | Max time to wait for the variable to reach the target value |
-## Function: ics_api_a1_get_job_ids() ##
+## Function: ics_api_a1_get_job_ids ##
-Test of GET '/A1-EI​/v1​/eitypes​/{eiTypeId}​/eijobs' and optional check of the array of returned job ids.
+Test of GET '/A1-EI/v1/eitypes/{eiTypeId}/eijobs' and optional check of the array of returned job ids.
To test the response code only, provide the response code parameter as well as a type id and an owner id.
To also test the response payload add the 'EMPTY' for an expected empty array or repeat the last parameter for each expected job id.
| `<job-id>` | Id of the expected job |
| `EMPTY` | The expected list of job id shall be empty |
-## Function: ics_api_a1_get_type() ##
+## Function: ics_api_a1_get_type ##
-Test of GET '/A1-EI​/v1​/eitypes​/{eiTypeId}' and optional check of the returned schema.
+Test of GET '/A1-EI/v1/eitypes/{eiTypeId}' and optional check of the returned schema.
To test the response code only, provide the response code parameter as well as the type-id.
To also test the response payload add a path to the expected schema file.
| `<type-id>` | Id of the EI type |
| `<schema-file>` | Path to a schema file to compare with the returned schema |
-## Function: ics_api_a1_get_type_ids() ##
+## Function: ics_api_a1_get_type_ids ##
-Test of GET '/A1-EI​/v1​/eitypes' and optional check of returned list of type ids.
+Test of GET '/A1-EI/v1/eitypes' and optional check of returned list of type ids.
To test the response code only, provide the response only.
To also test the response payload add the list of expected type ids (or EMPTY if the list is expected to be empty).
| `EMPTY` | The expected list of type ids shall be empty |
| `<type-id>` | Id of the EI type |
-## Function: ics_api_a1_get_job_status() ##
+## Function: ics_api_a1_get_job_status ##
-Test of GET '/A1-EI​/v1​/eitypes​/{eiTypeId}​/eijobs​/{eiJobId}​/status' and optional check of the returned status.
+Test of GET '/A1-EI/v1/eitypes/{eiTypeId}/eijobs/{eiJobId}/status' and optional check of the returned status.
To test the response code only, provide the response code, type id and job id.
To also test the response payload add the expected status.
| `<job-id>` | Id of the job |
| `<status>` | Expected status |
-## Function: ics_api_a1_get_job() ##
+## Function: ics_api_a1_get_job ##
-Test of GET '/A1-EI​/v1​/eitypes​/{eiTypeId}​/eijobs​/{eiJobId}' and optional check of the returned job.
+Test of GET '/A1-EI/v1/eitypes/{eiTypeId}/eijobs/{eiJobId}' and optional check of the returned job.
To test the response code only, provide the response code, type id and job id.
To also test the response payload add the remaining parameters.
| `<owner-id>` | Expected owner for the job |
| `<template-job-file>` | Path to a job template for job parameters of the job |
-## Function: ics_api_a1_delete_job() ##
+## Function: ics_api_a1_delete_job ##
-Test of DELETE '/A1-EI​/v1​/eitypes​/{eiTypeId}​/eijobs​/{eiJobId}'.
+Test of DELETE '/A1-EI/v1/eitypes/{eiTypeId}/eijobs/{eiJobId}'.
To test, provide all the specified parameters.
| arg list |
| `<type-id>` | Id of the EI type |
| `<job-id>` | Id of the job |
-## Function: ics_api_a1_put_job() ##
+## Function: ics_api_a1_put_job ##
-Test of PUT '/A1-EI​/v1​/eitypes​/{eiTypeId}​/eijobs​/{eiJobId}'.
+Test of PUT '/A1-EI/v1/eitypes/{eiTypeId}/eijobs/{eiJobId}'.
To test, provide all the specified parameters.
| arg list |
| `<owner-id>` | Owner of the job |
| `<template-job-file>` | Path to a job template for job parameters of the job |
-## Function: ics_api_edp_get_type_ids() ##
+## Function: ics_api_edp_get_type_ids ##
Test of GET '/ei-producer/v1/eitypes' or '/data-producer/v1/info-types' depending on ics version and an optional check of the returned list of type ids.
To test the response code only, provide the response code.
| `<type-id>` | Id of the type |
| `EMPTY` | The expected list of type ids shall be empty |
-## Function: ics_api_edp_get_producer_status() ##
+## Function: ics_api_edp_get_producer_status ##
Test of GET '/ei-producer/v1/eiproducers/{eiProducerId}/status' or '/data-producer/v1/info-producers/{infoProducerId}/status' depending on ics version and optional check of the returned status.
To test the response code only, provide the response code and producer id.
| `<producer-id>` | Id of the producer |
| `<status>` | The expected status string |
-## Function: ics_api_edp_get_producer_ids() ##
+## Function: ics_api_edp_get_producer_ids ##
Test of GET '/ei-producer/v1/eiproducers' and optional check of the returned producer ids.
To test the response code only, provide the response.
| `<producer-id>` | Id of the producer |
| `EMPTY` | The expected list of type ids shall be empty |
-## Function: ics_api_edp_get_producer_ids_2() ##
+## Function: ics_api_edp_get_producer_ids_2 ##
Test of GET '/ei-producer/v1/eiproducers' or '/data-producer/v1/info-producers' depending on ics version and optional check of the returned producer ids.
To test the response code only, provide the response.
| `<producer-id>` | Id of the producer |
| `EMPTY` | The expected list of type ids shall be empty |
-## Function: ics_api_edp_get_type() ##
+## Function: ics_api_edp_get_type ##
Test of GET '/ei-producer/v1/eitypes/{eiTypeId}' and optional check of the returned type.
To test the response code only, provide the response and the type-id.
| `<producer-id>` | Id of the producer |
| `EMPTY` | The expected list of type ids shall be empty |
-## Function: ics_api_edp_get_type_2() ##
+## Function: ics_api_edp_get_type_2 ##
Test of GET '/ei-producer/v1/eitypes/{eiTypeId}' or '/data-producer/v1/info-types/{infoTypeId}' depending on ics version and optional check of the returned type.
To test the response code only, provide the response and the type-id.
| `<job-schema-file>` | Path to a job schema file |
| `EMPTY` | The expected list of type ids shall be empty |
-## Function: ics_api_edp_put_type_2() ##
+## Function: ics_api_edp_put_type_2 ##
Test of PUT '/ei-producer/v1/eitypes/{eiTypeId}' or '/data-producer/v1/info-types/{infoTypeId}' depending on ics version and optional check of the returned type.
| `<job-schema-file>` | Path to a job schema file |
| `EMPTY` | The expected list of type ids shall be empty |
-## Function: ics_api_edp_delete_type_2() ##
+## Function: ics_api_edp_delete_type_2 ##
Test of DELETE '/ei-producer/v1/eitypes/{eiTypeId}' or '/data-producer/v1/info-types/{infoTypeId}' depending on ics version and optional check of the returned type.
| `<response-code>` | Expected http response code |
| `<type-id>` | Id of the type |
-## Function: ics_api_edp_get_producer() ##
+## Function: ics_api_edp_get_producer ##
Test of GET '/ei-producer/v1/eiproducers/{eiProducerId}' and optional check of the returned producer.
To test the response code only, provide the response and the producer-id.
| `<schema-file>` | Path to a schema file |
| `EMPTY` | The expected list of type schema pairs shall be empty |
-## Function: ics_api_edp_get_producer_2() ##
+## Function: ics_api_edp_get_producer_2 ##
Test of GET '/ei-producer/v1/eiproducers/{eiProducerId}' or '/data-producer/v1/info-producers/{infoProducerId}' depending on ics version and optional check of the returned producer.
To test the response code only, provide the response and the producer-id.
| `<type-id>` | Id of the type |
| `EMPTY` | The expected list of types shall be empty |
-## Function: ics_api_edp_delete_producer() ##
+## Function: ics_api_edp_delete_producer ##
Test of DELETE '/ei-producer/v1/eiproducers/{eiProducerId}' or '/data-producer/v1/info-producers/{infoProducerId}' depending on ics version.
To test, provide all parameters.
| `<response-code>` | Expected http response code |
| `<producer-id>` | Id of the producer |
-## Function: ics_api_edp_put_producer() ##
+## Function: ics_api_edp_put_producer ##
Test of PUT '/ei-producer/v1/eiproducers/{eiProducerId}'.
To test, provide all parameters. The list of type/schema pair may be empty.
| `<schema-file>` | Path to a schema file |
| `EMPTY` | The list of type/schema pairs is empty |
-## Function: ics_api_edp_put_producer_2() ##
+## Function: ics_api_edp_put_producer_2 ##
Test of PUT '/ei-producer/v1/eiproducers/{eiProducerId}' or '/data-producer/v1/info-producers/{infoProducerId}' depending on ics version.
To test, provide all parameters. The list of type/schema pair may be empty.
| `<type-id>` | Id of the type |
| `NOTYPE` | The list of types is empty |
-## Function: ics_api_edp_get_producer_jobs() ##
+## Function: ics_api_edp_get_producer_jobs ##
Test of GET '/ei-producer/v1/eiproducers/{eiProducerId}/eijobs' and optional check of the returned producer job.
To test the response code only, provide the response and the producer-id.
| `<template-job-file>` | Path to a job template file |
| `EMPTY` | The list of job/type/target/job-file tuples is empty |
-## Function: ics_api_edp_get_producer_jobs_2() ##
+## Function: ics_api_edp_get_producer_jobs_2 ##
Test of GET '/ei-producer/v1/eiproducers/{eiProducerId}/eijobs' or '/data-producer/v1/info-producers/{infoProducerId}/info-jobs' depending on ics version and optional check of the returned producer job.
To test the response code only, provide the response and the producer-id.
| `<template-job-file>` | Path to a job template file |
| `EMPTY` | The list of job/type/target/job-file tuples is empty |
-## Function: ics_api_service_status() ##
+## Function: ics_api_service_status ##
Test of GET '/status'.
| --------- | ----------- |
| `<response-code>` | Expected http response code |
-## Function: ics_api_idc_get_type_ids() ##
+## Function: ics_api_idc_get_type_ids ##
Test of GET '/data-consumer/v1/info-types' and an optional check of the returned list of type ids.
To test the response code only, provide the response code.
| `<type-id>` | Id of the Info type |
| `EMPTY` | The expected list of type ids shall be empty |
-## Function: ics_api_idc_get_job_ids() ##
+## Function: ics_api_idc_get_job_ids ##
Test of GET '/data-consumer/v1/info-jobs' and optional check of the array of returned job ids.
To test the response code only, provide the response code parameter as well as a type id and an owner id.
| `<job-id>` | Id of the expected job |
| `EMPTY` | The expected list of job id shall be empty |
-## Function: ics_api_idc_get_job() ##
+## Function: ics_api_idc_get_job ##
Test of GET '/data-consumer/v1/info-jobs/{infoJobId}' and optional check of the returned job.
To test the response code only, provide the response code, type id and job id.
| `<owner-id>` | Expected owner for the job |
| `<template-job-file>` | Path to a job template for job parameters of the job |
-## Function: ics_api_idc_put_job() ##
+## Function: ics_api_idc_put_job ##
-Test of PUT '​/data-consumer/v1/info-jobs/{infoJobId}'.
+Test of PUT '/data-consumer/v1/info-jobs/{infoJobId}'.
To test, provide all the specified parameters.
| arg list |
| `<template-job-file>` | Path to a job template for job parameters of the job |
| `VALIIDATE` | Indicator to perform type validation at creation |
-## Function: ics_api_idc_delete_job() ##
+## Function: ics_api_idc_delete_job ##
-Test of DELETE '/A1-EI​/v1​/eitypes​/{eiTypeId}​/eijobs​/{eiJobId}'.
+Test of DELETE '/A1-EI/v1/eitypes/{eiTypeId}/eijobs/{eiJobId}'.
To test, provide all the specified parameters.
| arg list |
| `<type-id>` | Id of the type |
| `<job-id>` | Id of the job |
-## Function: ics_api_idc_get_type() ##
+## Function: ics_api_idc_get_type ##
Test of GET '/data-consumer/v1/info-types/{infoTypeId} and optional check of the returned schema.
To test the response code only, provide the response code parameter as well as the type-id.
| `<type-id>` | Id of the Info type |
| `<schema-file>` | Path to a schema file to compare with the returned schema |
-## Function: ics_api_idc_get_job_status() ##
+## Function: ics_api_idc_get_job_status ##
Test of GET '/data-consumer/v1/info-jobs/{infoJobId}/status' and optional check of the returned status and timeout.
To test the response code only, provide the response code and job id.
| `<status>` | Expected status |
| `<timeout>` | Timeout |
-## Function: ics_api_idc_get_job_status2() ##
+## Function: ics_api_idc_get_job_status2 ##
Test of GET '/data-consumer/v1/info-jobs/{infoJobId}/status' with returned producers and optional check of the returned status and timeout.
To test the response code only, provide the response code and job id.
| `<timeout>` | Timeout |
-## Function: ics_api_idc_get_subscription_ids() ##
+## Function: ics_api_idc_get_subscription_ids ##
Test of GET '/data-consumer/v1/info-type-subscription' with the returned list of subscription ids
| arg list |
| `<EMPTY>` | Indicator for empty list of subscription ids |
| `<subscription-id>` | Id of the subscription |
-## Function: ics_api_idc_get_subscription() ##
+## Function: ics_api_idc_get_subscription ##
Test of GET '/data-consumer/v1/info-type-subscription/{subscriptionId}' with the subscription information
| arg list |
| `<status-uri>` | Url for status notifications |
-## Function: ics_api_idc_put_subscription() ##
+## Function: ics_api_idc_put_subscription ##
Test of PUT '/data-consumer/v1/info-type-subscription/{subscriptionId}' with the subscription information
| arg list |
| `<owner-id>` | Id of the owner |
| `<status-uri>` | Url for status notifications |
-## Function: ics_api_idc_delete_subscription() ##
+## Function: ics_api_idc_delete_subscription ##
Test of DELETE /data-consumer/v1/info-type-subscription/{subscriptionId}
| arg list |
| `<subscription-id>` | Id of the subscription |
-## Function: ics_api_admin_reset() ##
+## Function: ics_api_admin_reset ##
Test of GET '/status'.
| `<response-code>` | Expected http response code |
| `<type>` | Type id, if the interface supports type in url |
-# Description of functions in gateway_api_functions.sh #
+## Function: ics_kube_pvc_reset ##
-## Function: use_gateway_http ##
+Admin reset to remove all data in ics; jobs, producers etc
+NOTE - only works in kubernetes and the pod should not be running
-Use http for all calls to the gateway. This is set by default.
| arg list |
|--|
| None |
-## Function: use_gateway_https ##
-
-Use https for all calls to the gateway.
-| arg list |
-|--|
-| None |
+# Description of functions in kafkapc_api_functions.sh #
-## Function: set_gateway_debug ##
+## Function: use_kafkapc_http ##
-Set debug level logging in the gateway
+Use http for all calls to the KAFKAPC.
| arg list |
|--|
| None |
-## Function: set_gateway_trace ##
+## Function: use_kafkapc_https ##
-Set debug level logging in the trace
+Use https for all calls to the KAFKAPC.
| arg list |
|--|
| None |
-## Function: start_gateway ##
+## Function: start_kafkapc ##
-Start the the gateway container in docker or kube depending on start mode
+Start the KAFKAPC container in docker or kube depending on start mode
| arg list |
|--|
| None |
-## Function: gateway_pms_get_status ##
+## Function: kafkapc_equal ##
-Sample test of pms api (status)
+Tests if a variable value in the KAFKAPC is equal to a target value.
+Without the timeout, the test sets pass or fail immediately depending on if the variable is equal to the target or not.
+With the timeout, the test waits up to the timeout seconds before setting pass or fail depending on if the variable value becomes equal to the target value or not.
+See the 'mrstub' dir for more details.
| arg list |
|--|
-| `<response-code>` |
+| `<variable-name> <target-value> [ <timeout-in-sec> ]` |
| parameter | description |
| --------- | ----------- |
-| `<response-code>` | Expected http response code |
+| `<variable-name>` | Variable name in the KAFKAPC |
+| `<target-value>` | Target value for the variable |
+| `<timeout-in-sec>` | Max time to wait for the variable to reach the target value |
+
+## Function: kafkapc_api_reset ##
+
+Deep reset of KAFKAPC. Note that kafka itself is not affected, i.e. created topic still exist in kafka.
+| arg list |
+|--|
+| None |
+
+## Function: kafkapc_api_create_topic ##
+
+Create a topic in kafka via kafkapc.
+| `<response-code> <topic-name> <mime-type>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Http response code |
+| `<topic-name>` | Name of the topic |
+| `<mime-type>` | Mime type of the data to send to the topic. Data on the topic is expected to be of this type |
+
+## Function: kafkapc_api_get_topic ##
+
+Get a topic from kafkapc.
+| `<response-code> <topic-name> <mime-type>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Http response code |
+| `<topic-name>` | Name of the topic |
+| `<mime-type>` | Mime type of the topic |
+
+## Function: kafkapc_api_start_sending ##
+
+Start sending msg from the msg queue to kafka for a topic.
+| `<response-code> <topic-name>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Http response code |
+| `<topic-name>` | Name of the topic |
+
+## Function: kafkapc_api_start_receiving ##
+
+Start receiving msg from a kafka topic to the msg queue in kafkapc.
+| `<response-code> <topic-name>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Http response code |
+| `<topic-name>` | Name of the topic |
+
+## Function: kafkapc_api_stop_sending ##
+
+Stop sending msg from the msg queue to kafka for a topic.
+| `<response-code> <topic-name>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Http response code |
+| `<topic-name>` | Name of the topic |
+
+## Function: kafkapc_api_stop_receiving ##
+
+Stop receiving msg from a kafka topic to the msg queue in kafkapc.
+| `<response-code> <topic-name>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Http response code |
+| `<topic-name>` | Name of the topic |
+
+## Function: kafkapc_api_post_msg ##
+
+Send a message on a topic.
+| arg list |
+|--|
+| `<response-code> <topic> <mime-type> <msg>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Http response code |
+| `<topic>` | Topic name |
+| `<mime-type>` | Mime type of the msg |
+| `<msg>` | String msg to send |
+
+## Function: kafkapc_api_get_msg ##
+
+Get a message on a topic.
+| arg list |
+|--|
+| `<response-code> <topic> ([ <mime-type> <msg> ] | NOMSG )` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Http response code |
+| `<topic>` | Topic name |
+| `<mime-type>` | Mime type of the msg |
+| `<msg>` | String msg to receive |
+| `NOMSG` | Indicator for no msg |
+
+## Function: kafkapc_api_post_msg_from_file ##
+
+Send a message in a file on a topic.
+| arg list |
+|--|
+| `<response-code> <topic> <mime-type> <file>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Http response code |
+| `<topic>` | Topic name |
+| `<mime-type>` | Mime type of the msg |
+| `<file>` | Filepath to the string msg to send |
+
+## Function: kafkapc_api_get_msg_from_file ##
+
+Get a message on a topic.
+| arg list |
+|--|
+| `<response-code> <topic> <mime-type> <file> ` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Http response code |
+| `<topic>` | Topic name |
+| `<mime-type>` | Mime type of the msg |
+| `<file>` | Filepath to the string msg to receive |
+
+## Function: kafkapc_api_generate_json_payload_file ##
+
+Create json file with dummy data for payload.
+| arg list |
+|--|
+| `<size-in-kb> <filename>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<size-in-kb>` | Generated size in kb |
+| `<filename>` | Path to output file |
+
+## Function: kafkapc_api_generate_text_payload_file ##
+
+Create file with dummy text data for payload.
+| arg list |
+|--|
+| `<size-in-kb> <filename>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<size-in-kb>` | Generated size in kb |
+| `<filename>` | Path to output file |
+
+# Description of functions in kubeproxy_api_functions.sh #
+
+## Function: use_kube_proxy_http ##
+
+Use http for all proxy requests. Note that this only applicable to the actual proxy request, the proxied protocol can still be http and https.
+| arg list |
+|--|
+| None |
+
+## Function: use_kube_proxy_https ##
+
+Use https for all proxy requests. Note that this only applicable to the actual proxy request, the proxied protocol can still be http and https.
+| arg list |
+|--|
+| None |
+
+## Function: start_kube_proxy ##
+
+Start the kube proxy container in kube. This proxy enables the test env to access all services and pods in a kube cluster.
+No proxy is started if the function is called in docker mode.
+| arg list |
+|--|
+| None |
+
+# Description of functions in localhelm_api_functions.sh #
+
+## Function: localhelm_create_test_chart ##
+
+Create a dummy chart using helm
+| arg list |
+|--|
+| `chart-name` |
+
+| parameter | description |
+| --------- | ----------- |
+| `chart-name` | Name of the chart |
+
+## Function: localhelm_package_test_chart ##
+
+Package a dummy chart using helm
+| arg list |
+|--|
+| `chart-name` |
+
+| parameter | description |
+| --------- | ----------- |
+| `chart-name` | Name of the chart |
+
+## Function: localhelm_installed_chart_release ##
+
+Check if a chart is installed or not using helm
+| arg list |
+|--|
+| `INSTALLED\|NOTINSTALLED <release-name> <name-space>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `INSTALLED` | Expecting installed chart |
+| `NOTINSTALLED` | Expecting a not installed chart |
+| `release-name` | Name of the release |
+| `name-space` | Expected namespace |
+
+# Description of functions in mr_api_functions.sh #
+
+## Function: use_mr_http ##
+
+Use http for all Dmaap calls to the MR. This is the default. The admin API is not affected. Note that this function shall be called before preparing the config for Consul.
+| arg list |
+|--|
+| None |
+
+## Function: use_mr_https ##
+
+Use https for all Dmaap call to the MR. The admin API is not affected. Note that this function shall be called before preparing the config for Consul.
+| arg list |
+|--|
+| None |
+
+## Function: start_mr ##
+
+Start the Message Router stub interface container in docker or kube depending on start mode
+| arg list |
+|--|
+| None |
+
+## Function: dmaap_api_print_topics ##
+
+Prints the current list of topics in DMAAP MR
+
+| arg list |
+|--|
+| None |
+
+## Function: mr_equal ##
+
+Tests if a variable value in the Message Router (MR) simulator is equal to a target value.
+Without the timeout, the test sets pass or fail immediately depending on if the variable is equal to the target or not.
+With the timeout, the test waits up to the timeout seconds before setting pass or fail depending on if the variable value becomes equal to the target value or not.
+See the 'mrstub' dir for more details.
+| arg list |
+|--|
+| `<variable-name> <target-value> [ <timeout-in-sec> ]` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<variable-name>` | Variable name in the MR |
+| `<target-value>` | Target value for the variable |
+| `<timeout-in-sec>` | Max time to wait for the variable to reach the target value |
+
+## Function: mr_greater ##
+
+Tests if a variable value in the Message Router (MR) simulator is greater than a target value.
+Without the timeout, the test sets pass or fail immediately depending on if the variable is greater than the target or not.
+With the timeout, the test waits up to the timeout seconds before setting pass or fail depending on if the variable value becomes greater than the target value or not.
+See the 'mrstub' dir for more details.
+| arg list |
+|--|
+| `<variable-name> <target-value> [ <timeout-in-sec> ]` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<variable-name>` | Variable name in the MR |
+| `<target-value>` | Target value for the variable |
+| `<timeout-in-sec>` | Max time to wait for the variable to become greater than the target value |
+
+## Function: mr_read ##
+
+Reads the value of a variable in the Message Router (MR) simulator. The value is intended to be passed to a env variable in the test script.
+See the 'mrstub' dir for more details.
+| arg list |
+|--|
+| `<variable-name>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<variable-name>` | Variable name in the MR |
+
+## Function: mr_print ##
+
+Prints the value of a variable in the Message Router (MR) simulator.
+See the 'mrstub' dir for more details.
+| arg list |
+|--|
+| `<variable-name>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<variable-name>` | Variable name in the MR |
+
+## Function: mr_api_send_json ##
+
+Send json to topic in mr-stub.
+| arg list |
+|--|
+| `<topic-url> <json-msg>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<topic-url>` | Topic url |
+| `<json-msg>` | Json msg as string |
+
+## Function: mr_api_send_text ##
+
+Send text to topic in mr-stub.
+| arg list |
+|--|
+| `<topic-url> <text-msg>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<topic-url>` | Topic url |
+| `<text-msg>` | Text (string) msg |
+
+
+
+## Function: mr_api_send_json_file ##
+
+Send json to topic in mr-stub.
+| arg list |
+|--|
+| `<topic-url> <json-file>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<topic-url>` | Topic url |
+| `<json-file>` | Path to file with json msg as string |
+
+## Function: mr_api_send_text_file ##
+
+Send text to topic in mr-stub.
+| arg list |
+|--|
+| `<topic-url> <text-file>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<topic-url>` | Topic url |
+| `<text-file>` | Path to file with text msg as string |
+
+## Function: mr_api_generate_json_payload_file ##
+
+Create json file with dummy data for payload.
+| arg list |
+|--|
+| `<size-in-kb> <filename>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<size-in-kb>` | Generated size in kb |
+| `<filename>` | Path to output file |
+
+## Function: mr_api_generate_text_payload_file ##
+
+Create file with dummy text data for payload.
+| arg list |
+|--|
+| `<size-in-kb> <filename>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<size-in-kb>` | Generated size in kb |
+| `<filename>` | Path to output file |
+
+# Description of functions in ngw_api_functions.sh #
+
+## Function: use_gateway_http ##
+
+Use http for all calls to the gateway. This is set by default.
+| arg list |
+|--|
+| None |
+
+## Function: use_gateway_https ##
+
+Use https for all calls to the gateway.
+| arg list |
+|--|
+| None |
+
+## Function: set_gateway_debug ##
+
+Set debug level logging in the gateway
+| arg list |
+|--|
+| None |
+
+## Function: set_gateway_trace ##
+
+Set trace level logging in the gateway
+| arg list |
+|--|
+| None |
+
+## Function: start_gateway ##
+
+Start the gateway container in docker or kube depending on start mode
+| arg list |
+|--|
+| None |
+
+## Function: gateway_pms_get_status ##
+
+Sample test of pms api (status)
+Only response code tested - not payload
+| arg list |
+|--|
+| `<response-code>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
## Function: gateway_ics_get_types ##
-Sample test of ics api (get types)
-Only response code tested - not payload
+Sample test of ics api (get types)
+Only response code tested - not payload
+| arg list |
+|--|
+| `<response-code>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+
+# Description of functions in pa_api_functions.sh #
+
+## General ##
+
+Both PMS version 1 and 2 are supported. The version is controlled by the env variable `$PMS_VERSION` set in the test env file.
+For api function in version 2, an url prefix is added if configured.
+
+## Function: use_agent_rest_http ##
+
+Use http for all API calls to the Policy Agent. This is the default.
+| arg list |
+|--|
+| None |
+
+## Function: use_agent_rest_https ##
+
+Use https for all API calls to the Policy Agent.
+| arg list |
+|--|
+| None |
+
+## Function: use_agent_dmaap_http ##
+
+Send and receive all API calls to the Policy Agent over Dmaap via the MR over http.
+| arg list |
+|--|
+| None |
+
+## Function: use_agent_dmaap_https ##
+
+Send and receive all API calls to the Policy Agent over Dmaap via the MR over https.
+| arg list |
+|--|
+| None |
+
+## Function: start_policy_agent ##
+
+Start the Policy Agent container or corresponding kube resources depending on docker/kube mode.
+| arg list |
+|--|
+| `<logfile-prefix>` |
+| (docker) `PROXY\|NOPROXY <config-file>` |
+| (kube) `PROXY\|NOPROXY <config-file> [ <data-file> ]` |
+
+| parameter | description |
+| --------- | ----------- |
+| `PROXY` | Configure with http proxy, if proxy is started |
+| `NOPROXY` | Configure without http proxy |
+| `<config-file>`| Path to application.yaml |
+| `<data-file>` | Optional path to application_configuration.json |
+
+## Function: stop_policy_agent ##
+
+Stop the pms container (docker) or scale it to zero (kubernetes).
+| arg list |
+|--|
+| None |
+
+## Function: start_stopped_policy_agent ##
+
+Start a previously stopped pms container (docker) or scale it to 1 (kubernetes).
+| arg list |
+|--|
+| None |
+
+## Function: prepare_consul_config ##
+
+Function to prepare a Consul config based on the previously configured (and started) simulators. Note that all simulators must be running and the test script has to configure if http or https shall be used for the components (this is done by the functions 'use_simulator_http', 'use_simulator_https', 'use_sdnc_http', 'use_sdnc_https', 'use_mr_http', 'use_mr_https')
+| arg list |
+|--|
+| `SDNC|NOSDNC <output-file>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `SDNC` | Configure with controller |
+| `NOSDNC` | Configure without controller |
+| `<output-file>` | The path to the json output file containing the prepared config. This file is used in 'consul_config_app' |
+
+## Function: agent_load_config ##
+
+Load the config into a config map (kubernetes only).
+| arg list |
+|--|
+| `<data-file>` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<data-file>` | Path to application_configuration.json |
+
+## Function: set_agent_debug ##
+
+Configure the Policy Agent log on debug level. The Policy Agent must be running.
+| arg list |
+|--|
+| None |
+
+## Function: set_agent_trace ##
+
+Configure the Policy Agent log on trace level. The Policy Agent must be running.
+| arg list |
+|--|
+| None |
+
+## Function: use_agent_retries ##
+
+Configure the Policy Agent to make up to 5 retries if an API call returns any of the specified http return codes.
+| arg list |
+|--|
+| `[<response-code>]*` |
+
+## Function: check_policy_agent_logs ##
+
+Check the Policy Agent log for any warnings and errors and print the count of each.
+| arg list |
+|--|
+| None |
+
+## Function: api_equal ##
+
+Tests if the array length of a json array in the Policy Agent simulator is equal to a target value.
+Without the timeout, the test sets pass or fail immediately depending on if the array length is equal to the target or not.
+With the timeout, the test waits up to the timeout seconds before setting pass or fail depending on if the array length becomes equal to the target value or not.
+See the 'cr' dir for more details.
+
+| arg list |
+|--|
+| `<variable-name> <target-value> [ <timeout-in-sec> ]` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<variable-name>` | Relative url. Example 'json:policy_types' - checks the json array length of the url /policy_types |
+| `<target-value>` | Target value for the length |
+| `<timeout-in-sec>` | Max time to wait for the length to reach the target value |
+
+## Function: api_get_policies ##
+
+Test of GET '/policies' or V2 GET '/v2/policy-instances' and optional check of the array of returned policies.
+To test the response code only, provide the response code parameter as well as the following three parameters.
+To also test the response payload add the 'NOID' for an expected empty array or repeat the last five/seven parameters for each expected policy.
+
+| arg list |
+|--|
+| `<response-code> <ric-id>\|NORIC <service-id>\|NOSERVICE <policy-type-id>\|NOTYPE [ NOID \| [<policy-id> <ric-id> <service-id> EMPTY\|<policy-type-id> <template-file>]*]` |
+
+| arg list V2 |
+|--|
+| `<response-code> <ric-id>\|NORIC <service-id>\|NOSERVICE <policy-type-id>\|NOTYPE [ NOID \| [<policy-id> <ric-id> <service-id> EMPTY\|<policy-type-id> <transient> <notification-url> <template-file>]*]` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `<ric-id>` | Id of the ric |
+| `NORIC` | Indicator that no ric is provided |
+| `<service-id>` | Id of the service |
+| `NOSERVICE` | Indicator that no service id is provided |
+| `<policy-type-id>` | Id of the policy type |
+| `NOTYPE` | Indicator that no type id is provided |
+| `NOID` | Indicator that no policy id is provided - indicate empty list of policies|
+| `<policy-id>` | Id of the policy |
+| `EMPTY` | Indicate for the special empty policy type |
+| `transient` | Transient, true or false |
+| `notification-url` | Url for notifications |
+| `<template-file>` | Path to the template file for the policy (same template used when creating the policy) |
+
+## Function: api_get_policy ##
+
+Test of GET '/policy' or V2 GET '/v2/policies/{policy_id}' and optional check of the returned json payload.
+To test the response code only, provide the expected response code and policy id.
+To test the contents of the returned json payload, add a path to the template file used when creating the policy.
+
+| arg list |
+|--|
+| `<response-code> <policy-id> [<template-file>]` |
+
+| arg list V2|
+|--|
+| `<response-code> <policy-id> [ <template-file> <service-name> <ric-id> <policytype-id>\|NOTYPE <transient> <notification-url>\|NOURL ]` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `<policy-id>` | Id of the policy |
+| `<template-file>` | Path to the template file for the policy (same template used when creating the policy) |
+| `<service-id>` | Id of the service |
+| `<ric-id>` | Id of the ric |
+| `<policy-type-id>` | Id of the policy type |
+| `NOTYPE` | Indicator that no type id is provided |
+| `transient` | Transient, true or false |
+| `notification-url` | Url for notifications |
+
+## Function: api_put_policy ##
+
+Test of PUT '/policy' or V2 PUT '/policies'.
+If more than one policy shall be created, add a count value to indicate the number of policies to create. Note that if more than one policy shall be created the provided policy-id must be numerical (will be used as the starting id).
+
+| arg list |
+|--|
+| `<response-code> <service-name> <ric-id> <policytype-id> <policy-id> <transient> <template-file> [<count>]` |
+
+| arg list V2 |
+|--|
+| `<response-code> <service-name> <ric-id> <policytype-id>\|NOTYPE <policy-id> <transient>\|NOTRANSIENT <notification-url>\|NOURL <template-file> [<count>]` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `<service-id>` | Id of the service |
+| `<ric-id>` | Id of the ric |
+| `<policy-type-id>` | Id of the policy type |
+| `<policy-id>` | Id of the policy. This value shall be a numeric value if more than one policy shall be created |
| `<transient>` | Transient 'true' or 'false'. 'NOTRANSIENT' can be used to indicate using the default value (no transient value provided) |
+| `notification-url` | Url for notifications |
+|`NOURL`| Indicator for no url |
+| `<template-file>` | Path to the template file for the policy |
+| `<count>` | An optional count (default is 1). If a value greater than 1 is given, the policy ids will use the given policy id as the first id and add 1 to that id for each new policy |
+
+## Function: api_put_policy_batch ##
+
+This tests the same as function 'api_put_policy' except that all put requests are sent to dmaap in one go and then the responses are polled one by one.
+If the agent api is not configured to use dmaap (see 'use_agent_dmaap', 'use_agent_rest_http' and 'use_agent_rest_https'), an error message is printed.
+For arg list and parameters, see 'api_put_policy'.
+
+## Function: api_put_policy_parallel ##
+
+This tests the same as function 'api_put_policy' except that the policy create is spread out over a number of processes and it only uses the agent rest API. The total number of policies created is determined by the product of the parameters 'number-of-rics' and 'count'. The parameter 'number-of-threads' shall be selected to be not evenly divisible by the product of the parameters 'number-of-rics' and 'count' - this is to ensure that one process does not handle the creation of all the policies in one ric.
+
+| arg list |
+|--|
+| `<response-code> <service-name> <ric-id-base> <number-of-rics> <policytype-id> <policy-start-id> <transient> <template-file> <count-per-ric> <number-of-threads>`
+
+| arg list |
+|--|
+| `<response-code> <service-name> <ric-id-base> <number-of-rics> <policytype-id> <policy-start-id> <transient> <notification-url>\|NOURL <template-file> <count-per-ric> <number-of-threads>`
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `<service-id>` | Id of the service |
+| `<ric-id-base>` | The base id of the rics, ie ric id without the sequence number. The sequence number is added during processing |
+| `<number-of-rics>` | The number of rics, assuming the first index is '1'. The index is added to the 'ric-id-base' id |
+| `<policy-type-id>` | Id of the policy type |
+| `<policy-start-id>` | Id of the policy. This value shall be a numeric value and will be the id of the first policy |
| `<transient>` | Transient 'true' or 'false'. 'NOTRANSIENT' can be used to indicate using the default value (no transient value provided) |
+| `notification-url` | Url for notifications |
+| `<template-file>` | Path to the template file for the policy |
+| `<count-per-ric>` | Number of policies per ric |
+| `<number-of-threads>` | Number of threads (processes) to run in parallel |
+
+## Function: api_delete_policy ##
+
+This tests the DELETE '/policy' or V2 DELETE '/v2/policies/{policy_id}'. Removes the indicated policy or a 'count' number of policies starting with 'policy-id' as the first id.
+
+| arg list |
+|--|
+| `<response-code> <policy-id> [<count>]`
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `<policy-id>` | Id of the policy |
+| `<count>` | An optional count of policies to delete. The 'policy-id' will be the first id to be deleted. |
+
+## Function: api_delete_policy_batch ##
+
+This tests the same as function 'api_delete_policy' except that all delete requests are sent to dmaap in one go and then the responses are polled one by one.
+If the agent api is not configured to use dmaap (see 'use_agent_dmaap', 'use_agent_rest_http' and 'use_agent_rest_https'), an error message is printed.
+For arg list and parameters, see 'api_delete_policy'.
+
+## Function: api_delete_policy_parallel ##
+
+This tests the same as function 'api_delete_policy' except that the policy delete is spread out over a number of processes and it only uses the agent rest API. The total number of policies deleted is determined by the product of the parameters 'number-of-rics' and 'count'. The parameter 'number-of-threads' shall be selected to be not evenly divisible by the product of the parameters 'number-of-rics' and 'count' - this is to ensure that one process does not handle the deletion of all the policies in one ric.
+
+| arg list |
+|--|
+| `<response-code> <ric-id-base> <number-of-rics> <policy-start-id> <count-per-ric> <number-of-threads>`
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `<ric-id-base>` | The base id of the rics, ie ric id without the sequence number. The sequence number is added during processing |
+| `<number-of-rics>` | The number of rics, assuming the first index is '1' |
+| `<policy-start-id>` | Id of the policy. This value shall be a numeric value and will be the id of the first policy |
+| `<count-per-ric>` | Number of policies per ric |
+| `<number-of-threads>` | Number of threads (processes) to run in parallel |
+
+## Function: api_get_policy_ids ##
+
+Test of GET '/policy_ids' or V2 GET '/v2/policies'.
+To test response code only, provide the response code parameter as well as the following three parameters.
+To also test the response payload add the 'NOID' for an expected empty array or repeat the 'policy-instance-id' for each expected policy id.
+
+| arg list |
+|--|
+| `<response-code> <ric-id>\|NORIC <service-id>\|NOSERVICE <type-id>\|NOTYPE ([<policy-instance-id]*\|NOID)` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `<ric-id>` | Id of the ric |
+| `NORIC` | Indicator that no ric is provided |
+| `<service-id>` | Id of the service |
+| `NOSERVICE` | Indicator that no service id is provided |
| `<type-id>` | Id of the policy type |
+| `NOTYPE` | Indicator that no type id is provided |
+| `NOID` | Indicator that no policy id is provided - indicate empty list of policies|
+| `<policy-instance-id>` | Id of the policy |
+
+## Function: api_get_policy_schema ##
+
+Test of V2 GET '/v2/policy-types/{policyTypeId}' and optional check of the returned json schema.
+To test the response code only, provide the expected response code and policy type id.
+To test the contents of the returned json schema, add a path to a schema file to compare with.
+
+| arg list |
+|--|
+| `<response-code> <policy-type-id> [<schema-file>]` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `<policy-type-id>` | Id of the policy type |
+| `<schema-file>` | Path to the schema file for the policy type |
+
+## Function: api_get_policy_schema ##
+
+Test of GET '/policy_schema' and optional check of the returned json schema.
+To test the response code only, provide the expected response code and policy type id.
+To test the contents of the returned json schema, add a path to a schema file to compare with.
+
+| arg list |
+|--|
+| `<response-code> <policy-type-id> [<schema-file>]` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `<policy-type-id>` | Id of the policy type |
+| `<schema-file>` | Path to the schema file for the policy type |
+
+## Function: api_get_policy_schemas ##
+
+Test of GET '/policy_schemas' and optional check of the returned json schemas.
+To test the response code only, provide the expected response code and ric id (or NORIC if no ric is given).
+To test the contents of the returned json schema, add a path to a schema file to compare with (or NOFILE to represent an empty '{}' type)
+
+| arg list |
+|--|
+| `<response-code> <ric-id>\|NORIC [<schema-file>\|NOFILE]*` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `<ric-id>` | Id of the ric |
+| `NORIC` | No ric id given |
+| `<schema-file>` | Path to the schema file for the policy type |
+| `NOFILE` | Indicate the template for an empty type |
+
+## Function: api_get_policy_status ##
+
+Test of GET '/policy_status' or V2 GET '/policies/{policy_id}/status'.
+
+| arg list |
+|--|
+| `<response-code> <policy-id> (STD\|STD2 <enforce-status>\|EMPTY [<reason>\|EMPTY])\|(OSC <instance-status> <has-been-deleted>)` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `<policy-id>` | Id of the policy |
+| `STD` | Indicator of status of Standarized A1 |
+| `STD2` | Indicator of status of Standarized A1 version 2 |
+| `<enforce-status>` | Enforcement status |
+| `<reason>` | Optional reason |
+| `EMPTY` | Indicator of empty string status or reason |
+| `OSC` | Indicator of status of Non-Standarized OSC A1 |
+| `<instance-status>` | Instance status |
+| `<has-been-deleted>` | Deleted status, true or false |
+
+## Function: api_get_policy_types ##
+
+Test of GET '/policy_types' or V2 GET '/v2/policy-types' and optional check of the returned ids.
+To test the response code only, provide the expected response code and ric id (or NORIC if no ric is given).
+To test the contents of the returned json payload, add the list of expected policy type id (or 'EMPTY' for the '{}' type)
+
+| arg list |
+|--|
+| `<response-code> [<ric-id>\|NORIC [<policy-type-id>\|EMPTY [<policy-type-id>]*]]` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `<ric-id>` | Id of the ric |
+| `NORIC` | No ric id given |
+| `<policy-type-id>` | Id of the policy type |
+| `EMPTY` | Indicate the empty type |
+
+## Function: api_get_status ##
+
+Test of GET /status or V2 GET /status
+
| arg list |
|--|
| `<response-code>` |
| --------- | ----------- |
| `<response-code>` | Expected http response code |
-# Description of functions in http_proxy_api_functions.sh #
+## Function: api_get_ric ##
-## Function: use_http_proxy_http ##
+Test of GET '/ric' or V2 GET '/v2/rics/ric'
+To test the response code only, provide the expected response code and managed element id.
+To test the returned ric id, provide the expected ric id.
-Use http for all proxy requests. Note that this only applicable to the actual proxy request, the proxied protocol can still be http and https.
| arg list |
|--|
-| None |
| `<response-code> <managed-element-id> [<ric-id>]` |
-## Function: use_http_proxy_https ##
+| arg list V2 |
+|--|
| `<response-code> <management-element-id>\|NOME <ric-id>\|<NORIC> [<string-of-ricinfo>]` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `<managed-element-id>` | Id of the managed element |
+| `NOME` | Indicator for no ME |
+| `ric-id` | Id of the ric |
+| `NORIC` | Indicator no RIC |
+| `string-of-ricinfo` | String of ric info |
-Use https for all proxy requests. Note that this only applicable to the actual proxy request, the proxied protocol can still be http and https.
-| arg list |
-|--|
-| None |
+## Function: api_get_rics ##
-## Function: start_http_proxy ##
+Test of GET '/rics' or V2 GET '/v2/rics' and optional check of the returned json payload (ricinfo).
+To test the response code only, provide the expected response code and policy type id (or NOTYPE if no type is given).
+To test also the returned payload, add the formatted string of info in the returned payload.
+Format of ricinfo: <br>`<ric-id>:<list-of-mes>:<list-of-policy-type-ids>`<br>
+Example <br>`<space-separate-string-of-ricinfo> = "ricsim_g1_1:me1_ricsim_g1_1,me2_ricsim_g1_1:1,2,4 ricsim_g1_1:me2_........."`
-Start the http proxy container in docker or kube depending on running mode.
| arg list |
|--|
-| None |
| `<response-code> <policy-type-id>\|NOTYPE [<space-separate-string-of-ricinfo>]` |
-# Description of functions in kube_proxy_api_functions.sh #
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `<policy-type-id>` | Policy type id of the ric |
| `NOTYPE` | No type given |
+| `<space-separate-string-of-ricinfo>` | A space separated string of ric info - needs to be quoted |
-## Function: use_kube_proxy_http ##
+## Function: api_put_service ##
-Use http for all proxy requests. Note that this only applicable to the actual proxy request, the proxied protocol can still be http and https.
+Test of PUT '/service' or V2 PUT '/service'.
| arg list |
|--|
-| None |
+| `<response-code> <service-name> <keepalive-timeout> <callbackurl>` |
-## Function: use_kube_proxy_https ##
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `<service-name>` | Service name |
+| `<keepalive-timeout>` | Timeout value |
+| `<callbackurl>` | Callback url |
-Use https for all proxy requests. Note that this only applicable to the actual proxy request, the proxied protocol can still be http and https.
-| arg list |
-|--|
-| None |
+## Function: api_get_services ##
-## Function: start_kube_proxy ##
+Test of GET '/service' or V2 GET '/v2/services' and optional check of the returned json payload.
+To test only the response code, omit all parameters except the expected response code.
+To test the returned json, provide the parameters after the response code.
-Start the kube proxy container in kube. This proxy enabled the test env to access all services and pods in a kube cluster.
-No proxy is started if the function is called in docker mode.
| arg list |
|--|
-| None |
+| `<response-code> [ (<query-service-name> <target-service-name> <keepalive-timeout> <callbackurl>) \| (NOSERVICE <target-service-name> <keepalive-timeout> <callbackurl> [<target-service-name> <keepalive-timeout> <callbackurl>]* )]` |
-# Description of functions in mr_api_functions.sh #
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `<query-service-name>` | Service name for the query |
+| `<target-service-name>` | Target service name|
+| `<keepalive-timeout>` | Timeout value |
+| `<callbackurl>` | Callback url |
+| `NOSERVICE` | Indicator of no target service name |
-## Function: use_mr_http ##
+## Function: api_get_service_ids ##
+
+Test of GET '/services' or V2 GET /'v2/services'. Only check of service ids.
-Use http for all Dmaap calls to the MR. This is the default. The admin API is not affected. Note that this function shall be called before preparing the config for Consul.
| arg list |
|--|
-| None |
+| `<response-code> [<service-name>]*` |
-## Function: use_mr_https ##
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `<service-name>` | Service name |
-Use https for all Dmaap call to the MR. The admin API is not affected. Note that this function shall be called before preparing the config for Consul.
-| arg list |
-|--|
-| None |
+## Function: api_delete_services ##
-## Function: start_mr ##
+Test of DELETE '/services' or V2 DELETE '/v2/services/{serviceId}'
-Start the Message Router stub interface container in docker or kube depending on start mode
| arg list |
|--|
-| None |
+| `<response-code> [<service-name>]*` |
-## Function: mr_equal ##
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `<service-name>` | Service name |
+
+## Function: api_put_services_keepalive ##
+
+Test of PUT '/services/keepalive' or V2 PUT '/v2/services/{service_id}/keepalive'
-Tests if a variable value in the Message Router (MR) simulator is equal to a target value.
-Without the timeout, the test sets pass or fail immediately depending on if the variable is equal to the target or not.
-With the timeout, the test waits up to the timeout seconds before setting pass or fail depending on if the variable value becomes equal to the target value or not.
-See the 'mrstub' dir for more details.
| arg list |
|--|
-| `<variable-name> <target-value> [ <timeout-in-sec> ]` |
+| `<response-code> <service-name>` |
| parameter | description |
| --------- | ----------- |
-| `<variable-name>` | Variable name in the MR |
-| `<target-value>` | Target value for the variable |
-| `<timeout-in-sec>` | Max time to wait for the variable to reach the target value |
+| `<response-code>` | Expected http response code |
+| `<service-name>` | Service name |
-## Function: mr_greater ##
+## Function: api_put_configuration ##
+
+Test of PUT '/v2/configuration'
-Tests if a variable value in the Message Router (MR) simulator is greater than a target value.
-Without the timeout, the test sets pass or fail immediately depending on if the variable is greater than the target or not.
-With the timeout, the test waits up to the timeout seconds before setting pass or fail depending on if the variable value becomes greater than the target value or not.
-See the 'mrstub' dir for more details.
| arg list |
|--|
-| `<variable-name> <target-value> [ <timeout-in-sec> ]` |
+| `<response-code> <config-file>` |
| parameter | description |
| --------- | ----------- |
-| `<variable-name>` | Variable name in the MR |
-| `<target-value>` | Target value for the variable |
-| `<timeout-in-sec>` | Max time to wait for the variable to become grater than the target value |
+| `<response-code>` | Expected http response code |
+| `<config-file>` | Path json config file |
-## Function: mr_read ##
+## Function: api_get_configuration ##
+
+Test of GET '/v2/configuration'
-Reads the value of a variable in the Message Router (MR) simulator. The value is intended to be passed to a env variable in the test script.
-See the 'mrstub' dir for more details.
| arg list |
|--|
-| `<variable-name>` |
+| `<response-code> [<config-file>]` |
| parameter | description |
| --------- | ----------- |
-| `<variable-name>` | Variable name in the MR |
+| `<response-code>` | Expected http response code |
+| `<config-file>` | Path json config file to compare the retrieved config with |
-## Function: mr_print ##
+## Function: pms_kube_pvc_reset ##
+Admin reset to remove all policies and services
+All types and instances etc are removed - types and instances in a1 sims need to be removed separately
+NOTE - only works in kubernetes and the pod should not be running
-Prints the value of a variable in the Message Router (MR) simulator.
-See the 'mrstub' dir for more details.
| arg list |
|--|
-| `<variable-name>` |
+| None |
-| parameter | description |
-| --------- | ----------- |
-| `<variable-name>` | Variable name in the MR |
# Description of functions in prodstub_api_functions.sh #
|--|
| None |
-## Function: prodstub_arm_producer() ##
+## Function: prodstub_arm_producer ##
Preconfigure the prodstub with a producer. The producer supervision response code is optional, if not given the response code will be set to 200.
| `<producer-id>` | Id of the producer |
| `<forced_response_code>` | Forced response code for the producer callback url |
-## Function: prodstub_arm_job_create() ##
+## Function: prodstub_arm_job_create ##
Preconfigure the prodstub with a job or update an existing job. Optional create/update job response code, if not given the response code will be set to 200/201 depending on if the job has been previously created or not.
| `<job-id>` | Id of the job |
| `<forced_response_code>` | Forced response code for the create callback url |
-## Function: prodstub_arm_job_delete() ##
+## Function: prodstub_arm_job_delete ##
Preconfigure the prodstub with a job. Optional delete job response code, if not given the response code will be set to 204/404 depending on if the job exists or not.
| `<job-id>` | Id of the job |
| `<forced_response_code>` | Forced response code for the delete callback url |
-## Function: prodstub_arm_type() ##
+## Function: prodstub_arm_type ##
Preconfigure the prodstub with a type for a producer. Can be called multiple times to add more types.
| `<producer-id>` | Id of the producer |
| `<type-id>` | Id of the type |
-## Function: prodstub_disarm_type() ##
+## Function: prodstub_disarm_type ##
Remove a type for the producer in the prodstub. Can be called multiple times to remove more types.
| `<producer-id>` | Id of the producer |
| `<type-id>` | Id of the type |
-## Function: prodstub_check_jobdata() ##
+## Function: prodstub_check_jobdata ##
Check a job in the prodstub towards the list of provided parameters.
| `<job-owner>` | Id of the job owner |
| `<template-job-file>` | Path to a job template file |
-## Function: prodstub_check_jobdata_2() ##
+## Function: prodstub_check_jobdata_2 ##
Check a job in the prodstub towards the list of provided parameters.
| `<job-owner>` | Id of the job owner |
| `<template-job-file>` | Path to a job template file |
-## Function: prodstub_check_jobdata_3() ##
+## Function: prodstub_check_jobdata_3 ##
Check a job in the prodstub towards the list of provided parameters.
| `<job-owner>` | Id of the job owner |
| `<template-job-file>` | Path to a job template file |
-## Function: prodstub_delete_jobdata() ##
+## Function: prodstub_delete_jobdata ##
Delete the job parameters, job data, for a job.
| `<target-value>` | Target value for the variable |
| `<timeout-in-sec>` | Max time to wait for the variable to reach the target value |
-# Description of functions in rapp_catalogue_api_function.sh #
+# Description of functions in rc_api_function.sh #
## Function: use_rapp_catalogue_http ##
| `<target-value>` | Target value for the variable |
| `<timeout-in-sec>` | Max time to wait for the variable to reach the target value |
-## Function: rapp_cat_api_get_services() ##
+## Function: rapp_cat_api_get_services ##
Check all registered services.
| `<description>` | Description of the service |
| `EMPTY` | Indicator for an empty list |
-## Function: rapp_cat_api_put_service() ##
+## Function: rapp_cat_api_put_service ##
Register a service.
| `<display-name>` | Display name of the service |
| `<description>` | Description of the service |
-## Function: rapp_cat_api_get_service() ##
+## Function: rapp_cat_api_get_service ##
Check a registered service.
| `<display-name>` | Display name of the service |
| `<description>` | Description of the service |
-## Function: rapp_cat_api_delete_service() ##
+## Function: rapp_cat_api_delete_service ##
Check a registered service.
| `<response-code>` | Expected http response code |
| `<service-id>` | Id of the service |
-# Description of functions in ricsimulator_api_functions.sh #
+# Description of functions in ricsim_api_functions.sh #
The functions below only use the admin interface of the simulator, no usage of the A1 interface.
| `<ric-id>` | Id of the ric |
| `<delay-in-seconds>` | Delay in seconds. If omitted, the delay is removed |
+# Description of functions in sdnc_api_functions.sh #
+
+The file contains a selection of the possible API tests towards the SDNC (a1-controller)
+
+## Function: use_sdnc_http ##
+
+Use http for all API calls towards the SDNC A1 Controller. This is the default. Note that this function shall be called before preparing the config for Consul.
+| arg list |
+|--|
+| None |
+
+## Function: use_sdnc_https ##
+
+Use https for all API calls towards the SDNC A1 Controller. Note that this function shall be called before preparing the config for Consul.
+| arg list |
+|--|
+| None |
+
+## Function: start_sdnc ##
+
+Start the SDNC A1 Controller container and its database container
+| arg list |
+|--|
+| None |
+
+## Function: stop_sdnc ##
+
+Stop the SDNC A1 Controller container and its database container
+| arg list |
+|--|
+| None |
+
+## Function: start_stopped_sdnc ##
+
+Start a previously stopped SDNC
+| arg list |
+|--|
+| None |
+
+## Function: check_sdnc_logs ##
+
+Check the SDNC log for any warnings and errors and print the count of each.
+| arg list |
+|--|
+| None |
+
+## Function: controller_api_get_A1_policy_ids ##
+
+Test of GET policy ids towards OSC or STD type simulator.
+To test response code only, provide the response code, 'OSC' + policy type or 'STD'
+To test the response payload, include the ids of the expected response.
+
+| arg list |
+|--|
+| `<response-code> (OSC <ric-id> <policy-type-id> [ <policy-id> [<policy-id>]* ]) \| ( STD <ric-id> [ <policy-id> [<policy-id>]* ]` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `OSC` | Indicator of status of Non-Standarized OSC A1 |
+| `<ric-id>` | Id of the ric |
+| `<policy-type-id>` | Id of the policy type |
+| `<policy-id>` | Id of the policy |
+| `STD` | Indicator of status of Standarized A1 |
+
+## Function: controller_api_get_A1_policy_type ##
+
+Test of GET a policy type (OSC only)
+
+| arg list |
+|--|
+| `<response-code> OSC <ric-id> <policy-type-id> [<policy-type-file>]` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `OSC` | Indicator of status of Non-Standardized OSC A1 |
+| `<ric-id>` | Id of the ric |
+| `<policy-type-id>` | Id of the policy type |
+| `<policy-type-file>` | Optional schema file to compare the returned type with |
+
+## Function: controller_api_delete_A1_policy ##
+
+Deletes a policy instance
+
+| arg list |
+|--|
+| `<response-code> (STD <ric-id> <policy-id>) \| (OSC <ric-id> <policy-type-id> <policy-id>)` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `STD` | Indicator of status of Standardized A1 |
+| `<ric-id>` | Id of the ric |
+| `<policy-id>` | Id of the policy |
+| `<policy-type-id>` | Id of the policy type |
+| `OSC` | Indicator of status of Non-Standardized OSC A1 |
+| `<policy-type-file>` | Optional schema file to compare the returned type with |
+
+## Function: controller_api_put_A1_policy ##
+
+Creates a policy instance
+
+| arg list |
+|--|
+| `<response-code> (STD <ric-id> <policy-id> <template-file> ) \| (OSC <ric-id> <policy-type-id> <policy-id> <template-file>)` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `STD` | Indicator of status of Standardized A1 |
+| `<ric-id>` | Id of the ric |
+| `<policy-id>` | Id of the policy |
+| `<template-file>` | Path to the template file of the policy|
+| `OSC` | Indicator of status of Non-Standardized OSC A1 |
+| `<policy-type-id>` | Id of the policy type |
+
+## Function: controller_api_get_A1_policy_status ##
+
+Checks the status of a policy
+
+| arg list |
+|--|
+| `<response-code> (STD <ric-id> <policy-id> <enforce-status> [<reason>]) \| (OSC <ric-id> <policy-type-id> <policy-id> <instance-status> <has-been-deleted>)` |
+
+| parameter | description |
+| --------- | ----------- |
+| `<response-code>` | Expected http response code |
+| `STD` | Indicator of status of Standardized A1 |
+| `<ric-id>` | Id of the ric |
+| `<policy-id>` | Id of the policy |
+| `<enforce-status>` | Enforcement status |
+| `<reason>` | Optional reason |
+| `OSC` | Indicator of status of Non-Standardized OSC A1 |
+| `<policy-type-id>` | Id of the policy type |
+| `<instance-status>` | Instance status |
+| `<has-been-deleted>` | Deleted status, true or false |
+
+
## License
Copyright (C) 2020 Nordix Foundation. All rights reserved.
--- /dev/null
+#!/bin/bash
+
+# ============LICENSE_START===============================================
+# Copyright (C) 2021 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+# This is a script that contains container/service management functions and test functions for Chartmuseum
+
+
+################ Test engine functions ################
+
+# Create the image var used during the test
+# arg: <image-tag-suffix> (selects staging, snapshot, release etc)
+# <image-tag-suffix> is present only for images with staging, snapshot,release tags
+# Note: the suffix arg is not used here - Chartmuseum is a third-party image, always pulled as REMOTE_OTHER.
+__CHARTMUS_imagesetup() {
+ __check_and_create_image_var CHARTMUS "CHART_MUS_IMAGE" "CHART_MUS_IMAGE_BASE" "CHART_MUS_IMAGE_TAG" REMOTE_OTHER "$CHART_MUS_DISPLAY_NAME"
+}
+
+# Pull image from remote repo or use locally built image
+# arg: <pull-policy-override> <pull-policy-original>
+# <pull-policy-override> Shall be used for images allowing overriding. For example use a local image when test is started to use released images
+# <pull-policy-original> Shall be used for images that do not allow overriding
+# Both var may contain: 'remote', 'remote-remove' or 'local'
+# Note: $2 (the non-overridable policy) is used - the Chartmuseum image does not allow override.
+__CHARTMUS_imagepull() {
+ __check_and_pull_image $2 "$CHART_MUS_DISPLAY_NAME" $CHART_MUS_APP_NAME CHART_MUS_IMAGE
+}
+
+# Build image (only for simulator or interfaces stubs owned by the test environment)
+# arg: <image-tag-suffix> (selects staging, snapshot, release etc)
+# <image-tag-suffix> is present only for images with staging, snapshot,release tags
+# Chartmuseum is a third-party image and is never built locally - print an error if called.
+__CHARTMUS_imagebuild() {
+ echo -e $RED" Image for app CHARTMUS shall never be built"$ERED
+}
+
+# Generate a string for each included image using the app display name and a docker images format string
+# If a custom image repo is used then also the source image from the local repo is listed
+# arg: <docker-images-format-string> <file-to-append>
+__CHARTMUS_image_data() {
+ echo -e "$CHART_MUS_DISPLAY_NAME\t$(docker images --format $1 $CHART_MUS_IMAGE)" >> $2
+ if [ ! -z "$CHART_MUS_IMAGE_SOURCE" ]; then
+ echo -e "-- source image --\t$(docker images --format $1 $CHART_MUS_IMAGE_SOURCE)" >> $2
+ fi
+}
+
+# Scale kubernetes resources to zero
+# All resources shall be ordered to be scaled to 0, if relevant. If not relevant to scale, then do no action.
+# This function is called for apps fully managed by the test script
+__CHARTMUS_kube_scale_zero() {
+ __kube_scale_all_resources $KUBE_SIM_NAMESPACE autotest CHARTMUS
+}
+
+# Scale kubernetes resources to zero and wait until this has been accomplished, if relevant. If not relevant to scale, then do no action.
+# This function is called for prestarted apps not managed by the test script.
+# A prestarted Chartmuseum is left running - only an informational message is printed.
+__CHARTMUS_kube_scale_zero_and_wait() {
+ echo -e $RED" CHARTMUS app is not scaled in this state"$ERED
+}
+
+# Delete all kube resources for the app
+# This function is called for apps managed by the test script.
+__CHARTMUS_kube_delete_all() {
+ __kube_delete_all_resources $KUBE_SIM_NAMESPACE autotest CHARTMUS
+}
+
+# Store docker logs
+# This function is called for apps managed by the test script.
+# args: <log-dir> <file-prefix>
+__CHARTMUS_store_docker_logs() {
+ if [ $RUNMODE == "KUBE" ]; then
+ kubectl logs -l "autotest=CHARTMUS" -n $KUBE_SIM_NAMESPACE --tail=-1 > $1$2_chartmuseum.log 2>&1
+ else
+ docker logs $CHART_MUS_APP_NAME > $1$2_chartmuseum.log 2>&1
+ fi
+}
+
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+# Default protocol is http (see use_chart_mus_http/use_chart_mus_https).
+__CHARTMUS_initial_setup() {
+ use_chart_mus_http
+}
+
+# Set app short-name, app name and namespace for logging runtime statistics of kubernets pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+# NOTE(review): "statisics" is misspelled but is the callback name the test engine invokes
+# for every app (cf. __CBS_statisics_setup) - do not rename in isolation.
+__CHARTMUS_statisics_setup() {
+ if [ $RUNMODE == "KUBE" ]; then
+ echo "CHARTMUS $CHART_MUS_APP_NAME $KUBE_SIM_NAMESPACE"
+ else
+ echo "CHARTMUS $CHART_MUS_APP_NAME"
+ fi
+}
+
+# Check the application requirements, e.g. helm, that the test needs. Exit 1 if a requirement is not satisfied
+# args: -
+# Chartmuseum has no special requirements - no-op.
+__CHARTMUS_test_requirements() {
+ :
+}
+
+#######################################################
+
+# Set http as the protocol to use for all communication to the Chartmuseum
+# args: -
+# (Function for test scripts)
+use_chart_mus_http() {
+ __chart_mus_set_protocoll "http" $CHART_MUS_INTERNAL_PORT $CHART_MUS_EXTERNAL_PORT
+}
+
+# Set https as the protocol to use for all communication to the Chartmuseum
+# args: -
+# (Function for test scripts)
+use_chart_mus_https() {
+ __chart_mus_set_protocoll "https" $CHART_MUS_INTERNAL_SECURE_PORT $CHART_MUS_EXTERNAL_SECURE_PORT
+}
+
+# Setup paths to svc/container for internal and external access
+# args: <protocol> <internal-port> <external-port>
+# Sets CHART_MUS_SERVICE_PATH/PORT/HOST for docker mode, or the svc-based equivalents in kube mode,
+# plus CHART_MUS_SERVICE_HTTPX with the chosen protocol.
+__chart_mus_set_protocoll() {
+ echo -e $BOLD"$CHART_MUS_DISPLAY_NAME protocol setting"$EBOLD
+ echo -e " Using $BOLD $1 $EBOLD towards $CHART_MUS_DISPLAY_NAME"
+
+ ## Access to Chartmuseum
+
+ CHART_MUS_SERVICE_PATH=$1"://"$CHART_MUS_APP_NAME":"$2 # docker access, container->container and script->container via proxy
+ CHART_MUS_SERVICE_PORT=$2
+ CHART_MUS_SERVICE_HOST=$CHART_MUS_APP_NAME
+ if [ $RUNMODE == "KUBE" ]; then
+ CHART_MUS_SERVICE_PATH=$1"://"$CHART_MUS_APP_NAME.$KUBE_SIM_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
+ CHART_MUS_SERVICE_PORT=$3
+ CHART_MUS_SERVICE_HOST=$CHART_MUS_APP_NAME.$KUBE_SIM_NAMESPACE
+ fi
+ CHART_MUS_SERVICE_HTTPX=$1
+
+ echo ""
+}
+
+### Admin API functions Chartmuseum
+
+###########################
+### Chartmuseum functions
+###########################
+
+# Export env vars for config files, docker compose and kube resources
+# args: -
+# The exported vars are consumed via envsubst in the compose/kube templates.
+__chartmuseum_export_vars() {
+ export CHART_MUS_APP_NAME
+ export CHART_MUS_DISPLAY_NAME
+
+ export DOCKER_SIM_NWNAME
+ export KUBE_SIM_NAMESPACE
+
+ export CHART_MUS_IMAGE
+ export CHART_MUS_INTERNAL_PORT
+ export CHART_MUS_EXTERNAL_PORT
+
+ export CHART_MUS_CHART_CONTR_CHARTS
+
+}
+
+
+# Start the Chartmuseum in the simulator group
+# args: -
+# (Function for test scripts)
+# In KUBE mode the app may be either fully managed (created here) or prestarted (only scaled up);
+# it must be exactly one of the two. In docker mode it must be included as managed.
+start_chart_museum() {
+
+ echo -e $BOLD"Starting $CHART_MUS_DISPLAY_NAME"$EBOLD
+
+ if [ $RUNMODE == "KUBE" ]; then
+
+ # Check if app shall be fully managed by the test script
+ __check_included_image "CHARTMUS"
+ retcode_i=$?
+
+ # Check if app shall only be used by the testscript
+ __check_prestarted_image "CHARTMUS"
+ retcode_p=$?
+
+ # Fix: use CHART_MUS_APP_NAME - CHART_MUS_NAME is not a defined variable
+ if [ $retcode_i -ne 0 ] && [ $retcode_p -ne 0 ]; then
+ echo -e $RED"The $CHART_MUS_APP_NAME app is not included as managed nor prestarted in this test script"$ERED
+ echo -e $RED"The $CHART_MUS_APP_NAME will not be started"$ERED
+ exit
+ fi
+ if [ $retcode_i -eq 0 ] && [ $retcode_p -eq 0 ]; then
+ echo -e $RED"The $CHART_MUS_APP_NAME app is included both as managed and prestarted in this test script"$ERED
+ echo -e $RED"The $CHART_MUS_APP_NAME will not be started"$ERED
+ exit
+ fi
+
+ if [ $retcode_p -eq 0 ]; then
+ echo -e " Using existing $CHART_MUS_APP_NAME deployment and service"
+ echo " Setting RC replicas=1"
+ __kube_scale deployment $CHART_MUS_APP_NAME $KUBE_SIM_NAMESPACE 1
+ fi
+
+ if [ $retcode_i -eq 0 ]; then
+ echo -e " Creating $CHART_MUS_APP_NAME deployment and service"
+
+ __kube_create_namespace $KUBE_SIM_NAMESPACE
+
+ __chartmuseum_export_vars
+
+ # Create service
+ input_yaml=$SIM_GROUP"/"$CHART_MUS_COMPOSE_DIR"/"svc.yaml
+ output_yaml=$PWD/tmp/chartmus_svc.yaml
+ __kube_create_instance service $CHART_MUS_APP_NAME $input_yaml $output_yaml
+
+ # Create app
+ input_yaml=$SIM_GROUP"/"$CHART_MUS_COMPOSE_DIR"/"app.yaml
+ output_yaml=$PWD/tmp/chartmus_app.yaml
+ __kube_create_instance app $CHART_MUS_APP_NAME $input_yaml $output_yaml
+ fi
+
+ __check_service_start $CHART_MUS_APP_NAME $CHART_MUS_SERVICE_PATH$CHART_MUS_ALIVE_URL
+ else
+
+ # Check if docker app shall be fully managed by the test script
+ __check_included_image 'CHARTMUS'
+ if [ $? -eq 1 ]; then
+ echo -e $RED"The Chartmuseum app is not included as managed in this test script"$ERED
+ echo -e $RED"The Chartmuseum will not be started"$ERED
+ exit
+ fi
+
+ __chartmuseum_export_vars
+
+ __start_container $CHART_MUS_COMPOSE_DIR "" NODOCKERARGS 1 $CHART_MUS_APP_NAME
+
+ __check_service_start $CHART_MUS_APP_NAME $CHART_MUS_SERVICE_PATH$CHART_MUS_ALIVE_URL
+ fi
+ echo ""
+ return 0
+}
+
+# Execute a curl cmd towards the chartmuseum simulator and check the response code.
+# args: TEST|CONF <expected-response-code> <curl-cmd-string>
+# TEST logs the outcome as a test-case result, CONF as a configuration-step result.
+# Returns 0 if curl succeeded and the last 3 chars of the response match the expected code, else 1.
+__execute_curl_to_chartmuseum() {
+ TIMESTAMP=$(date "+%Y-%m-%d %H:%M:%S")
+ echo "(${BASH_LINENO[0]}) - ${TIMESTAMP}: ${FUNCNAME[0]}" $@ >> $HTTPLOG
+ proxyflag=""
+ if [ ! -z "$KUBE_PROXY_PATH" ]; then
+ if [ $KUBE_PROXY_HTTPX == "http" ]; then
+ proxyflag=" --proxy $KUBE_PROXY_PATH"
+ else
+ proxyflag=" --proxy-insecure --proxy $KUBE_PROXY_PATH"
+ fi
+ fi
+ echo " CMD: $3 -skw %{http_code} $proxyflag" >> $HTTPLOG
+ res="$($3 -skw %{http_code} $proxyflag)"
+ retcode=$? # capture curl's exit code immediately - the log echo below would overwrite $?
+ echo " RESP: $res" >> $HTTPLOG
+ if [ $retcode -ne 0 ]; then
+ __log_conf_fail_general " Fatal error when executing curl, response: "$retcode
+ return 1
+ fi
+ status=${res:${#res}-3}
+ if [ $status -eq $2 ]; then
+ if [ $1 == "TEST" ]; then
+ __log_test_pass
+ else
+ __log_conf_ok
+ fi
+ return 0
+ fi
+ if [ $1 == "TEST" ]; then
+ __log_test_fail_status_code $2 $status
+ else
+ __log_conf_fail_status_code $2 $status
+ fi
+ return 1
+}
+
+# upload helmchart to chartmuseum (expects http 201)
+# arg: <chart-name> - the packaged chart $TESTENV_TEMP_FILES/<chart-name>-0.1.0.tgz must exist
+# (Function for test scripts)
+chartmus_upload_test_chart() {
+ __log_conf_start $@
+ if [ $# -ne 1 ]; then
+ __print_err "<chart-name>" $@
+ return 1
+ fi
+ chart_path=$TESTENV_TEMP_FILES/$1"-0.1.0.tgz"
+ if [ ! -f "$chart_path" ]; then
+ echo -e $RED" Cannot find package chart: $chart_path"$ERED
+ __log_conf_fail_general
+ return 1
+ fi
+ __execute_curl_to_chartmuseum CONF 201 "curl --data-binary @$chart_path $CHART_MUS_SERVICE_PATH/api/charts"
+}
+
+# delete helmchart from chartmuseum (expects http 200)
+# arg: <chart-name> [<version>] - version defaults to 0.1.0 when omitted
+# (Function for test scripts)
+chartmus_delete_test_chart() {
+ __log_conf_start $@
+ if [ $# -gt 2 ]; then
+ __print_err "<chart-name> [<version>]" $@
+ return 1
+ fi
+ if [ $# -eq 1 ]; then
+ chart_path="/$1/0.1.0"
+ else
+ chart_path="/$1/$2"
+ fi
+ __execute_curl_to_chartmuseum CONF 200 "curl -X DELETE $CHART_MUS_SERVICE_PATH/api/charts"$chart_path
+}
\ No newline at end of file
__kube_delete_all_resources() {
echo " Delete all in namespace $1 ..."
namespace=$1
- resources="deployments replicaset statefulset services pods configmaps pvc "
+ resources="deployments replicaset statefulset services pods configmaps pvc serviceaccounts"
for restype in $resources; do
result=$(kubectl get $restype -n $namespace -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
if [ $? -eq 0 ] && [ ! -z "$result" ]; then
}
__kube_delete_all_pv() {
- echo " Delete pv ..."
- resources="pv"
+ echo " Delete all non-namespaced resources ..."
+ resources="pv clusterrolebindings"
for restype in $resources; do
result=$(kubectl get $restype -o jsonpath='{.items[?(@.metadata.labels.autotest)].metadata.name}')
if [ $? -eq 0 ] && [ ! -z "$result" ]; then
__CBS_statisics_setup() {
echo ""
}
+
+# Check the application requirements, e.g. helm, that the test needs. Exit 1 if a requirement is not satisfied
+# args: -
+# Consul has no special requirements - no-op.
+__CONSUL_test_requirements() {
+ :
+}
+
+# Check the application requirements, e.g. helm, that the test needs. Exit 1 if a requirement is not satisfied
+# args: -
+# CBS has no special requirements - no-op.
+__CBS_test_requirements() {
+ :
+}
#######################################################
fi
}
+# Check the application requirements, e.g. helm, that the test needs. Exit 1 if a requirement is not satisfied
+# args: -
+# CP has no special requirements - no-op.
+__CP_test_requirements() {
+ :
+}
+
#######################################################
done
}
+# Check the application requirements, e.g. helm, that the test needs. Exit 1 if a requirement is not satisfied
+# args: -
+# CR has no special requirements - no-op.
+__CR_test_requirements() {
+ :
+}
+
#######################################################
################
fi
}
+# Tests if a variable value in the CR is equal to or greater than the target value, with an optional timeout.
+# Arg: <cr-path-id> <variable-name> <target-value> - This test set pass or fail depending on if the variable is
+# equal to or greater than the target or not.
+# Arg: <cr-path-id> <variable-name> <target-value> <timeout-in-sec> - This test waits up to the timeout seconds
+# before setting pass or fail depending on if the variable value becomes equal to or greater than the target
+# value or not.
+# (Function for test scripts)
+cr_greater_or_equal() {
+ if [ $# -eq 3 ] || [ $# -eq 4 ]; then
+ CR_SERVICE_PATH=$(__cr_get_service_path $1)
+ # Check the lookup status before any further assignment - an assignment resets $? to 0,
+ # which made the original error branch unreachable.
+ if [ $? -ne 0 ]; then
+ __print_err "<cr-path-id> missing or incorrect" $@
+ return 1
+ fi
+ CR_ADAPTER=$CR_SERVICE_PATH
+ __var_test "CR" "$CR_SERVICE_PATH/counter/" $2 ">=" $3 $4
+ else
+ __print_err "Wrong args to cr_greater_or_equal, needs three or four args: <cr-path-id> <variable-name> <target-value> [ timeout ]" $@
+ fi
+}
+
# Tests if a variable value in the CR contains the target string and and optional timeout
# Arg: <variable-name> <target-value> - This test set pass or fail depending on if the variable contains
# the target or not.
fi
}
-# Read a variable value from CR sim and send to stdout. Arg: <variable-name>
+# Read a variable value from CR sim and send to stdout. Arg: <cr-path-id> <variable-name>
cr_read() {
CR_SERVICE_PATH=$(__cr_get_service_path $1)
CR_ADAPTER=$CR_SERVICE_PATH
__print_err "<cr-path-id> missing or incorrect" $@
return 1
fi
- echo "$(__do_curl $CR_SERVICE_PATH/counter/$1)"
+ echo "$(__do_curl $CR_SERVICE_PATH/counter/$2)"
}
# Function to configure write delay on callbacks
fi
}
+# Check the application requirements, e.g. helm, that the test needs. Exit 1 if a requirement is not satisfied
+# args: -
+# Dmaap adapter has no special requirements - no-op.
+__DMAAPADP_test_requirements() {
+ :
+}
+
#######################################################
# Set http as the protocol to use for all communication to the Dmaap adapter
fi
}
+# Check the application requirements, e.g. helm, that the test needs. Exit 1 if a requirement is not satisfied
+# args: -
+# Dmaap mediator has no special requirements - no-op.
+__DMAAPMED_test_requirements() {
+ :
+}
+
#######################################################
# Set http as the protocol to use for all communication to the Dmaap mediator
export DMAAP_MED_DATA_MOUNT_PATH
export DMAAP_MED_HOST_MNT_DIR
- export DMAAP_MED_DATA_FILE
+ export DMAAP_MED_CONTR_DATA_FILE
export DMAAP_MED_DATA_CONFIGMAP_NAME=$DMAAP_MED_APP_NAME"-data"
if [ $1 == "PROXY" ]; then
export DMAAP_MED_CONF_SELF_HOST=$(echo $DMAAP_MED_SERVICE_PATH | cut -d: -f1-2)
export DMAAP_MED_CONF_SELF_PORT=$(echo $DMAAP_MED_SERVICE_PATH | cut -d: -f3)
export MR_SERVICE_PATH
+ export MR_KAFKA_SERVICE_PATH
+
}
# Start the Dmaap mediator
__dmaapmed_export_vars $1
# Create config map for data
- data_json=$PWD/tmp/$DMAAP_MED_DATA_FILE
+ data_json=$PWD/tmp/$DMAAP_MED_CONTR_DATA_FILE
if [ $# -lt 2 ]; then
#create empty dummy file
echo "{}" > $data_json
__dmaapmed_export_vars $1
- dest_file=$SIM_GROUP/$DMAAP_MED_COMPOSE_DIR/$DMAAP_MED_HOST_MNT_DIR/$DMAAP_MED_DATA_FILE
+ dest_file=$SIM_GROUP/$DMAAP_MED_COMPOSE_DIR/$DMAAP_MED_HOST_MNT_DIR/$DMAAP_MED_CONTR_DATA_FILE
envsubst < $2 > $dest_file
exit 1
else
echo " OK, code: "$status" (Expected)"
- if [[ "$content_type" == *"$resp_content"* ]]; then
+ if [[ "$resp_content" == '*' ]]; then
+ :
+ elif [[ "$content_type" == *"$resp_content"* ]]; then
echo " Content type: "$content_type" (Expected)"
else
echo " Expected content type: "$resp_content
--- /dev/null
+#!/bin/bash
+
+# ============LICENSE_START===============================================
+# Copyright (C) 2021 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+# This script format http endpoint stats generated by testscripts
+
+# Print command usage help.
+print_usage() {
+ echo "Usage: format_endpoint_stats <log-base-dir> <app-id> <app-description> [tc-id]+ "
+}
+
+# Path to the accumulated summary file (set in main)
+SUMMARYFILE=""
+# Scratch file used while rewriting the summary (set in main)
+SUMMARYFILE_TMP=""
+
+# Merge one endpoint stat line into $SUMMARYFILE.
+# arg: the fields of a stat line: "# <method> <endpoint> <pos-ok>/<pos-total> <neg-ok>/<neg-total>"
+# If a summary line with the same method+endpoint exists, the ok/total counters are added to it;
+# otherwise a new line is appended. The summary is rebuilt via $SUMMARYFILE_TMP and copied back.
+update_summary() {
+
+ input=$@
+ inputarr=(${input// / })
+ inputp=${inputarr[3]}
+ inputn=${inputarr[4]}
+ inputposarr=(${inputp//\// })
+ inputnegarr=(${inputn//\// })
+ > $SUMMARYFILE_TMP
+ found=0
+ while read -r line; do
+ linearr=(${line// / })
+ linep=${linearr[3]}
+ linen=${linearr[4]}
+ lineposarr=(${linep//\// })
+ linenegarr=(${linen//\// })
+ if [[ ${linearr[1]} == ${inputarr[1]} ]] && [[ ${linearr[2]} == ${inputarr[2]} ]]; then
+ let lineposarr[0]=lineposarr[0]+inputposarr[0]
+ let lineposarr[1]=lineposarr[1]+inputposarr[1]
+ let linenegarr[0]=linenegarr[0]+inputnegarr[0]
+ let linenegarr[1]=linenegarr[1]+inputnegarr[1]
+ found=1
+ fi
+ printf '%-2s %-10s %-45s %-16s %-16s' "#" "${linearr[1]}" "${linearr[2]}" "${lineposarr[0]}/${lineposarr[1]}" "${linenegarr[0]}/${linenegarr[1]}" >> $SUMMARYFILE_TMP
+ echo "" >> $SUMMARYFILE_TMP
+ done < $SUMMARYFILE
+ if [ $found -eq 0 ]; then
+ printf '%-2s %-10s %-45s %-16s %-16s' "#" "${inputarr[1]}" "${inputarr[2]}" "${inputposarr[0]}/${inputposarr[1]}" "${inputnegarr[0]}/${inputnegarr[1]}" >> $SUMMARYFILE_TMP
+ echo "" >> $SUMMARYFILE_TMP
+ fi
+ cp $SUMMARYFILE_TMP $SUMMARYFILE
+}
+
+# --- Main: validate args, then aggregate and print endpoint stats per testcase dir ---
+if [ $# -lt 4 ]; then
+ print_usage
+ exit 1
+fi
+BASE_DIR=$1
+if [ ! -d $BASE_DIR ]; then
+ print_usage
+ echo "<log-base-dir> $BASE_DIR does not exist or is not a dir"
+ exit 1
+fi
+SUMMARYFILE=$BASE_DIR/endpoint_summary.log
+# -f: the summary file does not exist on the first run - plain rm would print an error
+rm -f $SUMMARYFILE
+touch $SUMMARYFILE
+SUMMARYFILE_TMP=$BASE_DIR/endpoint_summary_tmp.log
+TC_FAIL=0
+shift
+APP_ID=$1
+shift
+echo ""
+echo "==================================================="
+echo "Functional test cases for $1"
+echo "==================================================="
+echo
+shift
+# Remaining args are testcase ids - one log dir per testcase
+while [ $# -gt 0 ]; do
+ FTC_DIR=$BASE_DIR/$1
+ if [ ! -d $FTC_DIR ]; then
+ echo "Dir $FTC_DIR does not exist"
+ exit 1
+ fi
+ IMAGE_INFO_FILE=$FTC_DIR/imageinfo_$APP_ID".log"
+ if [ -f $IMAGE_INFO_FILE ]; then
+ echo "=== Testscript: $1 ==="
+ echo "Image: "$(cat $IMAGE_INFO_FILE)
+ echo
+ TC_RES_FILE=$FTC_DIR/.result$1.txt
+ if [ -f "$TC_RES_FILE" ]; then
+ TC_RESULT=$(< "$TC_RES_FILE")
+ if [ $TC_RESULT -ne 0 ]; then
+ echo " !!!!! TESTCASE FAILED !!!!!"
+ let TC_FAIL=TC_FAIL+1
+ fi
+ fi
+ echo "=== Results: positive=2XX http status, negative=non 2XX http status - (ok/total)==="
+ echo "Method Endpoint Positive Negative"
+ grep --no-filename "#" $FTC_DIR/endpoint_$APP_ID* | cut -c 4-
+ for filename in $FTC_DIR/endpoint_$APP_ID* ; do
+ filedata=$(< $filename)
+ update_summary $filedata
+ done
+ echo "==============================="
+ echo
+ else
+ echo "=== No stats collected by Testscript $1 ==="
+ echo ""
+ fi
+ shift
+done
+
+echo "Summary of all testscripts"
+if [ $TC_FAIL -ne 0 ]; then
+ echo " !!!!! ONE OR MORE TESTCASE(S) FAILED - CHECK INDIVIDUAL TEST RESULT!!!!!"
+fi
+echo "=== Results: positive=2XX http status, negative=non 2XX http status - (ok/total)==="
+echo "Method Endpoint Positive Negative"
+cat $SUMMARYFILE | cut -c 4-
+
+exit 0
+
--- /dev/null
+#!/bin/bash
+
+# ============LICENSE_START===============================================
+# Copyright (C) 2021 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+# This is a script that contains container/service management functions and test functions for Helm Manager
+
+################ Test engine functions ################
+
+# Create the image var used during the test
+# arg: [<image-tag-suffix>] (selects staging, snapshot, release etc)
+# <image-tag-suffix> is present only for images with staging, snapshot,release tags
+__HELMMANAGER_imagesetup() {
+ __check_and_create_image_var HELMMANAGER "HELM_MANAGER_IMAGE" "HELM_MANAGER_IMAGE_BASE" "HELM_MANAGER_IMAGE_TAG" $1 "$HELM_MANAGER_DISPLAY_NAME"
+}
+
+# Pull image from remote repo or use locally built image
+# arg: <pull-policy-override> <pull-policy-original>
+# <pull-policy-override> Shall be used for images allowing overriding. For example use a local image when test is started to use released images
+# <pull-policy-original> Shall be used for images that do not allow overriding
+# Both arg var may contain: 'remote', 'remote-remove' or 'local'
+# Note: $1 (the overridable policy) is used - the Helm Manager image allows override.
+__HELMMANAGER_imagepull() {
+ __check_and_pull_image $1 "$HELM_MANAGER_DISPLAY_NAME" $HELM_MANAGER_APP_NAME HELM_MANAGER_IMAGE
+}
+
+# Generate a string for each included image using the app display name and a docker images format string
+# If a custom image repo is used then also the source image from the local repo is listed
+# arg: <docker-images-format-string> <file-to-append>
+__HELMMANAGER_image_data() {
+ echo -e "$HELM_MANAGER_DISPLAY_NAME\t$(docker images --format $1 $HELM_MANAGER_IMAGE)" >> $2
+ if [ ! -z "$HELM_MANAGER_IMAGE_SOURCE" ]; then
+ echo -e "-- source image --\t$(docker images --format $1 $HELM_MANAGER_IMAGE_SOURCE)" >> $2
+ fi
+}
+
+# Scale kubernetes resources to zero
+# All resources shall be ordered to be scaled to 0, if relevant. If not relevant to scale, then do no action.
+# This function is called for apps fully managed by the test script
+__HELMMANAGER_kube_scale_zero() {
+ __kube_scale_all_resources $KUBE_NONRTRIC_NAMESPACE autotest HELMMANAGER
+}
+
+# Scale kubernetes resources to zero and wait until this has been accomplished, if relevant. If not relevant to scale, then do no action.
+# This function is called for prestarted apps not managed by the test script.
+__HELMMANAGER_kube_scale_zero_and_wait() {
+ __kube_scale_and_wait_all_resources $KUBE_NONRTRIC_NAMESPACE app "$KUBE_NONRTRIC_NAMESPACE"-"$HELM_MANAGER_APP_NAME"
+}
+
+# Delete all kube resources for the app
+# This function is called for apps managed by the test script.
+__HELMMANAGER_kube_delete_all() {
+ __kube_delete_all_resources $KUBE_NONRTRIC_NAMESPACE autotest HELMMANAGER
+}
+
+# Store docker logs
+# This function is called for apps managed by the test script.
+# args: <log-dir> <file-prefix>
+__HELMMANAGER_store_docker_logs() {
+ if [ $RUNMODE == "KUBE" ]; then
+ kubectl logs -l "autotest=HELMMANAGER" -n $KUBE_NONRTRIC_NAMESPACE --tail=-1 > $1$2_helmmanager.log 2>&1
+ else
+ docker logs $HELM_MANAGER_APP_NAME > $1$2_helmmanager.log 2>&1
+ fi
+}
+
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+# Default protocol is http (see use_helm_manager_http/use_helm_manager_https).
+__HELMMANAGER_initial_setup() {
+ use_helm_manager_http
+}
+
+# Set app short-name, app name and namespace for logging runtime statistics of kubernets pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+# NOTE(review): "statisics" is misspelled but is the callback name the test engine invokes
+# for every app - do not rename in isolation.
+__HELMMANAGER_statisics_setup() {
+ if [ $RUNMODE == "KUBE" ]; then
+ echo "HELMMANAGER $HELM_MANAGER_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+ else
+ echo "HELMMANAGER $HELM_MANAGER_APP_NAME"
+ fi
+}
+
+# Check the application requirements, e.g. helm, that the test needs. Exit 1 if a requirement is not satisfied
+# args: -
+# Requires helm v3 on the path; aborts the test otherwise.
+__HELMMANAGER_test_requirements() {
+ tmp=$(which helm)
+ if [ $? -ne 0 ]; then
+ # $ERED added to terminate the color code - matches the error echoes elsewhere in the test env
+ echo -e $RED" Helm3 is required for running helm manager tests. Pls install helm3"$ERED
+ exit 1
+ fi
+ tmp_version=$(helm version | grep 'v3')
+ if [ -z "$tmp_version" ]; then
+ echo -e $RED" Helm3 is required for running helm manager tests. Pls install helm3"$ERED
+ exit 1
+ fi
+}
+
+#######################################################
+
+# Set http as the protocol to use for all communication to the Helm Manager
+# args: -
+# (Function for test scripts)
+use_helm_manager_http() {
+ __helm_manager_set_protocoll "http" $HELM_MANAGER_INTERNAL_PORT $HELM_MANAGER_EXTERNAL_PORT
+}
+
+# Set https as the protocol to use for all communication to the Helm Manager
+# args: -
+# (Function for test scripts)
+use_helm_manager_https() {
+ __helm_manager_set_protocoll "https" $HELM_MANAGER_INTERNAL_SECURE_PORT $HELM_MANAGER_EXTERNAL_SECURE_PORT
+}
+
+# Setup paths to svc/container for internal and external access
+# args: <protocol> <internal-port> <external-port>
+# Sets HELMMANAGER_SERVICE_PATH; basic-auth credentials are embedded in the URL
+# (test-env only - do not reuse this pattern for production access).
+__helm_manager_set_protocoll() {
+ echo -e $BOLD"$HELM_MANAGER_DISPLAY_NAME protocol setting"$EBOLD
+ echo -e " Using $BOLD $1 $EBOLD towards $HELM_MANAGER_DISPLAY_NAME"
+
+ ## Access to Helm Manager
+
+ HELMMANAGER_SERVICE_PATH=$1"://$HELM_MANAGER_USER:$HELM_MANAGER_PWD@"$HELM_MANAGER_APP_NAME":"$2 # docker access, container->container and script->container via proxy
+ if [ $RUNMODE == "KUBE" ]; then
+ HELMMANAGER_SERVICE_PATH=$1"://$HELM_MANAGER_USER:$HELM_MANAGER_PWD@"$HELM_MANAGER_APP_NAME.$KUBE_NONRTRIC_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
+ fi
+
+ echo ""
+}
+
+# Export env vars for config files, docker compose and kube resources
+# args: -
+# The exported vars are consumed via envsubst in the compose/kube templates.
+__helm_manager_export_vars() {
+
+ export HELM_MANAGER_APP_NAME
+ export HELM_MANAGER_DISPLAY_NAME
+
+ export DOCKER_SIM_NWNAME
+ export KUBE_NONRTRIC_NAMESPACE
+
+ export HELM_MANAGER_EXTERNAL_PORT
+ export HELM_MANAGER_INTERNAL_PORT
+ export HELM_MANAGER_EXTERNAL_SECURE_PORT
+ export HELM_MANAGER_INTERNAL_SECURE_PORT
+ export HELM_MANAGER_CLUSTER_ROLE
+ export HELM_MANAGER_SA_NAME
+ export HELM_MANAGER_ALIVE_URL
+ export HELM_MANAGER_COMPOSE_DIR
+ export HELM_MANAGER_USER
+ export HELM_MANAGER_PWD
+}
+
+# Start the Helm Manager container
+# args: -
+# (Function for test scripts)
+# In KUBE mode the app may be either fully managed (sa+svc+app created here) or prestarted
+# (only scaled up); it must be exactly one of the two. In docker mode it must be included as managed.
+start_helm_manager() {
+
+ echo -e $BOLD"Starting $HELM_MANAGER_DISPLAY_NAME"$EBOLD
+
+ if [ $RUNMODE == "KUBE" ]; then
+
+ # Check if app shall be fully managed by the test script
+ __check_included_image "HELMMANAGER"
+ retcode_i=$?
+
+ # Check if app shall only be used by the testscript
+ __check_prestarted_image "HELMMANAGER"
+ retcode_p=$?
+
+ if [ $retcode_i -ne 0 ] && [ $retcode_p -ne 0 ]; then
+ echo -e $RED"The $HELM_MANAGER_APP_NAME app is not included as managed nor prestarted in this test script"$ERED
+ echo -e $RED"The $HELM_MANAGER_APP_NAME will not be started"$ERED
+ exit
+ fi
+ if [ $retcode_i -eq 0 ] && [ $retcode_p -eq 0 ]; then
+ echo -e $RED"The $HELM_MANAGER_APP_NAME app is included both as managed and prestarted in this test script"$ERED
+ echo -e $RED"The $HELM_MANAGER_APP_NAME will not be started"$ERED
+ exit
+ fi
+
+ if [ $retcode_p -eq 0 ]; then
+ echo -e " Using existing $HELM_MANAGER_APP_NAME deployment and service"
+ echo " Setting $HELM_MANAGER_APP_NAME replicas=1"
+ __kube_scale sts $HELM_MANAGER_APP_NAME $KUBE_NONRTRIC_NAMESPACE 1
+ fi
+
+ if [ $retcode_i -eq 0 ]; then
+
+ echo -e " Creating $HELM_MANAGER_APP_NAME app and expose service"
+
+ #Check if nonrtric namespace exists, if not create it
+ __kube_create_namespace $KUBE_NONRTRIC_NAMESPACE
+
+ __helm_manager_export_vars
+
+ #Create service account (needed for in-cluster helm operations)
+ input_yaml=$SIM_GROUP"/"$HELM_MANAGER_COMPOSE_DIR"/"sa.yaml
+ output_yaml=$PWD/tmp/helmmanager_sa_svc.yaml
+ __kube_create_instance sa $HELM_MANAGER_APP_NAME $input_yaml $output_yaml
+
+ #Create service
+ input_yaml=$SIM_GROUP"/"$HELM_MANAGER_COMPOSE_DIR"/"svc.yaml
+ output_yaml=$PWD/tmp/helmmanager_svc.yaml
+ __kube_create_instance service $HELM_MANAGER_APP_NAME $input_yaml $output_yaml
+
+ #Create app
+ input_yaml=$SIM_GROUP"/"$HELM_MANAGER_COMPOSE_DIR"/"app.yaml
+ output_yaml=$PWD/tmp/helmmanager_app.yaml
+ __kube_create_instance app $HELM_MANAGER_APP_NAME $input_yaml $output_yaml
+ fi
+
+ __check_service_start $HELM_MANAGER_APP_NAME $HELMMANAGER_SERVICE_PATH$HELM_MANAGER_ALIVE_URL
+
+ else
+ __check_included_image 'HELMMANAGER'
+ if [ $? -eq 1 ]; then
+ echo -e $RED"The Helm Manager app is not included as managed in this test script"$ERED
+ echo -e $RED"The Helm Manager will not be started"$ERED
+ exit
+ fi
+
+ __helm_manager_export_vars
+
+ __start_container $HELM_MANAGER_COMPOSE_DIR "" NODOCKERARGS 1 $HELM_MANAGER_APP_NAME
+
+ __check_service_start $HELM_MANAGER_APP_NAME $HELMMANAGER_SERVICE_PATH$HELM_MANAGER_ALIVE_URL
+ fi
+ echo ""
+}
+
+# Execute a curl cmd towards the helm manager.
+# args: GET <path>
+# args: POST <path> <file-to-post>
+# args: POST1_2 <path> <name> <file-to-use-inline> <name> <file-to-post> <name> <file-to-post>
+# args: DELETE <path>
+__execute_curl_to_helmmanger() {
+    # Log caller line number, timestamp and raw args to the http log
+    TIMESTAMP=$(date "+%Y-%m-%d %H:%M:%S")
+    echo "(${BASH_LINENO[0]}) - ${TIMESTAMP}: ${FUNCNAME[0]}" $@ >> $HTTPLOG
+    # Route via the kube proxy if one is configured; https proxies need --proxy-insecure
+    proxyflag=""
+    if [ ! -z "$KUBE_PROXY_PATH" ]; then
+        if [ $KUBE_PROXY_HTTPX == "http" ]; then
+            proxyflag=" --proxy $KUBE_PROXY_PATH"
+        else
+            proxyflag=" --proxy-insecure --proxy $KUBE_PROXY_PATH"
+        fi
+    fi
+    # Build the curl command as a string; -skw %{http_code} appends the http status
+    # code to the response body so callers can slice it off the last 3 chars
+    if [ $1 == "GET" ]; then
+        curlstring="curl -skw %{http_code} $proxyflag $HELMMANAGER_SERVICE_PATH$2"
+    elif [ $1 == "POST" ]; then
+        curlstring="curl -skw %{http_code} $proxyflag $HELMMANAGER_SERVICE_PATH$2 -X POST --data-binary @$3 -H Content-Type:application/json"
+    elif [ $1 == "POST1_2" ]; then
+        # Multipart post: first part ($3=<$4) is sent as inline form data, the
+        # remaining two parts ($5=@$6, $7=@$8) as file attachments
+        curlstring="curl -skw %{http_code} $proxyflag $HELMMANAGER_SERVICE_PATH$2 -X POST -F $3=<$4 -F $5=@$6 -F $7=@$8 "
+    elif [ $1 == "DELETE" ]; then
+        curlstring="curl -skw %{http_code} $proxyflag $HELMMANAGER_SERVICE_PATH$2 -X DELETE"
+    else
+        echo " Unknown operation $1" >> $HTTPLOG
+        echo "000"
+        return 1
+    fi
+    echo " CMD: $curlstring" >> $HTTPLOG
+    # Intentionally unquoted: word-splitting of $curlstring builds the argv,
+    # so none of the substituted paths/args may contain spaces
+    res="$($curlstring)"
+    retcode=$?
+    echo " RESP: $res" >> $HTTPLOG
+    # On transport failure emit "000" as a pseudo status code
+    if [ $retcode -ne 0 ]; then
+        echo "000"
+        return 1
+    fi
+    echo $res
+    return 0
+}
+
+# API Test function: GET /helm/charts
+# args: <response-code> [ EMPTY | ( <chart> <version> <namespace> <release> <repo> )+ ]
+# (Function for test scripts)
+helm_manager_api_get_charts() {
+ __log_test_start $@
+
+ error_params=1
+ variablecount=$(($#-1))
+ if [ $# -eq 1 ]; then
+ error_params=0
+ elif [ $# -eq 2 ] && [ $2 == "EMPTY" ]; then
+ error_params=0
+ elif [ $(($variablecount%5)) -eq 0 ]; then
+ error_params=0
+ fi
+
+
+ if [ $error_params -eq 1 ]; then
+ __print_err "<response-code>" $@
+ return 1
+ fi
+
+ query="/helm/charts"
+ res="$(__execute_curl_to_helmmanger GET $query)"
+ status=${res:${#res}-3}
+
+ if [ $status -ne $1 ]; then
+ __log_test_fail_status_code $1 $status
+ return 1
+ fi
+
+ if [ $# -gt 1 ]; then
+ body=${res:0:${#res}-3}
+ shift
+ if [ $# -eq 1 ]; then
+ targetJson='{"charts":[]}'
+ else
+ targetJson='{"charts":['
+ arr=(${@})
+ for ((i=0; i<$#; i=i+5)); do
+ if [ "$i" -gt 0 ]; then
+ targetJson=$targetJson","
+ fi
+ chart_version=${arr[$i+2]}
+ if [ $chart_version == "DEFAULT-VERSION" ]; then
+ chart_version="0.1.0"
+ fi
+ targetJson=$targetJson'{"releaseName":"'${arr[$i+3]}'","chartId":{"name":"'${arr[$i+1]}'","version":"'0.1.0'"},"namespace":"'${arr[$i+4]}'","repository":{"repoName":"'${arr[$i+0]}'","protocol":null,"address":null,"port":null,"userName":null,"password":null},"overrideParams":null}'
+ done
+ targetJson=$targetJson']}'
+ fi
+ echo " TARGET JSON: $targetJson" >> $HTTPLOG
+ res=$(python3 ../common/compare_json.py "$targetJson" "$body")
+
+ if [ $res -ne 0 ]; then
+ __log_test_fail_body
+ return 1
+ fi
+ fi
+
+ __log_test_pass
+ return 0
+}
+
+# API Test function: POST /helm/repo - add repo
+# args: <response-code> <repo-name> <repo-protocol> <repo-address> <repo-port>
+# (Function for test scripts)
+helm_manager_api_post_repo() {
+ __log_test_start $@
+
+ if [ $# -ne 5 ]; then
+ __print_err "<response-code> <repo-name> <repo-protocol> <repo-address> <repo-port>" $@
+ return 1
+ fi
+
+ query="/helm/repo"
+ file="./tmp/cm-repo.json"
+ file_data='{"address" : "'$4'","repoName": "'$2'","protocol": "'$3'","port": "'$5'"}'
+ echo $file_data > $file
+ echo " FILE: $file_data" >> $HTTPLOG
+ res="$(__execute_curl_to_helmmanger POST $query $file)"
+ status=${res:${#res}-3}
+
+ if [ $status -ne $1 ]; then
+ __log_test_fail_status_code $1 $status
+ return 1
+ fi
+
+ __log_test_pass
+ return 0
+}
+
+# API Test function: POST /helm/onboard/chart - onboard chart
+# args: <response-code> <repo> <chart> <version> <release> <namespace>
+# (Function for test scripts)
+helm_manager_api_post_onboard_chart() {
+ __log_test_start $@
+
+ if [ $# -ne 6 ]; then
+ __print_err "<response-code> <repo> <chart> <version> <release> <namespace> " $@
+ return 1
+ fi
+
+ query="/helm/onboard/chart"
+ file="./tmp/chart.json"
+ chart_version=$4
+ if [ $chart_version == "DEFAULT-VERSION" ]; then
+ chart_version="0.1.0"
+ fi
+ file_data='{"chartId":{"name":"'$3'","version":"'$chart_version'"},"namespace":"'$6'","repository":{"repoName":"'$2'"},"releaseName":"'$5'"}'
+ echo $file_data > $file
+ echo " FILE - ($file): $file_data" >> $HTTPLOG
+ file2="./tmp/override.yaml"
+ echo "" >> $file2
+ file3="$TESTENV_TEMP_FILES/"$3"-"$chart_version".tgz"
+ res="$(__execute_curl_to_helmmanger POST1_2 $query info $file values $file2 chart $file3)"
+ status=${res:${#res}-3}
+
+ if [ $status -ne $1 ]; then
+ __log_test_fail_status_code $1 $status
+ return 1
+ fi
+
+ __log_test_pass
+ return 0
+}
+
+# API Test function: POST /helm/install - install chart
+# args: <response-code> <chart> <version>
+# (Function for test scripts)
+helm_manager_api_post_install_chart() {
+ __log_test_start $@
+
+ if [ $# -ne 3 ]; then
+ __print_err "<response-code> <chart> <version>" $@
+ return 1
+ fi
+
+ query="/helm/install"
+ file="./tmp/app-installation.json"
+ chart_version=$3
+ if [ $chart_version == "DEFAULT-VERSION" ]; then
+ chart_version="0.1.0"
+ fi
+ file_data='{"name": "'$2'","version": "'$chart_version'"}'
+ echo $file_data > $file
+ echo " FILE - ($file): $file_data" >> $HTTPLOG
+ res="$(__execute_curl_to_helmmanger POST $query $file)"
+ status=${res:${#res}-3}
+
+ if [ $status -ne $1 ]; then
+ __log_test_fail_status_code $1 $status
+ return 1
+ fi
+
+ __log_test_pass
+ return 0
+}
+
+# API Test function: DELETE /helm/uninstall - uninstall chart
+# args: <response-code> <chart> <version>
+# (Function for test scripts)
+helm_manager_api_uninstall_chart() {
+ __log_test_start $@
+
+ if [ $# -ne 3 ]; then
+ __print_err "<response-code> <chart> <version> " $@
+ return 1
+ fi
+
+ chart_version=$3
+ if [ $chart_version == "DEFAULT-VERSION" ]; then
+ chart_version="0.1.0"
+ fi
+ query="/helm/uninstall/$2/$chart_version"
+ res="$(__execute_curl_to_helmmanger DELETE $query)"
+ status=${res:${#res}-3}
+
+ if [ $status -ne $1 ]; then
+ __log_test_fail_status_code $1 $status
+ return 1
+ fi
+
+ __log_test_pass
+ return 0
+}
+
+# API Test function: DELETE /helm/chart - delete chart
+# args: <response-code> <chart> <version>
+# (Function for test scripts)
+helm_manager_api_delete_chart() {
+ __log_test_start $@
+
+ if [ $# -ne 3 ]; then
+ __print_err "<response-code> <chart> <version> " $@
+ return 1
+ fi
+
+ chart_version=$3
+ if [ $chart_version == "DEFAULT-VERSION" ]; then
+ chart_version="0.1.0"
+ fi
+ query="/helm/chart/$2/$chart_version"
+ res="$(__execute_curl_to_helmmanger DELETE $query)"
+ status=${res:${#res}-3}
+
+ if [ $status -ne $1 ]; then
+ __log_test_fail_status_code $1 $status
+ return 1
+ fi
+
+ __log_test_pass
+ return 0
+}
+
+# Config function: Add repo in helm manager by helm using exec
+# args: <repo-name> <repo-url>
+# (Function for test scripts)
+helm_manager_api_exec_add_repo() {
+ __log_conf_start $@
+
+ if [ $# -ne 2 ]; then
+ __print_err "<repo-name> <repo-url>" $@
+ return 1
+ fi
+
+ if [ $RUNMODE == "DOCKER" ]; then
+ retmsg=$(docker exec -it $HELM_MANAGER_APP_NAME helm repo add $1 $2)
+ retcode=$?
+ if [ $retcode -ne 0 ]; then
+ __log_conf_fail_general " Cannot add repo to helm, return code: $retcode, msg: $retmsg"
+ return 1
+ fi
+ else
+ retmsg=$(kubectl exec -it $HELM_MANAGER_APP_NAME -n $KUBE_NONRTRIC_NAMESPACE -- helm repo add $1 $2)
+ retcode=$?
+ if [ $retcode -ne 0 ]; then
+ __log_conf_fail_general " Cannot add repo to helm, return code: $retcode, msg: $retmsg"
+ return 1
+ fi
+ fi
+ __log_conf_ok
+ return 0
+}
+
fi
}
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__HTTPPROXY_test_requirements() {
+ :
+}
+
#######################################################
# Set http as the protocol to use for all communication to the http proxy
fi
}
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__ICS_test_requirements() {
+ :
+}
+
#######################################################
fi
}
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__KAFKAPC_test_requirements() {
+ :
+}
+
#######################################################
#######################################################
fi
}
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__KUBEPROXY_test_requirements() {
+ :
+}
+
#######################################################
## Access to Kube http proxy
--- /dev/null
+#!/bin/bash
+
+# ============LICENSE_START===============================================
+# Copyright (C) 2021 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+# This is a script that contains function to handle helm on localhost
+
+
+################ Test engine functions ################
+
+# Create the image var used during the test
+# arg: <image-tag-suffix> (selects staging, snapshot, release etc)
+# <image-tag-suffix> is present only for images with staging, snapshot,release tags
+__LOCALHELM_imagesetup() {
+ :
+}
+
+# Pull image from remote repo or use locally built image
+# arg: <pull-policy-override> <pull-policy-original>
+# <pull-policy-override> Shall be used for images allowing overriding. For example use a local image when test is started to use released images
+# <pull-policy-original> Shall be used for images that does not allow overriding
+# Both var may contain: 'remote', 'remote-remove' or 'local'
+__LOCALHELM_imagepull() {
+ :
+}
+
+# Build image (only for simulator or interfaces stubs owned by the test environment)
+# arg: <image-tag-suffix> (selects staging, snapshot, release etc)
+# <image-tag-suffix> is present only for images with staging, snapshot,release tags
+__LOCALHELM_imagebuild() {
+ :
+}
+
+# Generate a string for each included image using the app display name and a docker images format string
+# If a custom image repo is used then also the source image from the local repo is listed
+# arg: <docker-images-format-string> <file-to-append>
+__LOCALHELM_image_data() {
+ :
+}
+
+# Scale kubernetes resources to zero
+# All resources shall be ordered to be scaled to 0, if relevant. If not relevant to scale, then do no action.
+# This function is called for apps fully managed by the test script
+__LOCALHELM_kube_scale_zero() {
+ :
+}
+
+# Scale kubernetes resources to zero and wait until this has been accomplished, if relevant. If not relevant to scale, then do no action.
+# This function is called for prestarted apps not managed by the test script.
+__LOCALHELM_kube_scale_zero_and_wait() {
+ :
+}
+
+# Delete all kube resouces for the app
+# This function is called for apps managed by the test script.
+__LOCALHELM_kube_delete_all() {
+ :
+}
+
+# Store docker logs
+# This function is called for apps managed by the test script.
+# args: <log-dir> <file-prexix>
+__LOCALHELM_store_docker_logs() {
+ :
+}
+
+# Initial setup of protocol, host and ports
+# This function is called for apps managed by the test script.
+# args: -
+__LOCALHELM_initial_setup() {
+ :
+}
+
+# Set app short-name, app name and namespace for logging runtime statistics of kubernets pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__LOCALHELM_statisics_setup() {
+ :
+}
+
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__LOCALHELM_test_requirements() {
+    # Helm v3 must be installed on the localhost for these tests
+    tmp=$(which helm)
+    if [ $? -ne 0 ]; then
+        # Fix: use -e and close with $ERED like the other error printouts,
+        # otherwise the raw escape code is printed and the color leaks
+        echo -e $RED" Helm3 is required for running this test. Pls install helm3"$ERED
+        exit 1
+    fi
+    # Verify it is helm v3 (version string contains 'v3')
+    tmp_version=$(helm version | grep 'v3')
+    if [ -z "$tmp_version" ]; then
+        echo -e $RED" Helm3 is required for running this test. Pls install helm3"$ERED
+        exit 1
+    fi
+}
+
+#######################################################
+
+
+# Create a dummy helmchart
+# arg: <chart-name>
+localhelm_create_test_chart() {
+ __log_conf_start $@
+ if [ $# -ne 1 ]; then
+ __print_err "<path-to-chart-dir>" $@
+ return 1
+ fi
+ if [[ "$1" == *"/"* ]]; then
+ echo -e $RED"Chart name cannot contain '/'"
+ __log_conf_fail_general
+ return 1
+ fi
+ helm create $TESTENV_TEMP_FILES/$1 | indent1
+ if [ $? -ne 0 ]; then
+ __log_conf_fail_general
+ return 1
+ fi
+ __log_conf_ok
+ return 0
+}
+
+# Package a created helmchart
+# arg: <chart-name>
+localhelm_package_test_chart() {
+ __log_conf_start $@
+ if [ $# -ne 1 ]; then
+ __print_err "<path-to-chart-dir>" $@
+ return 1
+ fi
+ if [[ "$1" == *"/"* ]]; then
+ echo -e $RED"Chart name cannot contain '/'"
+ __log_conf_fail_general
+ return 1
+ fi
+ helm package -d $TESTENV_TEMP_FILES $TESTENV_TEMP_FILES/$1 | indent1
+ if [ $? -ne 0 ]; then
+ __log_conf_fail_general
+ return 1
+ fi
+ __log_conf_ok
+ return 0
+}
+
+# Check if a release is installed
+# arg: INSTALLED|NOTINSTALLED <release-name> <name-space>
+localhelm_installed_chart_release() {
+    __log_test_start $@
+    if [ $# -ne 3 ]; then
+        __print_err "INSTALLED|NOTINSTALLED <release-name> <name-space>" $@
+        return 1
+    fi
+    if [ $1 != "INSTALLED" ] && [ $1 != "NOTINSTALLED" ]; then
+        __print_err "INSTALLED|NOTINSTALLED <release-name> <name-space>" $@
+        return 1
+    fi
+
+    # List release names (-q) in namespace $3 matching the filter regex ^$2
+    # NOTE(review): the filter is prefix-anchored only - a release "app2" also
+    # matches filter "app"; the exact string compare below compensates - confirm
+    filter="helm ls -n $3 --filter ^$2"
+    res=$($filter -q)
+    if [ $? -ne 0 ]; then
+        __log_test_fail_general "Failed to list helm releases"
+        return 1
+    fi
+    if [ $1 == "INSTALLED" ]; then
+        # Pass only if the listing is exactly the requested release name
+        if [ "$res" != $2 ]; then
+            echo -e "$RED Release $2 does not exists $ERED"
+            __log_test_fail_general
+            return 1
+        fi
+    elif [ $1 == "NOTINSTALLED" ]; then
+        # Pass only if the listing is NOT exactly the requested release name
+        if [ "$res" == $2 ]; then
+            __log_test_fail_general "Release $2 exists"
+            return 1
+        fi
+    fi
+    # Informational printout of all releases in the namespace
+    echo " Currently installed releases in namespace $3"
+    helm ls -n $3 | indent2
+    __log_test_pass
+    return 0
+}
fi
}
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__MR_test_requirements() {
+ :
+}
+
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__DMAAPMR_test_requirements() {
+ :
+}
+
#######################################################
# Description of port mappings when running MR-STUB only or MR-STUB + MESSAGE-ROUTER
fi
}
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__NGW_test_requirements() {
+ :
+}
+
#######################################################
#!/bin/bash
# ============LICENSE_START===============================================
-# Copyright (C) 2020 Nordix Foundation. All rights reserved.
+# Copyright (C) 2021 Nordix Foundation. All rights reserved.
# ========================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
fi
}
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__PA_test_requirements() {
+ :
+}
+
#######################################################
__check_service_start $POLICY_AGENT_APP_NAME $PA_SERVICE_PATH$POLICY_AGENT_ALIVE_URL
fi
+
+ __collect_endpoint_stats_image_info "PMS" $POLICY_AGENT_IMAGE
echo ""
return 0
}
fi
fi
fi
-
+ __collect_endpoint_stats "PMS" 00 "GET" $PMS_API_PREFIX"/v2/policy-instances" $status
__log_test_pass
return 0
fi
fi
+ __collect_endpoint_stats "PMS" 01 "GET" $PMS_API_PREFIX"/v2/policies/{policy_id}" $status
__log_test_pass
return 0
}
__log_test_fail_status_code $1 $status
return 1
fi
-
let pid=$pid+1
let count=$count+1
echo -ne " Executed "$count"("$max")${SAMELINE}"
done
+ __collect_endpoint_stats "PMS" 02 "PUT" $PMS_API_PREFIX"/v2/policies" $status $max
echo ""
__log_test_pass
let count=$count+1
echo -ne " Accepted(batch) "$count"("$max")${SAMELINE}"
done
+ __collect_endpoint_stats "PMS" 02 "PUT" $PMS_API_PREFIX"/v2/policies" $1 $max
echo ""
fi
done
if [ -z $msg ]; then
+ __collect_endpoint_stats "PMS" 02 "PUT" $PMS_API_PREFIX"/v2/policies" $resp_code $(($count*$num_rics))
__log_test_pass " $(($count*$num_rics)) policy request(s) executed"
return 0
fi
__log_test_fail_status_code $1 $status
return 1
fi
+
let pid=$pid+1
let count=$count+1
echo -ne " Executed "$count"("$max")${SAMELINE}"
done
+ __collect_endpoint_stats "PMS" 03 "DELETE" $PMS_API_PREFIX"/v2/policies/{policy_id}" $status $max
echo ""
__log_test_pass
let count=$count+1
echo -ne " Deleted(batch) "$count"("$max")${SAMELINE}"
done
+ __collect_endpoint_stats "PMS" 03 "DELETE" $PMS_API_PREFIX"/v2/policies/{policy_id}" $1 $max
echo ""
fi
done
if [ -z $msg ]; then
+ __collect_endpoint_stats "PMS" 03 "DELETE" $PMS_API_PREFIX"/v2/policies/{policy_id}" $resp_code $(($count*$num_rics))
__log_test_pass " $(($count*$num_rics)) policy request(s) executed"
return 0
fi
fi
fi
+ __collect_endpoint_stats "PMS" 04 "GET" $PMS_API_PREFIX"/v2/policies" $status
__log_test_pass
return 0
}
fi
fi
+ __collect_endpoint_stats "PMS" 05 "GET" $PMS_API_PREFIX"/v2/policy-types/{policyTypeId}" $status
__log_test_pass
return 0
}
fi
fi
+ __collect_endpoint_stats "PMS" 06 "GET" $PMS_API_PREFIX"/v2/policy_schema" $status
__log_test_pass
return 0
}
fi
fi
+ __collect_endpoint_stats "PMS" 07 "GET" $PMS_API_PREFIX"/v2/policy-schemas" $status
__log_test_pass
return 0
}
# API Test function: GET /policy_status and V2 GET /policies/{policy_id}/status
-# arg: <response-code> <policy-id> (STD|STD2 <enforce-status>|EMPTY [<reason>|EMPTY])|(OSC <instance-status> <has-been-deleted>)
+# arg: <response-code> <policy-id> [ (STD|STD2 <enforce-status>|EMPTY [<reason>|EMPTY])|(OSC <instance-status> <has-been-deleted>) ]
# (Function for test scripts)
api_get_policy_status() {
__log_test_start $@
- if [ $# -lt 4 ] || [ $# -gt 5 ]; then
- __print_err "<response-code> <policy-id> (STD <enforce-status>|EMPTY [<reason>|EMPTY])|(OSC <instance-status> <has-been-deleted>)" $@
+ if [ $# -lt 2 ] || [ $# -gt 5 ]; then
+ __print_err "<response-code> <policy-id> [(STD <enforce-status>|EMPTY [<reason>|EMPTY])|(OSC <instance-status> <has-been-deleted>)]" $@
return 1
fi
targetJson=""
-
- if [ $3 == "STD" ]; then
+ if [ $# -eq 2 ]; then
+ :
+ elif [ "$3" == "STD" ]; then
targetJson="{\"enforceStatus\":\"$4\""
if [ $# -eq 5 ]; then
targetJson=$targetJson",\"reason\":\"$5\""
fi
targetJson=$targetJson"}"
- elif [ $3 == "STD2" ]; then
+ elif [ "$3" == "STD2" ]; then
if [ $4 == "EMPTY" ]; then
targetJson="{\"enforceStatus\":\"\""
else
fi
fi
targetJson=$targetJson"}"
- elif [ $3 == "OSC" ]; then
+ elif [ "$3" == "OSC" ]; then
targetJson="{\"instance_status\":\"$4\""
if [ $# -eq 5 ]; then
targetJson=$targetJson",\"has_been_deleted\":\"$5\""
__log_test_fail_status_code $1 $status
return 1
fi
+ if [ $# -gt 2 ]; then
+ echo "TARGET JSON: $targetJson" >> $HTTPLOG
+ body=${res:0:${#res}-3}
+ res=$(python3 ../common/compare_json.py "$targetJson" "$body")
- echo "TARGET JSON: $targetJson" >> $HTTPLOG
- body=${res:0:${#res}-3}
- res=$(python3 ../common/compare_json.py "$targetJson" "$body")
-
- if [ $res -ne 0 ]; then
- __log_test_fail_body
- return 1
+ if [ $res -ne 0 ]; then
+ __log_test_fail_body
+ return 1
+ fi
fi
-
+ __collect_endpoint_stats "PMS" 08 "GET" $PMS_API_PREFIX"/v2/policies/{policy_id}/status" $status
__log_test_pass
return 0
}
fi
fi
+ __collect_endpoint_stats "PMS" 09 "GET" $PMS_API_PREFIX"/v2/policy-types" $status
__log_test_pass
return 0
}
return 1
fi
+ __collect_endpoint_stats "PMS" 10 "GET" $PMS_API_PREFIX"/v2/status" $status
+ __log_test_pass
+ return 0
+}
+
+# API Test function: GET /status (root) without api prefix
+# args: <response-code>
+# (Function for test scripts)
+api_get_status_root() {
+    __log_test_start $@
+    if [ $# -ne 1 ]; then
+        __print_err "<response-code>" $@
+        return 1
+    fi
+    query="/status"
+    # Temporarily blank the api prefix so the call hits the root /status
+    # endpoint, then restore it for subsequent calls
+    TMP_PREFIX=$PMS_API_PREFIX
+    PMS_API_PREFIX=""
+    res="$(__do_curl_to_api PA GET $query)"
+    PMS_API_PREFIX=$TMP_PREFIX
+    # The last 3 chars of the curl result is the http status code
+    status=${res:${#res}-3}
+
+    if [ $status -ne $1 ]; then
+        __log_test_fail_status_code $1 $status
+        return 1
+    fi
+
+    __collect_endpoint_stats "PMS" 19 "GET" "/status" $status
+    __log_test_pass
+    return 0
+}
fi
fi
fi
+
+ __collect_endpoint_stats "PMS" 11 "GET" $PMS_API_PREFIX"/v2/rics/ric" $status
__log_test_pass
return 0
}
fi
fi
+ __collect_endpoint_stats "PMS" 12 "GET" $PMS_API_PREFIX"/v2/rics" $status
__log_test_pass
return 0
}
return 1
fi
+ __collect_endpoint_stats "PMS" 13 "PUT" $PMS_API_PREFIX"/v2/service" $status
__log_test_pass
return 0
}
fi
fi
+ __collect_endpoint_stats "PMS" 14 "GET" $PMS_API_PREFIX"/v2/services" $status
__log_test_pass
return 0
}
return 1
fi
+ __collect_endpoint_stats "PMS" 14 "GET" $PMS_API_PREFIX"/v2/services" $status
__log_test_pass
return 0
}
return 1
fi
+ __collect_endpoint_stats "PMS" 15 "DELETE" $PMS_API_PREFIX"/v2/services/{serviceId}" $status
__log_test_pass
return 0
}
return 1
fi
+ __collect_endpoint_stats "PMS" 16 "PUT" $PMS_API_PREFIX"/v2/services/{service_id}/keepalive" $status
__log_test_pass
return 0
}
return 1
fi
inputJson=$(< $2)
- inputJson="{\"config\":"$inputJson"}"
+ if [ $RUNMODE == "DOCKER" ]; then #In kube the file already has a header
+ inputJson="{\"config\":"$inputJson"}"
+ fi
file="./tmp/.config.json"
echo $inputJson > $file
query="/v2/configuration"
return 1
fi
+ __collect_endpoint_stats "PMS" 17 "PUT" $PMS_API_PREFIX"/v2/configuration" $status
__log_test_pass
return 0
}
fi
fi
+ __collect_endpoint_stats "PMS" 18 "GET" $PMS_API_PREFIX"/v2/configuration" $status
__log_test_pass
return 0
}
fi
}
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__PRODSTUB_test_requirements() {
+ :
+}
+
#######################################################
# Set http as the protocol to use for all communication to the Prod stub sim
retcode_p=$?
if [ $retcode_i -ne 0 ] && [ $retcode_p -ne 0 ]; then
- echo -e $RED"The $ICS_APP_NAME app is not included as managed nor prestarted in this test script"$ERED
- echo -e $RED"The $ICS_APP_NAME will not be started"$ERED
+ echo -e $RED"The $PROD_STUB_APP_NAME app is not included as managed nor prestarted in this test script"$ERED
+ echo -e $RED"The $PROD_STUB_APP_NAME will not be started"$ERED
exit
fi
if [ $retcode_i -eq 0 ] && [ $retcode_p -eq 0 ]; then
- echo -e $RED"The $ICS_APP_NAME app is included both as managed and prestarted in this test script"$ERED
- echo -e $RED"The $ICS_APP_NAME will not be started"$ERED
+ echo -e $RED"The $PROD_STUB_APP_NAME app is included both as managed and prestarted in this test script"$ERED
+ echo -e $RED"The $PROD_STUB_APP_NAME will not be started"$ERED
exit
fi
echo ""
}
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__PVCCLEANER_test_requirements() {
+ :
+}
+
#######################################################
# This is a system app, all usage in testcase_common.sh
\ No newline at end of file
# <pull-policy-original> Shall be used for images that does not allow overriding
# Both arg var may contain: 'remote', 'remote-remove' or 'local'
__RC_imagepull() {
- __check_and_pull_image $1 "$c" $RAPP_CAT_APP_NAME RAPP_CAT_IMAGE
+ __check_and_pull_image $1 "$RAPP_CAT_DISPLAY_NAME" $RAPP_CAT_APP_NAME RAPP_CAT_IMAGE
}
# Generate a string for each included image using the app display name and a docker images format string
fi
}
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__RC_test_requirements() {
+ :
+}
+
#######################################################
# Set http as the protocol to use for all communication to the Rapp catalogue
done
}
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__RICSIM_test_requirements() {
+ :
+}
+
#######################################################
done
fi
else
-
__check_included_image 'RICSIM'
if [ $? -eq 1 ]; then
echo -e $RED"The Near-RT RIC Simulator app is not included as managed in this test script"$ERED
export DOCKER_SIM_NWNAME
export RIC_SIM_DISPLAY_NAME
- docker_args="--no-recreate --scale $RICSIM_COMPOSE_SERVICE_NAME=$2"
+ docker_args=" --scale $RICSIM_COMPOSE_SERVICE_NAME=$2"
#Create a list of contsiner names
#Will be <ricsim-prefix>_<service-name>_<index>
fi
}
+# Check application requirements, e.g. helm, that the test needs. Exit 1 if req not satisfied
+# args: -
+__SDNC_test_requirements() {
+ :
+}
+
#######################################################
# Set http as the protocol to use for all communication to SDNC
POLICY_AGENT_CONFIG_FILE="application.yaml" # Container config file name
POLICY_AGENT_DATA_FILE="application_configuration.json" # Container data file name
POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
+PMS_FEATURE_LEVEL="" # Space separated list of features
MR_DMAAP_APP_NAME="message-router" # Name for the Dmaap MR
MR_STUB_APP_NAME="mr-stub" # Name of the MR stub
POLICY_AGENT_CONFIG_FILE="application.yaml" # Container config file name
POLICY_AGENT_DATA_FILE="application_configuration.json" # Container data file name
POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
+PMS_FEATURE_LEVEL="" # Space separated list of features
ICS_APP_NAME="informationservice" # Name for ICS container
ICS_DISPLAY_NAME="Enrichment Coordinator Service" # Display name for ICS container
# Policy Agent image and tags
POLICY_AGENT_IMAGE_BASE="onap/ccsdk-oran-a1policymanagementservice"
-POLICY_AGENT_IMAGE_TAG_LOCAL="1.2.4-SNAPSHOT"
-POLICY_AGENT_IMAGE_TAG_REMOTE_SNAPSHOT="1.2.4-SNAPSHOT"
-POLICY_AGENT_IMAGE_TAG_REMOTE="1.2.4-STAGING-latest" #Will use snapshot repo
-POLICY_AGENT_IMAGE_TAG_REMOTE_RELEASE="1.2.3"
+POLICY_AGENT_IMAGE_TAG_LOCAL="1.2.6-SNAPSHOT"
+POLICY_AGENT_IMAGE_TAG_REMOTE_SNAPSHOT="1.2.6-SNAPSHOT"
+POLICY_AGENT_IMAGE_TAG_REMOTE="1.2.6-STAGING-latest" #Will use snapshot repo
+POLICY_AGENT_IMAGE_TAG_REMOTE_RELEASE="1.2.5"
# SDNC A1 Controller remote image and tag
SDNC_A1_CONTROLLER_IMAGE_BASE="onap/sdnc-image"
-SDNC_A1_CONTROLLER_IMAGE_TAG_LOCAL="2.2.1-SNAPSHOT" ###CHECK THIS
-SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE_SNAPSHOT="2.2.1-STAGING-latest"
-SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE="2.2.1-STAGING-latest" #Will use snapshot repo
-SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE_RELEASE="2.2.1"
+SDNC_A1_CONTROLLER_IMAGE_TAG_LOCAL="2.2.5-SNAPSHOT" ###CHECK THIS
+SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE_SNAPSHOT="2.2.5-STAGING-latest"
+SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE="2.2.5-STAGING-latest" #Will use snapshot repo
+SDNC_A1_CONTROLLER_IMAGE_TAG_REMOTE_RELEASE="2.2.4"
#SDNC DB remote image and tag
#The DB is part of SDNC so handled in the same way as SDNC
POLICY_AGENT_CONFIG_FILE="application.yaml" # Container config file name
POLICY_AGENT_DATA_FILE="application_configuration.json" # Container data file name
POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
+PMS_FEATURE_LEVEL="" # Space separated list of features
ICS_APP_NAME="informationservice" # Name for ICS container
ICS_DISPLAY_NAME="Enrichment Coordinator Service" # Display name for ICS container
POLICY_AGENT_CONFIG_FILE="application.yaml" # Container config file name
POLICY_AGENT_DATA_FILE="application_configuration.json" # Container data file name
POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
+PMS_FEATURE_LEVEL="" # Space separated list of features
ICS_APP_NAME="informationservice" # Name for ICS container
ICS_DISPLAY_NAME="Enrichment Coordinator Service" # Display name for ICS container
POLICY_AGENT_CONFIG_FILE="application.yaml" # Container config file name
POLICY_AGENT_DATA_FILE="application_configuration.json" # Container data file name
POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
+PMS_FEATURE_LEVEL="" # Space separated list of features
ICS_APP_NAME="informationservice" # Name for ICS container
ICS_DISPLAY_NAME="Enrichment Coordinator Service" # Display name for ICS container
# Policy Agent base image and tags
-POLICY_AGENT_IMAGE_BASE="o-ran-sc/nonrtric-policy-agent"
-POLICY_AGENT_IMAGE_TAG_LOCAL="2.3.0-SNAPSHOT"
-POLICY_AGENT_IMAGE_TAG_REMOTE_SNAPSHOT="2.3.0-SNAPSHOT"
-POLICY_AGENT_IMAGE_TAG_REMOTE="2.3.0"
-POLICY_AGENT_IMAGE_TAG_REMOTE_RELEASE="2.3.0"
+POLICY_AGENT_IMAGE_BASE="o-ran-sc/nonrtric-a1-policy-management-service"
+POLICY_AGENT_IMAGE_TAG_LOCAL="2.3.1-SNAPSHOT"
+POLICY_AGENT_IMAGE_TAG_REMOTE_SNAPSHOT="2.3.1-SNAPSHOT"
+POLICY_AGENT_IMAGE_TAG_REMOTE="2.3.1"
+POLICY_AGENT_IMAGE_TAG_REMOTE_RELEASE="2.3.1"
# ICS image and tags
ICS_IMAGE_BASE="o-ran-sc/nonrtric-information-coordinator-service"
-ICS_IMAGE_TAG_LOCAL="1.2.0-SNAPSHOT"
-ICS_IMAGE_TAG_REMOTE_SNAPSHOT="1.2.0-SNAPSHOT"
-ICS_IMAGE_TAG_REMOTE="1.2.0"
-ICS_IMAGE_TAG_REMOTE_RELEASE="1.2.0"
+ICS_IMAGE_TAG_LOCAL="1.2.1-SNAPSHOT"
+ICS_IMAGE_TAG_REMOTE_SNAPSHOT="1.2.1-SNAPSHOT"
+ICS_IMAGE_TAG_REMOTE="1.2.1"
+ICS_IMAGE_TAG_REMOTE_RELEASE="1.2.1"
#Note: Update var ICS_FEATURE_LEVEL if image version is changed
#Control Panel image and tags
# Gateway image and tags
NRT_GATEWAY_IMAGE_BASE="o-ran-sc/nonrtric-gateway"
-NRT_GATEWAY_IMAGE_TAG_LOCAL="1.1.0-SNAPSHOT"
-NRT_GATEWAY_IMAGE_TAG_REMOTE_SNAPSHOT="1.1.0-SNAPSHOT"
-NRT_GATEWAY_IMAGE_TAG_REMOTE="1.1.0"
-NRT_GATEWAY_IMAGE_TAG_REMOTE_RELEASE="1.1.0"
+NRT_GATEWAY_IMAGE_TAG_LOCAL="1.0.0-SNAPSHOT"
+NRT_GATEWAY_IMAGE_TAG_REMOTE_SNAPSHOT="1.0.0-SNAPSHOT"
+NRT_GATEWAY_IMAGE_TAG_REMOTE="1.0.0"
+NRT_GATEWAY_IMAGE_TAG_REMOTE_RELEASE="1.0.0"
# SDNC A1 Controller image and tags - Note using released honolulu ONAP image
# RAPP Catalogue image and tags
RAPP_CAT_IMAGE_BASE="o-ran-sc/nonrtric-r-app-catalogue"
-RAPP_CAT_IMAGE_TAG_LOCAL="1.1.0-SNAPSHOT"
-RAPP_CAT_IMAGE_TAG_REMOTE_SNAPSHOT="1.1.0-SNAPSHOT"
-RAPP_CAT_IMAGE_TAG_REMOTE="1.1.0"
-RAPP_CAT_IMAGE_TAG_REMOTE_RELEASE="1.1.0"
+RAPP_CAT_IMAGE_TAG_LOCAL="1.0.2-SNAPSHOT"
+RAPP_CAT_IMAGE_TAG_REMOTE_SNAPSHOT="1.0.2-SNAPSHOT"
+RAPP_CAT_IMAGE_TAG_REMOTE="1.0.2"
+RAPP_CAT_IMAGE_TAG_REMOTE_RELEASE="1.0.2"
# Near RT RIC Simulator image and tags - same version as cherry
# DMAAP Mediator Service
DMAAP_MED_IMAGE_BASE="o-ran-sc/nonrtric-dmaap-mediator-producer"
-DMAAP_MED_IMAGE_TAG_LOCAL="1.0.0-SNAPSHOT"
-DMAAP_MED_IMAGE_TAG_REMOTE_SNAPSHOT="1.0.0-SNAPSHOT"
-DMAAP_MED_IMAGE_TAG_REMOTE="1.0.0"
-DMAAP_MED_IMAGE_TAG_REMOTE_RELEASE="1.0.0"
+DMAAP_MED_IMAGE_TAG_LOCAL="1.0.1-SNAPSHOT"
+DMAAP_MED_IMAGE_TAG_REMOTE_SNAPSHOT="1.0.1-SNAPSHOT"
+DMAAP_MED_IMAGE_TAG_REMOTE="1.0.1"
+DMAAP_MED_IMAGE_TAG_REMOTE_RELEASE="1.0.1"
# DMAAP Adapter Service
DMAAP_ADP_IMAGE_BASE="o-ran-sc/nonrtric-dmaap-adaptor"
-DMAAP_ADP_IMAGE_TAG_LOCAL="1.0.0-SNAPSHOT"
-DMAAP_ADP_IMAGE_TAG_REMOTE_SNAPSHOT="1.0.0-SNAPSHOT"
-DMAAP_ADP_IMAGE_TAG_REMOTE="1.0.0"
-DMAAP_ADP_IMAGE_TAG_REMOTE_RELEASE="1.0.0"
+DMAAP_ADP_IMAGE_TAG_LOCAL="1.0.1-SNAPSHOT"
+DMAAP_ADP_IMAGE_TAG_REMOTE_SNAPSHOT="1.0.1-SNAPSHOT"
+DMAAP_ADP_IMAGE_TAG_REMOTE="1.0.1"
+DMAAP_ADP_IMAGE_TAG_REMOTE_RELEASE="1.0.1"
+
+# Helm Manager
+HELM_MANAGER_IMAGE_BASE="o-ran-sc/nonrtric-helm-manager"
+HELM_MANAGER_IMAGE_TAG_LOCAL="1.1.1-SNAPSHOT"
+HELM_MANAGER_IMAGE_TAG_REMOTE_SNAPSHOT="1.1.1-SNAPSHOT"
+HELM_MANAGER_IMAGE_TAG_REMOTE="1.1.1"
+HELM_MANAGER_IMAGE_TAG_REMOTE_RELEASE="1.1.1"
#Consul remote image and tag
CONSUL_IMAGE_BASE="consul"
KAFKAPC_IMAGE_TAG_LOCAL="latest"
#No local image for pvc cleaner, remote image always used
+#PVC Cleaner remote image and tag
+CHART_MUS_IMAGE_BASE="ghcr.io/helm/chartmuseum"
+CHART_MUS_IMAGE_TAG_REMOTE_OTHER="v0.13.1"
+#No local image for chart museum, remote image always used
+
# List of app short names produced by the project
-PROJECT_IMAGES_APP_NAMES="PA ICS CP RC RICSIM NGW DMAAPADP DMAAPMED" # Add SDNC here if oran image is used
+PROJECT_IMAGES_APP_NAMES="PA ICS CP RC RICSIM NGW DMAAPADP DMAAPMED HELMMANAGER" # Add SDNC here if oran image is used
# List of app short names which images pulled from ORAN
ORAN_IMAGES_APP_NAMES="" # Not used
POLICY_AGENT_CONFIG_FILE="application.yaml" # Container config file name
POLICY_AGENT_DATA_FILE="application_configuration.json" # Container data file name
POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
+PMS_FEATURE_LEVEL="" # Space separated list of features
ICS_APP_NAME="informationservice" # Name for ICS container
ICS_DISPLAY_NAME="Information Coordinator Service" # Display name for ICS container
DMAAP_MED_COMPOSE_DIR="dmaapmed" # Dir in simulator_group for docker-compose
#MAAP_MED_CONFIG_MOUNT_PATH="/app" # Internal container path for configuration
DMAAP_MED_DATA_MOUNT_PATH="/configs" # Path in container for data file
-DMAAP_MED_DATA_FILE="type_config.json" # Container data file name
+DMAAP_MED_HOST_DATA_FILE="type_config.json" # Host data file name
+DMAAP_MED_CONTR_DATA_FILE="type_config.json" # Container data file name
+DMAAP_MED_FEATURE_LEVEL="" # Space separated list of features
KAFKAPC_APP_NAME="kafka-procon" # Name for the Kafka procon
-KAFKAPC_DISPLAY_NAME="Kafaka Producer/Consumer"
+KAFKAPC_DISPLAY_NAME="Kafka Producer/Consumer"
KAFKAPC_EXTERNAL_PORT=8096 # Kafka procon container external port (host -> container)
KAFKAPC_INTERNAL_PORT=8090 # Kafka procon container internal port (container -> container)
KAFKAPC_EXTERNAL_SECURE_PORT=8097 # Kafka procon container external secure port (host -> container)
KAFKAPC_ALIVE_URL="/" # Base path for alive check
KAFKAPC_COMPOSE_DIR="kafka-procon" # Dir in simulator_group for docker-compose
KAFKAPC_BUILD_DIR="kafka-procon" # Build dir
+
+CHART_MUS_APP_NAME="chartmuseum" # Name for the chart museum app
+CHART_MUS_DISPLAY_NAME="Chart Museum"
+CHART_MUS_EXTERNAL_PORT=8201 # chart museum container external port (host -> container)
+CHART_MUS_INTERNAL_PORT=8080 # chart museum container internal port (container -> container)
+CHART_MUS_ALIVE_URL="/health" # Base path for alive check
+CHART_MUS_COMPOSE_DIR="chartmuseum" # Dir in simulator_group for docker-compose
+CHART_MUS_CHART_CONTR_CHARTS="/tmp/charts" # Local dir container for charts
+
+HELM_MANAGER_APP_NAME="helmmanagerservice" # Name for the helm manager app
+HELM_MANAGER_DISPLAY_NAME="Helm Manager"
+HELM_MANAGER_EXTERNAL_PORT=8211 # helm manager container external port (host -> container)
+HELM_MANAGER_INTERNAL_PORT=8083 # helm manager container internal port (container -> container)
+HELM_MANAGER_EXTERNAL_SECURE_PORT=8212 # helm manager container external secure port (host -> container)
+HELM_MANAGER_INTERNAL_SECURE_PORT=8443 # helm manager container internal secure port (container -> container)
+HELM_MANAGER_CLUSTER_ROLE=cluster-admin # Kubernetes cluster role for helm manager
+HELM_MANAGER_SA_NAME=helm-manager-sa # Service account name
+HELM_MANAGER_ALIVE_URL="/helm/charts" # Base path for alive check
+HELM_MANAGER_COMPOSE_DIR="helmmanager" # Dir in simulator_group for docker-compose
+HELM_MANAGER_USER="helmadmin"
+HELM_MANAGER_PWD="itisasecret"
+
########################################
# Setting for common curl-base function
########################################
echo " [--ricsim-prefix <prefix> ] [--use-local-image <app-nam>+] [--use-snapshot-image <app-nam>+]"
echo " [--use-staging-image <app-nam>+] [--use-release-image <app-nam>+] [--image-repo <repo-address]"
echo " [--repo-policy local|remote] [--cluster-timeout <timeout-in seconds>] [--print-stats]"
- echo " [--override <override-environment-filename> --pre-clean --gen-stats]"
+ echo " [--override <override-environment-filename>] [--pre-clean] [--gen-stats] [--delete-namespaces]"
+ echo " [--delete-containers] [--endpoint-stats]"
}
if [ $# -eq 1 ] && [ "$1" == "help" ]; then
echo "--override <file> - Override setting from the file supplied by --env-file"
echo "--pre-clean - Will clean kube resouces when running docker and vice versa"
echo "--gen-stats - Collect container/pod runtime statistics"
-
+ echo "--delete-namespaces - Delete kubernetes namespaces before starting tests - but only those created by the test scripts. Kube mode only. Ignored if running with prestarted apps."
+ echo "--delete-containers - Delete docker containers before starting tests - but only those created by the test scripts. Docker mode only."
+ echo "--endpoint-stats - Collect endpoint statistics"
echo ""
echo "List of app short names supported: "$APP_SHORT_NAMES
exit 0
# files in the ./tmp is moved to ./tmp/prev when a new test is started
if [ ! -d "tmp" ]; then
mkdir tmp
+ if [ $? -ne 0 ]; then
+ echo "Cannot create dir for temp files, $PWD/tmp"
+ echo "Exiting...."
+ exit 1
+ fi
fi
curdir=$PWD
cd tmp
if [ $? -ne 0 ]; then
echo "Cannot cd to $PWD/tmp"
- echo "Dir cannot be created. Exiting...."
+ echo "Exiting...."
+ exit 1
fi
+
+TESTENV_TEMP_FILES=$PWD
+
if [ ! -d "prev" ]; then
mkdir prev
+ if [ $? -ne 0 ]; then
+ echo "Cannot create dir for previous temp files, $PWD/prev"
+ echo "Exiting...."
+ exit 1
+ fi
fi
+
+TMPFILES=$(ls -A | grep -vw prev)
+if [ ! -z "$TMPFILES" ]; then
+ cp -r $TMPFILES prev #Move all temp files to prev dir
+ if [ $? -ne 0 ]; then
+		echo "Cannot move temp files in $PWD to previous temp files dir $PWD/prev"
+ echo "Exiting...."
+ exit 1
+ fi
+ if [ $(pwd | xargs basename) == "tmp" ]; then #Check that current dir is tmp...for safety
+
+ rm -rf $TMPFILES # Remove all temp files
+ fi
+fi
+
cd $curdir
-mv ./tmp/* ./tmp/prev 2> /dev/null
+if [ $? -ne 0 ]; then
+ echo "Cannot cd to $curdir"
+ echo "Exiting...."
+ exit 1
+fi
+
# Create a http message log for this testcase
HTTPLOG=$PWD"/.httplog_"$ATC".txt"
rm $TESTLOGS/$ATC/*.txt &> /dev/null
rm $TESTLOGS/$ATC/*.json &> /dev/null
+#Create result file in the log dir
+echo "1" > "$TESTLOGS/$ATC/.result$ATC.txt"
+
# Log all output from the test case to a TC log
TCLOG=$TESTLOGS/$ATC/TC.log
exec &> >(tee ${TCLOG})
#Var to control if container/pod runtim statistics shall be collected
COLLECT_RUNTIME_STATS=0
+COLLECT_RUNTIME_STATS_PID=0
+
+#Var to control if endpoint statistics shall be collected
+COLLECT_ENDPOINT_STATS=0
+
+#Var to control if namespaces shall be delete before test setup
+DELETE_KUBE_NAMESPACES=0
+
+#Var to control if containers shall be delete before test setup
+DELETE_CONTAINERS=0
#File to keep deviation messages
DEVIATION_FILE=".tmp_deviations"
}
trap trap_fnc ERR
-# Trap to kill subprocesses
-trap "kill 0" EXIT
+# Trap to kill subprocess for stats collection (if running)
+trap_fnc2() {
+ if [ $COLLECT_RUNTIME_STATS_PID -ne 0 ]; then
+ kill $COLLECT_RUNTIME_STATS_PID
+ fi
+}
+trap trap_fnc2 EXIT
# Counter for tests
TEST_SEQUENCE_NR=1
__print_current_stats
}
+# Function to collect stats on endpoints
+# args: <app-id> <end-point-no> <http-operation> <end-point-url> <http-status> [<count>]
+__collect_endpoint_stats() {
+ if [ $COLLECT_ENDPOINT_STATS -eq 0 ]; then
+ return
+ fi
+ ENDPOINT_COUNT=1
+ if [ $# -gt 5 ]; then
+ ENDPOINT_COUNT=$6
+ fi
+	ENDPOINT_STAT_FILE=$TESTLOGS/$ATC/endpoint_${ATC}_$1_$2".log"
+ ENDPOINT_POS=0
+ ENDPOINT_NEG=0
+ if [ -f $ENDPOINT_STAT_FILE ]; then
+ ENDPOINT_VAL=$(< $ENDPOINT_STAT_FILE)
+ ENDPOINT_POS=$(echo $ENDPOINT_VAL | cut -f4 -d ' ' | cut -f1 -d '/')
+ ENDPOINT_NEG=$(echo $ENDPOINT_VAL | cut -f5 -d ' ' | cut -f1 -d '/')
+ fi
+
+ if [ $5 -ge 200 ] && [ $5 -lt 300 ]; then
+ let ENDPOINT_POS=ENDPOINT_POS+$ENDPOINT_COUNT
+ else
+ let ENDPOINT_NEG=ENDPOINT_NEG+$ENDPOINT_COUNT
+ fi
+
+ printf '%-2s %-10s %-45s %-16s %-16s' "#" "$3" "$4" "$ENDPOINT_POS/$ENDPOINT_POS" "$ENDPOINT_NEG/$ENDPOINT_NEG" > $ENDPOINT_STAT_FILE
+}
+
+# Function to collect stats on endpoints
+# args: <app-id> <image-info>
+__collect_endpoint_stats_image_info() {
+ if [ $COLLECT_ENDPOINT_STATS -eq 0 ]; then
+ return
+ fi
+	ENDPOINT_STAT_FILE=$TESTLOGS/$ATC/imageinfo_${ATC}_$1".log"
+ echo $POLICY_AGENT_IMAGE > $ENDPOINT_STAT_FILE
+}
+
#Var for measuring execution time
TCTEST_START=$SECONDS
TIMER_MEASUREMENTS=".timer_measurement.txt"
echo -e "Activity \t Duration \t Info" > $TIMER_MEASUREMENTS
-# If this is set, some images (control by the parameter repo-polcy) will be re-tagged and pushed to this repo before any
+# If this is set, some images (controlled by the parameter repo-policy) will be re-tagged and pushed to this repo before any
IMAGE_REPO_ADR=""
IMAGE_REPO_POLICY="local"
CLUSTER_TIME_OUT=0
foundparm=0
fi
fi
+ if [ $paramerror -eq 0 ]; then
+ if [ "$1" == "--delete-namespaces" ]; then
+ if [ $RUNMODE == "DOCKER" ]; then
+ DELETE_KUBE_NAMESPACES=0
+ echo "Option ignored - Delete namespaces (ignored when running docker)"
+ else
+				if [ ! -z "$KUBE_PRESTARTED_IMAGES" ]; then
+ DELETE_KUBE_NAMESPACES=0
+ echo "Option ignored - Delete namespaces (ignored when using prestarted apps)"
+ else
+ DELETE_KUBE_NAMESPACES=1
+ echo "Option set - Delete namespaces"
+ fi
+ fi
+ shift;
+ foundparm=0
+ fi
+ fi
+ if [ $paramerror -eq 0 ]; then
+ if [ "$1" == "--delete-containers" ]; then
+ if [ $RUNMODE == "DOCKER" ]; then
+ DELETE_CONTAINERS=1
+ echo "Option set - Delete containers started by previous test(s)"
+ else
+ echo "Option ignored - Delete containers (ignored when running kube)"
+ fi
+ shift;
+ foundparm=0
+ fi
+ fi
+ if [ $paramerror -eq 0 ]; then
+ if [ "$1" == "--endpoint-stats" ]; then
+ COLLECT_ENDPOINT_STATS=1
+ echo "Option set - Collect endpoint statistics"
+ shift;
+ foundparm=0
+ fi
+ fi
done
echo ""
done
echo " Auto-adding system app $padded_iapp Sourcing $file_pointer"
. $file_pointer
+ if [ $? -ne 0 ]; then
+ echo " Include file $file_pointer contain errors. Exiting..."
+ exit 1
+ fi
__added_apps=" $iapp "$__added_apps
done
else
padded_iapp=$padded_iapp" "
done
echo " Auto-adding included app $padded_iapp Sourcing $file_pointer"
- . $file_pointer
if [ ! -f "$file_pointer" ]; then
echo " Include file $file_pointer for app $iapp does not exist"
exit 1
fi
+ . $file_pointer
+ if [ $? -ne 0 ]; then
+ echo " Include file $file_pointer contain errors. Exiting..."
+ exit 1
+ fi
fi
done
echo ""
exit 1
fi
+ echo " Node(s) and container runtime config"
+ kubectl get nodes -o wide | indent2
fi
fi
# If the image suffix is none, then the component decides the suffix
function_pointer="__"$imagename"_imagesetup"
$function_pointer $IMAGE_SUFFIX
+
+ function_pointer="__"$imagename"_test_requirements"
+ $function_pointer
fi
done
#Temp var to check for image pull errors
IMAGE_ERR=0
- # The following sequence pull the configured images
+ # Delete namespaces
+ echo -e $BOLD"Deleting namespaces"$EBOLD
+
+
+ if [ "$DELETE_KUBE_NAMESPACES" -eq 1 ]; then
+ test_env_namespaces=$(kubectl get ns --no-headers -o custom-columns=":metadata.name" -l autotest=engine) #Get list of ns created by the test env
+ if [ $? -ne 0 ]; then
+ echo " Cannot get list of namespaces...ignoring delete"
+ else
+ for test_env_ns in $test_env_namespaces; do
+ __kube_delete_namespace $test_env_ns
+ done
+ fi
+ else
+ echo " Namespace delete option not set"
+ fi
+ echo ""
+ # Delete containers
+ echo -e $BOLD"Deleting containers"$EBOLD
+ if [ "$DELETE_CONTAINERS" -eq 1 ]; then
+ echo " Stopping containers label 'nrttest_app'..."
+ docker stop $(docker ps -qa --filter "label=nrttest_app") 2> /dev/null
+ echo " Removing stopped containers..."
+ docker rm $(docker ps -qa --filter "label=nrttest_app") 2> /dev/null
+ else
+		echo " Container delete option not set"
+ fi
+ echo ""
+
+ # The following sequence pull the configured images
echo -e $BOLD"Pulling configured images, if needed"$EBOLD
if [ ! -z "$IMAGE_REPO_ADR" ] && [ $IMAGE_REPO_POLICY == "local" ]; then
echo -e $YELLOW" Excluding all remote image check/pull when running with image repo: $IMAGE_REPO_ADR and image policy $IMAGE_REPO_POLICY"$EYELLOW
if [ $COLLECT_RUNTIME_STATS -eq 1 ]; then
../common/genstat.sh $RUNMODE $SECONDS $TESTLOGS/$ATC/stat_data.csv $LOG_STAT_ARGS &
+ COLLECT_RUNTIME_STATS_PID=$!
fi
}
fi
#Create file with OK exit code
echo "0" > "$AUTOTEST_HOME/.result$ATC.txt"
+ echo "0" > "$TESTLOGS/$ATC/.result$ATC.txt"
else
echo -e "One or more tests with status \033[31m\033[1mFAIL\033[0m "
echo -e "\033[31m\033[1m ___ _ ___ _ \033[0m"
if [ $STOP_AT_ERROR -eq 1 ]; then
echo -e $RED"Test script configured to stop at first FAIL, taking all logs and stops"$ERED
store_logs "STOP_AT_ERROR"
+
+ # Update test suite counter
+ if [ -f .tmp_tcsuite_fail_ctr ]; then
+ tmpval=$(< .tmp_tcsuite_fail_ctr)
+ ((tmpval++))
+ echo $tmpval > .tmp_tcsuite_fail_ctr
+ fi
+ if [ -f .tmp_tcsuite_fail ]; then
+ echo " - "$ATC " -- "$TC_ONELINE_DESCR" Execution stopped due to error" >> .tmp_tcsuite_fail
+ fi
exit 1
fi
return 0
namespace=$1
labelname=$2
labelid=$3
- resources="deployments replicaset statefulset services pods configmaps persistentvolumeclaims persistentvolumes"
+ resources="deployments replicaset statefulset services pods configmaps persistentvolumeclaims persistentvolumes serviceaccounts clusterrolebindings"
deleted_resourcetypes=""
for restype in $resources; do
ns_flag="-n $namespace"
ns_flag=""
ns_text=""
fi
+ if [ $restype == "clusterrolebindings" ]; then
+ ns_flag=""
+ ns_text=""
+ fi
result=$(kubectl get $restype $ns_flag -o jsonpath='{.items[?(@.metadata.labels.'$labelname'=="'$labelid'")].metadata.name}')
if [ $? -eq 0 ] && [ ! -z "$result" ]; then
deleted_resourcetypes=$deleted_resourcetypes" "$restype
echo " Message: $(<./tmp/kubeerr)"
return 1
else
+ kubectl label ns $1 autotest=engine
echo -e " Creating namespace $1 $GREEN$BOLD OK $EBOLD$EGREEN"
fi
else
return 0
}
+# Removes a namespace if it exists
+# args: <namespace>
+# (Not for test scripts)
+__kube_delete_namespace() {
+
+ #Check if test namespace exists, if so remove it
+ kubectl get namespace $1 1> /dev/null 2> ./tmp/kubeerr
+ if [ $? -eq 0 ]; then
+ echo -ne " Removing namespace "$1 $SAMELINE
+ kubectl delete namespace $1 1> /dev/null 2> ./tmp/kubeerr
+ if [ $? -ne 0 ]; then
+ echo -e " Removing namespace $1 $RED$BOLD FAILED $EBOLD$ERED"
+ ((RES_CONF_FAIL++))
+ echo " Message: $(<./tmp/kubeerr)"
+ return 1
+ else
+ echo -e " Removing namespace $1 $GREEN$BOLD OK $EBOLD$EGREEN"
+ fi
+ else
+ echo -e " Namespace $1 $GREEN$BOLD does not exist, OK $EBOLD$EGREEN"
+ fi
+ return 0
+}
+
+# Removes a namespace
+# args: <namespace>
+# (Not for test scripts)
+clean_and_create_namespace() {
+ __log_conf_start $@
+
+ if [ $# -ne 1 ]; then
+ __print_err "<namespace>" $@
+ return 1
+ fi
+ __kube_delete_namespace $1
+ if [ $? -ne 0 ]; then
+ return 1
+ fi
+ __kube_create_namespace $1
+ if [ $? -ne 0 ]; then
+ return 1
+ fi
+
+}
+
# Find the host ip of an app (using the service resource)
# args: <app-name> <namespace>
# (Not for test scripts)
__clean_kube
if [ $PRE_CLEAN -eq 1 ]; then
echo " Cleaning docker resouces to free up resources, may take time..."
- ../common/clean_docker.sh 2&>1 /dev/null
+		../common/clean_docker.sh > /dev/null 2>&1
echo ""
fi
else
__clean_containers
if [ $PRE_CLEAN -eq 1 ]; then
echo " Cleaning kubernetes resouces to free up resources, may take time..."
- ../common/clean_kube.sh 2&>1 /dev/null
+		../common/clean_kube.sh > /dev/null 2>&1
echo ""
fi
fi
echo -e $RED" Got: "${FUNCNAME[1]} ${@:2} $ERED
fi
((RES_CONF_FAIL++))
+ __check_stop_at_error
}
# Function to create the docker network for the test
envsubst < $compose_file > "gen_"$compose_file
compose_file="gen_"$compose_file
+ if [ $DOCKER_COMPOSE_VERION == "V1" ]; then
+ docker_compose_cmd="docker-compose"
+ else
+ docker_compose_cmd="docker compose"
+ fi
if [ "$compose_args" == "NODOCKERARGS" ]; then
- docker-compose -f $compose_file up -d &> .dockererr
+ $docker_compose_cmd -f $compose_file up -d &> .dockererr
if [ $? -ne 0 ]; then
echo -e $RED"Problem to launch container(s) with docker-compose"$ERED
cat .dockererr
exit 1
fi
else
- docker-compose -f $compose_file up -d $compose_args &> .dockererr
+ $docker_compose_cmd -f $compose_file up -d $compose_args &> .dockererr
if [ $? -ne 0 ]; then
echo -e $RED"Problem to launch container(s) with docker-compose"$ERED
cat .dockererr
__check_stop_at_error
return
fi
- elif [ $4 = "=" ] && [ "$result" -eq $5 ]; then
+ elif [ "$4" == "=" ] && [ "$result" -eq $5 ]; then
((RES_PASS++))
echo -e " Result=${result} after ${duration} seconds${SAMELINE}"
echo -e $GREEN" PASS${EGREEN} - Result=${result} after ${duration} seconds"
__print_current_stats
return
- elif [ $4 = ">" ] && [ "$result" -gt $5 ]; then
+ elif [ "$4" == ">" ] && [ "$result" -gt $5 ]; then
((RES_PASS++))
echo -e " Result=${result} after ${duration} seconds${SAMELINE}"
echo -e $GREEN" PASS${EGREEN} - Result=${result} after ${duration} seconds"
__print_current_stats
return
- elif [ $4 = "<" ] && [ "$result" -lt $5 ]; then
+ elif [ "$4" == "<" ] && [ "$result" -lt $5 ]; then
((RES_PASS++))
echo -e " Result=${result} after ${duration} seconds${SAMELINE}"
echo -e $GREEN" PASS${EGREEN} - Result=${result} after ${duration} seconds"
__print_current_stats
return
- elif [ $4 = "contain_str" ] && [[ $result =~ $5 ]]; then
+ elif [ "$4" == ">=" ] && [ "$result" -ge $5 ]; then
+ ((RES_PASS++))
+ echo -e " Result=${result} after ${duration} seconds${SAMELINE}"
+ echo -e $GREEN" PASS${EGREEN} - Result=${result} after ${duration} seconds"
+ __print_current_stats
+ return
+ elif [ "$4" == "contain_str" ] && [[ $result =~ $5 ]]; then
((RES_PASS++))
echo -e " Result=${result} after ${duration} seconds${SAMELINE}"
echo -e $GREEN" PASS${EGREEN} - Result=${result} after ${duration} seconds"
echo -e $RED" FAIL ${ERED}- ${3} ${4} ${5} not reached, result = ${result}"
__print_current_stats
__check_stop_at_error
- elif [ $4 = "=" ] && [ "$result" -eq $5 ]; then
+ elif [ "$4" == "=" ] && [ "$result" -eq $5 ]; then
+ ((RES_PASS++))
+ echo -e $GREEN" PASS${EGREEN} - Result=${result}"
+ __print_current_stats
+ elif [ "$4" == ">" ] && [ "$result" -gt $5 ]; then
((RES_PASS++))
echo -e $GREEN" PASS${EGREEN} - Result=${result}"
__print_current_stats
- elif [ $4 = ">" ] && [ "$result" -gt $5 ]; then
+ elif [ "$4" == "<" ] && [ "$result" -lt $5 ]; then
((RES_PASS++))
echo -e $GREEN" PASS${EGREEN} - Result=${result}"
__print_current_stats
- elif [ $4 = "<" ] && [ "$result" -lt $5 ]; then
+ elif [ "$4" == ">=" ] && [ "$result" -ge $5 ]; then
((RES_PASS++))
echo -e $GREEN" PASS${EGREEN} - Result=${result}"
__print_current_stats
- elif [ $4 = "contain_str" ] && [[ $result =~ $5 ]]; then
+ elif [ "$4" == "contain_str" ] && [[ $result =~ $5 ]]; then
((RES_PASS++))
echo -e $GREEN" PASS${EGREEN} - Result=${result}"
__print_current_stats
#
# List of short names for all supported apps, including simulators etc
-APP_SHORT_NAMES="PA ICS SDNC CP NGW RC RICSIM HTTPPROXY CBS CONSUL DMAAPMR MR CR PRODSTUB KUBEPROXY DMAAPMED DMAAPADP PVCCLEANER KAFKAPC"
+APP_SHORT_NAMES="PA ICS SDNC CP NGW RC RICSIM HTTPPROXY CBS CONSUL DMAAPMR MR CR PRODSTUB KUBEPROXY DMAAPMED DMAAPADP PVCCLEANER KAFKAPC CHARTMUS HELMMANAGER LOCALHELM"
# List of available apps that built and released of the project
-PROJECT_IMAGES="PA ICS SDNC CP NGW RICSIM RC DMAAPMED DMAAPADP"
+PROJECT_IMAGES="PA ICS SDNC CP NGW RICSIM RC DMAAPMED DMAAPADP HELMMANAGER"
# List of available apps to override with local or remote staging/snapshot/release image
-AVAILABLE_IMAGES_OVERRIDE="PA ICS SDNC CP NGW RICSIM RC DMAAPMED DMAAPADP"
+AVAILABLE_IMAGES_OVERRIDE="PA ICS SDNC CP NGW RICSIM RC DMAAPMED DMAAPADP HELMMANAGER"
# List of available apps where the image is built by the test environment
LOCAL_IMAGE_BUILD="MR CR PRODSTUB KUBEPROXY HTTPPROXY KAFKAPC"
# Metrics vars
cntr_msg_callbacks=0
+cntr_batch_callbacks=0
cntr_msg_fetched=0
cntr_callbacks={}
hosts_set=set()
APP_READ_URL="/get-event/<string:id>"
APP_READ_ALL_URL="/get-all-events/<string:id>"
DUMP_ALL_URL="/db"
+NULL_URL="/callbacks-null" # Url for ignored callback. Callbacks are not checked, counted or stored
MIME_TEXT="text/plain"
MIME_JSON="application/json"
if (id in cntr_callbacks.keys()):
cntr_callbacks[id][0] += 1
+ cntr_callbacks[id][2] += 1
else:
cntr_callbacks[id]=[]
cntr_callbacks[id].append(1)
cntr_callbacks[id].append(0)
+ cntr_callbacks[id].append(0)
except Exception as e:
print(CAUGHT_EXCEPTION+str(e))
def events_write_mr(id):
global msg_callbacks
global cntr_msg_callbacks
+ global cntr_batch_callbacks
storeas=request.args.get('storeas') #If set, store payload as a md5 hascode and dont log the payload
#Large payloads will otherwise overload the server
if (storeas is None):
print("raw data: str(request.data): "+str(request.data))
do_delay()
+ list_data=False
try:
#if (request.content_type == MIME_JSON):
if (MIME_JSON in request.content_type):
msg_list = json.loads(data)
if (storeas is None):
print("Payload(json): "+str(msg_list))
+ list_data=True
else:
msg_list=[]
print("Payload(content-type="+request.content_type+"). Setting empty json as payload")
with lock:
remote_host_logging(request)
+ if (list_data):
+ cntr_batch_callbacks += 1
for msg in msg_list:
if (storeas is None):
msg=json.loads(msg)
cntr_callbacks[id]=[]
cntr_callbacks[id].append(1)
cntr_callbacks[id].append(0)
+ cntr_callbacks[id].append(0)
+ if (id in msg_callbacks.keys() and list_data):
+ cntr_callbacks[id][2] += 1
except Exception as e:
print(CAUGHT_EXCEPTION+str(e))
def events_write_text(id):
global msg_callbacks
global cntr_msg_callbacks
+ global cntr_batch_callbacks
storeas=request.args.get('storeas') #If set, store payload as a md5 hascode and dont log the payload
#Large payloads will otherwise overload the server
try:
msg_list=None
+ list_data=False
if (MIME_JSON in request.content_type): #Json array of strings
msg_list=json.loads(request.data)
+ list_data=True
else:
data=request.data.decode("utf-8") #Assuming string
msg_list=[]
msg_list.append(data)
+ with lock:
+ cntr_batch_callbacks += 1
+ for msg in msg_list:
+ if (storeas == "md5"):
+ md5msg={}
+ print("msg: "+str(msg))
+ print("msg (endcode str): "+str(msg.encode('utf-8')))
+ md5msg["md5"]=md5(msg.encode('utf-8')).hexdigest()
+ msg=md5msg
+ print("msg (data converted to md5 hash): "+str(msg["md5"]))
+
+ if (isinstance(msg, dict)):
+ msg[TIME_STAMP]=str(datetime.now())
- for msg in msg_list:
- if (storeas == "md5"):
- md5msg={}
- print("msg: "+str(msg))
- print("msg (endcode str): "+str(msg.encode('utf-8')))
- md5msg["md5"]=md5(msg.encode('utf-8')).hexdigest()
- msg=md5msg
- print("msg (data converted to md5 hash): "+str(msg["md5"]))
-
- if (isinstance(msg, dict)):
- msg[TIME_STAMP]=str(datetime.now())
-
- with lock:
cntr_msg_callbacks += 1
if (id in msg_callbacks.keys()):
msg_callbacks[id].append(msg)
cntr_callbacks[id]=[]
cntr_callbacks[id].append(1)
cntr_callbacks[id].append(0)
+ cntr_callbacks[id].append(0)
+ if (id in cntr_callbacks.keys() and list_data):
+ cntr_callbacks[id][2] += 1
except Exception as e:
print(CAUGHT_EXCEPTION+str(e))
traceback.print_exc()
return 'OK',200
-### Functions for test ###
+# Receive a callback message but ignore contents and return 200
+# URI and payload, (PUT or POST): /callbacks-null <text message>
+# response: OK 200
+@app.route(NULL_URL,
+ methods=['PUT','POST'])
+def null_url():
+ return 'OK',200
# Dump the whole db of current callbacks
# URI and parameter, (GET): /db
else:
return Response(str("0"), status=200, mimetype=MIME_TEXT)
+@app.route('/counter/received_callback_batches',
+ methods=['GET'])
+def batches_submitted():
+ req_id = request.args.get('id')
+ if (req_id is None):
+ return Response(str(cntr_batch_callbacks), status=200, mimetype=MIME_TEXT)
+
+ if (req_id in cntr_callbacks.keys()):
+ return Response(str(cntr_callbacks[req_id][2]), status=200, mimetype=MIME_TEXT)
+ else:
+ return Response(str("0"), status=200, mimetype=MIME_TEXT)
+
@app.route('/counter/fetched_callbacks',
methods=['GET'])
def requests_fetched():
global msg_callbacks
global cntr_msg_fetched
global cntr_msg_callbacks
+ global cntr_batch_callbacks
global cntr_callbacks
global forced_settings
msg_callbacks={}
cntr_msg_fetched=0
cntr_msg_callbacks=0
+ cntr_batch_callbacks=0
cntr_callbacks={}
forced_settings['delay']=None
# source function to do curl and check result
. ../common/do_curl_function.sh
+RESP_CONTENT='*' #Dont check resp content type
+
echo "=== CR hello world ==="
RESULT="OK"
do_curl GET / 200
RESULT="0"
do_curl GET /counter/received_callbacks 200
+echo "=== Get counter - callback batches ==="
+RESULT="0"
+do_curl GET /counter/received_callback_batches 200
+
echo "=== Get counter - fetched events ==="
RESULT="0"
do_curl GET /counter/fetched_callbacks 200
RESULT="2"
do_curl GET /counter/received_callbacks 200
+echo "=== Get counter - callback batches ==="
+RESULT="2"
+do_curl GET /counter/received_callback_batches 200
+
echo "=== Get counter - fetched events ==="
RESULT="0"
do_curl GET /counter/fetched_callbacks 200
RESULT="2"
do_curl GET /counter/received_callbacks?id=test 200
+echo "=== Get counter - callback batches ==="
+RESULT="2"
+do_curl GET /counter/received_callback_batches?id=test 200
+
echo "=== Get counter - fetched events ==="
RESULT="0"
do_curl GET /counter/fetched_callbacks?id=test 200
RESULT="0"
do_curl GET /counter/received_callbacks?id=dummy 200
+echo "=== Get counter - callback batches ==="
+RESULT="0"
+do_curl GET /counter/received_callback_batches?id=dummy 200
+
echo "=== Get counter - fetched events ==="
RESULT="0"
do_curl GET /counter/fetched_callbacks?id=dummy 200
RESULT="2"
do_curl GET /counter/received_callbacks 200
+echo "=== Get counter - callback batches ==="
+RESULT="2"
+do_curl GET /counter/received_callback_batches 200
+
echo "=== Get counter - fetched events ==="
RESULT="2"
do_curl GET /counter/fetched_callbacks 200
RESULT="2"
do_curl GET /counter/received_callbacks?id=test 200
+echo "=== Get counter - callback batches ==="
+RESULT="2"
+do_curl GET /counter/received_callback_batches?id=test 200
+
echo "=== Get counter - fetched events ==="
RESULT="2"
do_curl GET /counter/fetched_callbacks?id=test 200
RESULT="5"
do_curl GET /counter/received_callbacks 200
+echo "=== Get counter - callback batches ==="
+RESULT="5"
+do_curl GET /counter/received_callback_batches 200
+
echo "=== Get counter - fetched events ==="
RESULT="2"
do_curl GET /counter/fetched_callbacks 200
RESULT="1"
do_curl GET /counter/received_callbacks?id=test1 200
+echo "=== Get counter - callback batches ==="
+RESULT="1"
+do_curl GET /counter/received_callback_batches?id=test1 200
+
echo "=== Get counter - fetched events ==="
RESULT="0"
do_curl GET /counter/fetched_callbacks?id=test1 200
RESULT="5"
do_curl GET /counter/received_callbacks 200
+echo "=== Get counter - callback batches ==="
+RESULT="5"
+do_curl GET /counter/received_callback_batches 200
+
echo "=== Get counter - fetched events ==="
RESULT="4"
do_curl GET /counter/fetched_callbacks 200
RESULT="1"
do_curl GET /counter/current_messages 200
+echo "=== Send a request ==="
+RESULT="*"
+#create payload
+echo "[{\"DATA-MSG\":\"msg\"},{\"DATA-MSG\":\"msg\"}]" > .tmp.json
+do_curl POST '/callbacks-text/test' 200 .tmp.json
+
+echo "=== Get counter - callbacks ==="
+RESULT="7"
+do_curl GET /counter/received_callbacks 200
+
+echo "=== Get counter - callback batches ==="
+RESULT="6"
+do_curl GET /counter/received_callback_batches 200
+
+echo "=== Get counter - fetched events ==="
+RESULT="4"
+do_curl GET /counter/fetched_callbacks 200
+
+echo "=== Get counter - current events ==="
+RESULT="3"
+do_curl GET /counter/current_messages 200
+
+
echo "=== CR reset ==="
RESULT="OK"
do_curl GET /reset 200
RESULT="0"
do_curl GET /counter/received_callbacks 200
+echo "=== Get counter - callback batches ==="
+RESULT="0"
+do_curl GET /counter/received_callback_batches 200
+
echo "=== Get counter - fetched events ==="
RESULT="0"
do_curl GET /counter/fetched_callbacks 200
.dockererr
.env
.payload
+kafkaprocon
--- /dev/null
+.tmp.json
+.dockererr
+gen_docker-compose*
\ No newline at end of file
--- /dev/null
+# ============LICENSE_START===============================================
+# Copyright (C) 2021 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: $CHART_MUS_APP_NAME
+ namespace: $KUBE_SIM_NAMESPACE
+ labels:
+ run: $CHART_MUS_APP_NAME
+ autotest: CHARTMUS
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ run: $CHART_MUS_APP_NAME
+ template:
+ metadata:
+ labels:
+ run: $CHART_MUS_APP_NAME
+ autotest: CHARTMUS
+ spec:
+ containers:
+ - name: $CHART_MUS_APP_NAME
+ image: $CHART_MUS_IMAGE
+ imagePullPolicy: $KUBE_IMAGE_PULL_POLICY
+ ports:
+ - name: http
+ containerPort: $CHART_MUS_INTERNAL_PORT
+ env:
+ - name: STORAGE
+ value: "local"
+ - name: STORAGE_LOCAL_ROOTDIR
+ value: ${CHART_MUS_CHART_CONTR_CHARTS}
+ - name: DEBUG
+ value: "1"
+# Selector will be set when pod is started first time
+ nodeSelector:
+
--- /dev/null
+# ============LICENSE_START===============================================
+# Copyright (C) 2020 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+version: '3.0'
+networks:
+ default:
+ external: true
+ name: ${DOCKER_SIM_NWNAME}
+services:
+ chartmuseum:
+ networks:
+ - default
+ container_name: ${CHART_MUS_APP_NAME}
+ image: ${CHART_MUS_IMAGE}
+ ports:
+ - ${CHART_MUS_EXTERNAL_PORT}:${CHART_MUS_INTERNAL_PORT}
+ environment:
+ - STORAGE=local
+ - STORAGE_LOCAL_ROOTDIR=${CHART_MUS_CHART_CONTR_CHARTS}
+ - DEBUG=1
+ labels:
+ - "nrttest_app=CHARTMUS"
+ - "nrttest_dp=${CHART_MUS_DISPLAY_NAME}"
--- /dev/null
+# ============LICENSE_START===============================================
+# Copyright (C) 2021 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: $CHART_MUS_APP_NAME
+ namespace: $KUBE_SIM_NAMESPACE
+ labels:
+ run: $CHART_MUS_APP_NAME
+ autotest: CHARTMUS
+spec:
+ type: ClusterIP
+ ports:
+ - port: $CHART_MUS_EXTERNAL_PORT
+ targetPort: $CHART_MUS_INTERNAL_PORT
+ protocol: TCP
+      name: http
+ selector:
+ run: $CHART_MUS_APP_NAME
\ No newline at end of file
- name: https
containerPort: $DMAAP_MED_INTERNAL_SECURE_PORT
volumeMounts:
- - mountPath: $DMAAP_MED_DATA_MOUNT_PATH/$DMAAP_MED_DATA_FILE
- subPath: $DMAAP_MED_DATA_FILE
+ - mountPath: $DMAAP_MED_DATA_MOUNT_PATH/$DMAAP_MED_CONTR_DATA_FILE
+ subPath: $DMAAP_MED_CONTR_DATA_FILE
name: dmaapadp-data-name
env:
- name: INFO_PRODUCER_HOST
value: "$MR_SERVICE_PATH"
- name: LOG_LEVEL
value: Debug
+ - name: KAFKA_BOOTSTRAP_SERVERS
+ value: "$MR_KAFKA_SERVICE_PATH"
volumes:
- configMap:
defaultMode: 420
- INFO_COORD_ADDR=${ICS_SERVICE_PATH}
- DMAAP_MR_ADDR=${MR_SERVICE_PATH}
- LOG_LEVEL=Debug
+ - KAFKA_BOOTSTRAP_SERVERS=${MR_KAFKA_SERVICE_PATH}
volumes:
- - ${DMAAP_MED_HOST_MNT_DIR}/$DMAAP_MED_DATA_FILE:${DMAAP_MED_DATA_MOUNT_PATH}/$DMAAP_MED_DATA_FILE
+ - ${DMAAP_MED_HOST_MNT_DIR}/${DMAAP_MED_CONTR_DATA_FILE}:${DMAAP_MED_DATA_MOUNT_PATH}/${DMAAP_MED_CONTR_DATA_FILE}
labels:
- "nrttest_app=DMAAPMED"
- "nrttest_dp=${DMAAP_MED_DISPLAY_NAME}"
--- /dev/null
+{
+ "types":
+ [
+ {
+ "id": "STD_Fault_Messages",
+ "dmaapTopicUrl": "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages?timeout=15000&limit=100"
+ },
+ {
+ "id": "Kafka_TestTopic",
+ "kafkaInputTopic": "unauthenticated.dmaapmed_kafka.text"
+ }
+ ]
+ }
\ No newline at end of file
--- /dev/null
+.tmp.json
+.dockererr
+gen_docker-compose*
\ No newline at end of file
--- /dev/null
+# ============LICENSE_START===============================================
+# Copyright (C) 2021 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+apiVersion: v1
+kind: Pod
+metadata:
+ name: helmmanagerservice
+ namespace: $KUBE_NONRTRIC_NAMESPACE
+ labels:
+ run: $HELM_MANAGER_APP_NAME
+ autotest: HELMMANAGER
+spec:
+ serviceAccountName: $HELM_MANAGER_SA_NAME
+ containers:
+ - name: $HELM_MANAGER_APP_NAME
+ image: $HELM_MANAGER_IMAGE
+ imagePullPolicy: $KUBE_IMAGE_PULL_POLICY
+ ports:
+ - name: http
+ containerPort: $HELM_MANAGER_INTERNAL_PORT
+ - name: https
+ containerPort: $HELM_MANAGER_INTERNAL_SECURE_PORT
--- /dev/null
+# ============LICENSE_START===============================================
+# Copyright (C) 2021 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+version: '3.0'
+networks:
+ default:
+ external: true
+ name: ${DOCKER_SIM_NWNAME}
+services:
+  helm-manager:
+ networks:
+ - default
+ container_name: ${HELM_MANAGER_APP_NAME}
+ image: ${HELM_MANAGER_IMAGE}
+ ports:
+ - ${HELM_MANAGER_EXTERNAL_PORT}:${HELM_MANAGER_INTERNAL_PORT}
+ - ${HELM_MANAGER_EXTERNAL_SECURE_PORT}:${HELM_MANAGER_INTERNAL_SECURE_PORT}
+ volumes:
+ - ~/.kube:/root/.kube
+ labels:
+      - "nrttest_app=HELMMANAGER"
+ - "nrttest_dp=${HELM_MANAGER_DISPLAY_NAME}"
--- /dev/null
+# ============LICENSE_START===============================================
+# Copyright (C) 2021 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: $HELM_MANAGER_SA_NAME
+ namespace: $KUBE_NONRTRIC_NAMESPACE
+ labels:
+ run: $HELM_MANAGER_APP_NAME
+ autotest: HELMMANAGER
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: ${HELM_MANAGER_SA_NAME}-clusterrolebinding
+ namespace: $KUBE_NONRTRIC_NAMESPACE
+ labels:
+ run: $HELM_MANAGER_APP_NAME
+ autotest: HELMMANAGER
+subjects:
+- kind: ServiceAccount
+ name: $HELM_MANAGER_SA_NAME
+ namespace: $KUBE_NONRTRIC_NAMESPACE
+roleRef:
+ kind: ClusterRole
+ name: $HELM_MANAGER_CLUSTER_ROLE
+ apiGroup: rbac.authorization.k8s.io
\ No newline at end of file
--- /dev/null
+# ============LICENSE_START===============================================
+# Copyright (C) 2021 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: helmmanagerservice
+ namespace: $KUBE_NONRTRIC_NAMESPACE
+ labels:
+ run: $HELM_MANAGER_APP_NAME
+ autotest: HELMMANAGER
+spec:
+ ports:
+ - port: $HELM_MANAGER_EXTERNAL_PORT
+ targetPort: $HELM_MANAGER_INTERNAL_PORT
+ protocol: TCP
+ name: http
+ - port: $HELM_MANAGER_EXTERNAL_SECURE_PORT
+ targetPort: $HELM_MANAGER_INTERNAL_SECURE_PORT
+ protocol: TCP
+ name: https
+ selector:
+ run: $HELM_MANAGER_APP_NAME
--- /dev/null
+################################################################################
+# Copyright (c) 2020 Nordix Foundation. #
+# #
+# Licensed under the Apache License, Version 2.0 (the \"License\"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an \"AS IS\" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+
+spring:
+ profiles:
+ active: prod
+ main:
+ allow-bean-definition-overriding: true
+ aop:
+ auto: false
+management:
+ endpoints:
+ web:
+ exposure:
+ # Enabling of springboot actuator features. See springboot documentation.
+ include: "loggers,logfile,health,info,metrics,threaddump,heapdump"
+
+logging:
+ # Configuration of logging
+ level:
+ ROOT: ERROR
+ org.springframework: ERROR
+ org.springframework.data: ERROR
+ org.springframework.web.reactive.function.client.ExchangeFunctions: ERROR
+ ${POLICY_AGENT_PKG_NAME}: INFO
+  file:
+    name: /var/log/policy-agent/application.log
+
+server:
+  # Configuration of the HTTP/REST server. The parameters are defined and handled by the springboot framework.
+ # See springboot documentation.
+ port : 8433
+ http-port: 8081
+ ssl:
+ key-store-type: JKS
+ key-store-password: policy_agent
+ key-store: /opt/app/policy-agent/etc/cert/keystore.jks
+ key-password: policy_agent
+ key-alias: policy_agent
+app:
+ # Location of the component configuration file. The file will only be used if the Consul database is not used;
+ # configuration from the Consul will override the file.
+ filepath: /var/policy-management-service/application_configuration.json
+ # path where the service can store data
+ vardata-directory: /var/policy-management-service
+ # path to json schema for config validation
+ config-file-schema-path: /application_configuration_schema.json
+ webclient:
+ # Configuration of the trust store used for the HTTP client (outgoing requests)
+ # The file location and the password for the truststore is only relevant if trust-store-used == true
+ # Note that the same keystore as for the server is used.
+ trust-store-used: false
+ trust-store-password: policy_agent
+ trust-store: /opt/app/policy-agent/etc/cert/truststore.jks
+ # Configuration of usage of HTTP Proxy for the southbound accesses.
+ # The HTTP proxy (if configured) will only be used for accessing NearRT RIC:s
+ http.proxy-host: $AGENT_HTTP_PROXY_CONFIG_HOST_NAME
+ http.proxy-port: $AGENT_HTTP_PROXY_CONFIG_PORT
+ http.proxy-type: HTTP
-# O-RAN-SC Non-RealTime RIC O-DU Closed Loop Usecase Slice Assurance
+# O-RAN-SC Non-RealTime RIC O-DU Closed Loop Usecase Slice Assurance
## Configuration
>- LOG_LEVEL Optional. The log level, which can be `Error`, `Warn`, `Info` or `Debug`. Defaults to `Info`.
>- POLLTIME Optional. Waiting time between one pull request to Dmaap and another. Defaults to 10 sec
+## Functionality
+
+There is a status call provided in a REST API on port 40936.
+>- /status OK
## Development
To make it easy to test during development of the consumer, there is a stub provided in the `stub` folder.
-This stub is used to simulate both received VES messages from Dmaap MR with information about performance measurements for the slices in a determinated DU and also SDNR, that sends information about Radio Resource Management Policy Ratio and allows to modify value for RRM Policy Dedicated Ratio from default to higher value.
+This stub is used to simulate both VES messages received from Dmaap MR, with information about performance measurements for the slices in a given DU, and SDNR, which sends information about the Radio Resource Management Policy Ratio and allows the value of the RRM Policy Dedicated Ratio to be modified from the default to a higher value.
By default, the SDNR stub listens on port `3904`, but this can be overridden by passing a `--sdnr-port [PORT]` flag when starting the stub. For the Dmaap MR stub the default port is `3905`, but it can be overridden by passing a `--dmaap-port [PORT]` flag when starting the stub.
# By default this file is in the docker build directory,
# but the location can configured in the JJB template.
---
-tag: 1.0.0
+tag: 1.0.1
require github.com/sirupsen/logrus v1.8.1
-require github.com/gorilla/mux v1.8.0
+require (
+ github.com/gorilla/mux v1.8.0
+ github.com/stretchr/testify v1.3.0
+)
-require golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e // indirect
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e // indirect
+)
}
func (c Config) String() string {
- return fmt.Sprintf("ConsumerHost: %v, ConsumerPort: %v, SDNRAddress: %v, SDNRUser: %v, SDNRPassword: %v, LogLevel: %v", c.MRHost, c.MRPort, c.SDNRAddress, c.SDNRUser, c.SDNPassword, c.LogLevel)
+ return fmt.Sprintf("[MRHost: %v, MRPort: %v, SDNRAddress: %v, SDNRUser: %v, SDNRPassword: %v, PollTime: %v, LogLevel: %v]", c.MRHost, c.MRPort, c.SDNRAddress, c.SDNRUser, c.SDNPassword, c.Polltime, c.LogLevel)
}
func getEnv(key string, defaultVal string) string {
--- /dev/null
+// -
+// ========================LICENSE_START=================================
+// O-RAN-SC
+// %%
+// Copyright (C) 2021: Nordix Foundation
+// %%
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// ========================LICENSE_END===================================
+//
+
+package config
+
+import (
+ "bytes"
+ "os"
+ "testing"
+
+ log "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewEnvVarsSetConfigContainSetValues(t *testing.T) {
+ assertions := require.New(t)
+ os.Setenv("MR_HOST", "consumerHost")
+ os.Setenv("MR_PORT", "8095")
+ os.Setenv("SDNR_ADDR", "http://localhost:3904")
+ os.Setenv("SDNR_USER", "admin")
+ os.Setenv("SDNR_PASSWORD", "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U")
+ os.Setenv("Polltime", "30")
+ os.Setenv("LOG_LEVEL", "Debug")
+ t.Cleanup(func() {
+ os.Clearenv()
+ })
+ wantConfig := Config{
+ MRHost: "consumerHost",
+ MRPort: "8095",
+ SDNRAddress: "http://localhost:3904",
+ SDNRUser: "admin",
+ SDNPassword: "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U",
+ Polltime: 30,
+ LogLevel: log.DebugLevel,
+ }
+
+ got := New()
+ assertions.Equal(&wantConfig, got)
+}
+
+func TestNewFaultyIntValueSetConfigContainDefaultValueAndWarnInLog(t *testing.T) {
+ assertions := require.New(t)
+ var buf bytes.Buffer
+ log.SetOutput(&buf)
+
+ os.Setenv("Polltime", "wrong")
+ t.Cleanup(func() {
+ log.SetOutput(os.Stderr)
+ os.Clearenv()
+ })
+ wantConfig := Config{
+ MRHost: "",
+ MRPort: "",
+ SDNRAddress: "http://localhost:3904",
+ SDNRUser: "admin",
+ SDNPassword: "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U",
+ Polltime: 30,
+ LogLevel: log.InfoLevel,
+ }
+
+ got := New()
+ assertions.Equal(&wantConfig, got)
+
+ logString := buf.String()
+ assertions.Contains(logString, "Invalid int value: wrong for variable: Polltime. Default value: 30 will be used")
+}
+
+func TestNewEnvFaultyLogLevelConfigContainDefaultValues(t *testing.T) {
+ assertions := require.New(t)
+ var buf bytes.Buffer
+ log.SetOutput(&buf)
+
+ os.Setenv("LOG_LEVEL", "wrong")
+ t.Cleanup(func() {
+ log.SetOutput(os.Stderr)
+ os.Clearenv()
+ })
+ wantConfig := Config{
+ MRHost: "",
+ MRPort: "",
+ SDNRAddress: "http://localhost:3904",
+ SDNRUser: "admin",
+ SDNPassword: "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U",
+ Polltime: 30,
+ LogLevel: log.InfoLevel,
+ }
+ got := New()
+ assertions.Equal(&wantConfig, got)
+ logString := buf.String()
+ assertions.Contains(logString, "Invalid log level: wrong. Log level will be Info!")
+}
}
}
-type HTTPClient interface {
- Get(path string, v interface{}) error
- Post(path string, payload interface{}, v interface{}) error
-}
-
func (c *Client) Get(path string, v interface{}) error {
req, err := c.newRequest(http.MethodGet, path, nil)
if err != nil {
--- /dev/null
+// -
+// ========================LICENSE_START=================================
+// O-RAN-SC
+// %%
+// Copyright (C) 2021: Nordix Foundation
+// %%
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// ========================LICENSE_END===================================
+//
+
+package restclient
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewRequest(t *testing.T) {
+ assertions := require.New(t)
+
+ bodyBytes, _ := json.Marshal("body")
+ succesfullReq, _ := http.NewRequest(http.MethodGet, "url", bytes.NewReader(bodyBytes))
+
+ type args struct {
+ method string
+ path string
+ payload interface{}
+ }
+ tests := []struct {
+ name string
+ args args
+ want *http.Request
+ wantErr error
+ }{
+ {
+ name: "succesfull newRequest",
+ args: args{
+ method: http.MethodGet,
+ path: "url",
+ payload: "body",
+ },
+ want: succesfullReq,
+ wantErr: nil,
+ },
+ {
+ name: "request failed json marshal",
+ args: args{
+ method: http.MethodGet,
+ path: "url",
+ payload: map[string]interface{}{
+ "foo": make(chan int),
+ },
+ },
+ want: nil,
+ wantErr: fmt.Errorf("failed to marshal request body: json: unsupported type: chan int"),
+ },
+ {
+ name: "request failed calling newRequest",
+ args: args{
+ method: "*?",
+ path: "url",
+ payload: "body",
+ },
+ want: nil,
+ wantErr: fmt.Errorf("failed to create HTTP request: net/http: invalid method \"*?\""),
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ client := New(&http.Client{})
+
+ req, err := client.newRequest(tt.args.method, tt.args.path, tt.args.payload)
+ if tt.wantErr != nil {
+ assertions.Equal(tt.want, req)
+ assertions.EqualError(tt.wantErr, err.Error())
+ } else {
+ assertions.Equal("url", req.URL.Path)
+ assertions.Equal("application/json; charset=utf-8", req.Header.Get("Content-Type"))
+ assertions.Empty(req.Header.Get("Authorization"))
+ assertions.Nil(err)
+ }
+
+ })
+ }
+}
+
+func TestGet(t *testing.T) {
+ assertions := require.New(t)
+ type args struct {
+ header string
+ respCode int
+ resp interface{}
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr string
+ }{
+ {
+ name: "successful GET request",
+ args: args{
+ header: "application/json",
+ respCode: http.StatusOK,
+ resp: "Success!",
+ },
+ wantErr: "",
+ },
+ {
+ name: "error GET request",
+ args: args{
+ header: "application/json",
+ respCode: http.StatusBadRequest,
+ resp: nil,
+ },
+ wantErr: "failed to do request, 400 status code received",
+ },
+ }
+
+ for _, tt := range tests {
+
+ t.Run(tt.name, func(t *testing.T) {
+ srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ response, _ := json.Marshal(tt.args.resp)
+ w.Header().Set("Content-Type", tt.args.header)
+ w.WriteHeader(tt.args.respCode)
+ w.Write(response)
+ }))
+ defer srv.Close()
+
+ client := New(&http.Client{})
+ var res interface{}
+ err := client.Get(srv.URL, &res)
+
+ if err != nil {
+ assertions.Equal(tt.wantErr, err.Error())
+ }
+ assertions.Equal(tt.args.resp, res)
+ })
+ }
+}
+
+func TestPost(t *testing.T) {
+ assertions := require.New(t)
+ type args struct {
+ header string
+ respCode int
+ resp interface{}
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr string
+ }{
+ {
+ name: "successful Post request",
+ args: args{
+ header: "application/json",
+ respCode: http.StatusOK,
+ resp: "Success!",
+ },
+ wantErr: "",
+ },
+ }
+
+ for _, tt := range tests {
+
+ t.Run(tt.name, func(t *testing.T) {
+ srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+
+ assertions.Equal(http.MethodPost, r.Method)
+ assertions.Contains(r.Header.Get("Content-Type"), "application/json")
+
+ var reqBody interface{}
+ decoder := json.NewDecoder(r.Body)
+ decoder.Decode(&reqBody)
+ assertions.Equal(reqBody, `json:"example"`)
+
+ response, _ := json.Marshal(tt.args.resp)
+ w.Header().Set("Content-Type", tt.args.header)
+ w.WriteHeader(tt.args.respCode)
+ w.Write(response)
+ }))
+ defer srv.Close()
+
+ client := New(&http.Client{})
+ payload := `json:"example"`
+ err := client.Post(srv.URL, payload, nil)
+
+ if err != nil {
+ assertions.Equal(tt.wantErr, err.Error())
+ }
+ })
+ }
+}
)
type App struct {
- client restclient.HTTPClient
+ client *restclient.Client
metricsPolicies *structures.SliceAssuranceMeas
}
var duid string
var sd, sst int
- regex := *regexp.MustCompile(`\/network-function\/distributed-unit-functions\[id=\'(.*)\'\]/cell\[id=\'(.*)\'\]/supported-measurements\/performance-measurement-type\[\.=\'(.*)\'\]\/supported-snssai-subcounter-instances\/slice-differentiator\[\.=(\d)\]\[slice-service-type=(\d+)\]`)
+ regex := *regexp.MustCompile(`\/(.*)network-function\/distributed-unit-functions\[id=\'(.*)\'\]\/cell\[id=\'(.*)\'\]\/supported-measurements\[performance-measurement-type=\'(.*)\'\]\/supported-snssai-subcounter-instances\[slice-differentiator=\'(\d+)\'\]\[slice-service-type=\'(\d+)\'\]`)
res := regex.FindAllStringSubmatch(meas.MeasurementTypeInstanceReference, -1)
- if res != nil && len(res[0]) == 6 {
- duid = res[0][1]
- sd = toInt(res[0][4])
- sst = toInt(res[0][5])
+ if res != nil && len(res[0]) == 7 {
+ duid = res[0][2]
+ sd = toInt(res[0][5])
+ sst = toInt(res[0][6])
key := MapKey{duid, sd, sst}
value, check := sa.Metrics[key]
if check {
- sa.updateMetric(key, value, res[0][3], meas.Value)
+ sa.updateMetric(key, value, res[0][4], meas.Value)
} else {
// Only add new one if value exceeds threshold
sa.addMetric(res, meas.Value)
func (sa *SliceAssuranceMeas) addMetric(res [][]string, metricValue int) {
if metricValue > 700 {
- metric := NewSliceMetric(res[0][1], res[0][2], toInt(res[0][4]), toInt(res[0][5]))
+ metric := NewSliceMetric(res[0][2], res[0][3], toInt(res[0][5]), toInt(res[0][6]))
metric.PM[res[0][3]] = metricValue
- key := MapKey{res[0][1], toInt(res[0][4]), toInt(res[0][5])}
+ key := MapKey{res[0][2], toInt(res[0][5]), toInt(res[0][6])}
sa.Metrics[key] = metric
}
}
--- /dev/null
+// -
+// ========================LICENSE_START=================================
+// O-RAN-SC
+// %%
+// Copyright (C) 2021: Nordix Foundation
+// %%
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// ========================LICENSE_END===================================
+//
+
+package structures
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "oransc.org/usecase/oduclosedloop/messages"
+)
+
+func TestAddMetric(t *testing.T) {
+ assertions := require.New(t)
+ type args struct {
+ meas messages.Measurement
+ }
+ tests := []struct {
+ name string
+ args args
+ }{
+ {
+ name: "Test adding new metric",
+ args: args{
+ meas: messages.Measurement{
+ MeasurementTypeInstanceReference: "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements[performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
+ Value: 51232,
+ Unit: "kbit/s",
+ },
+ },
+ },
+ {
+ name: "Test with invalid input",
+ args: args{
+ meas: messages.Measurement{
+ MeasurementTypeInstanceReference: "/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements[performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
+ Value: 51232,
+ Unit: "kbit/s",
+ },
+ },
+ },
+ }
+
+ sliceAssuranceMeas := NewSliceAssuranceMeas()
+ assertions.Equal(0, len(sliceAssuranceMeas.Metrics), "Metrics is not empty, got: %d, want: %d.", len(sliceAssuranceMeas.Metrics), 0)
+
+ for i, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+
+ if i == 0 {
+ sliceAssuranceMeas.AddOrUpdateMetric(tt.args.meas)
+ assertions.Equal(1, len(sliceAssuranceMeas.Metrics), "Metrics must have one new metric, got: %d, want: %d.", len(sliceAssuranceMeas.Metrics), 1)
+
+ testMapKey := MapKey{"O-DU-1211", 1, 1}
+ assertions.Contains(sliceAssuranceMeas.Metrics, testMapKey, "Metric added with wrong values , got: %v.", sliceAssuranceMeas.Metrics[testMapKey])
+ }
+ if i == 1 {
+ _, got := sliceAssuranceMeas.AddOrUpdateMetric(tt.args.meas)
+ assertions.EqualError(got, " wrong format for MeasurementTypeInstanceReference")
+ }
+ })
+ }
+}
+
+func TestUpdateExistingMetric(t *testing.T) {
+ assertions := require.New(t)
+ meas := messages.Measurement{
+ MeasurementTypeInstanceReference: "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements[performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
+ Value: 51232,
+ Unit: "kbit/s",
+ }
+
+ updateMeas := messages.Measurement{
+ MeasurementTypeInstanceReference: "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements[performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
+ Value: 897,
+ Unit: "kbit/s",
+ }
+
+ sliceAssuranceMeas := NewSliceAssuranceMeas()
+ assertions.Equal(0, len(sliceAssuranceMeas.Metrics), "Metrics is not empty, got: %d, want: %d.", len(sliceAssuranceMeas.Metrics), 0)
+
+ sliceAssuranceMeas.AddOrUpdateMetric(meas)
+ assertions.Equal(1, len(sliceAssuranceMeas.Metrics), "Metrics must have one new metric, got: %d, want: %d.", len(sliceAssuranceMeas.Metrics), 1)
+
+ sliceAssuranceMeas.AddOrUpdateMetric(updateMeas)
+ assertions.Equal(1, len(sliceAssuranceMeas.Metrics), "Metrics must have one updated metric, got: %d, want: %d.", len(sliceAssuranceMeas.Metrics), 1)
+
+ testMapKey := MapKey{"O-DU-1211", 1, 1}
+ metricName := "user-equipment-average-throughput-uplink"
+ newMetricValue := 897
+ if sliceAssuranceMeas.Metrics[testMapKey].PM[metricName] != newMetricValue {
+ t.Errorf("Metric value was not update properly, got: %d, want: %d.", sliceAssuranceMeas.Metrics[testMapKey].PM[metricName], newMetricValue)
+ }
+
+}
+
+func TestDeleteMetricWhenValueLessThanThreshold(t *testing.T) {
+
+ meas := messages.Measurement{
+ MeasurementTypeInstanceReference: "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements[performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
+ Value: 51232,
+ Unit: "kbit/s",
+ }
+
+ newMeas := messages.Measurement{
+ MeasurementTypeInstanceReference: "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements[performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
+ Value: 50,
+ Unit: "kbit/s",
+ }
+
+ sliceAssuranceMeas := NewSliceAssuranceMeas()
+ assert.Equal(t, 0, len(sliceAssuranceMeas.Metrics), "Metrics is not empty, got: %d, want: %d.", len(sliceAssuranceMeas.Metrics), 0)
+
+ sliceAssuranceMeas.AddOrUpdateMetric(meas)
+ assert.Equal(t, 1, len(sliceAssuranceMeas.Metrics), "Metrics must have one new metric, got: %d, want: %d.", len(sliceAssuranceMeas.Metrics), 1)
+
+ sliceAssuranceMeas.AddOrUpdateMetric(newMeas)
+ assert.Equal(t, 0, len(sliceAssuranceMeas.Metrics), "Metrics must have been deleted, got: %d, want: %d.", len(sliceAssuranceMeas.Metrics), 0)
+
+}
+
+func TestAddPolicy(t *testing.T) {
+
+ meas := messages.Measurement{
+ MeasurementTypeInstanceReference: "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements[performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
+ Value: 51232,
+ Unit: "kbit/s",
+ }
+ sliceAssuranceMeas := NewSliceAssuranceMeas()
+ sliceAssuranceMeas.AddOrUpdateMetric(meas)
+
+ duid := "O-DU-1211"
+ rrmPolicyRatio := messages.RRMPolicyRatio{
+ Id: "id",
+ AdmState: "locked",
+ UserLabel: "user_label",
+ RRMPolicyMaxRatio: 0,
+ RRMPolicyMinRatio: 0,
+ RRMPolicyDedicatedRatio: 0,
+ ResourceType: "prb",
+ RRMPolicyMembers: []messages.RRMPolicyMember{{
+ MobileCountryCode: "046",
+ MobileNetworkCode: "651",
+ SliceDifferentiator: 1,
+ SliceServiceType: 1,
+ }},
+ }
+ assert.Equal(t, 0, len(sliceAssuranceMeas.Policies), "Policies is not empty, got: %d, want: %d.", len(sliceAssuranceMeas.Policies), 0)
+
+ sliceAssuranceMeas.AddNewPolicy(duid, rrmPolicyRatio)
+ assert.Equal(t, 1, len(sliceAssuranceMeas.Policies), "Policies must have one new policy, got: %d, want: %d.", len(sliceAssuranceMeas.Policies), 1)
+
+ sliceAssuranceMeas.PrintStructures()
+}
import (
"fmt"
+ "net/http"
log "github.com/sirupsen/logrus"
"oransc.org/usecase/oduclosedloop/internal/config"
"oransc.org/usecase/oduclosedloop/internal/sliceassurance"
)
-const TOPIC string = "/events/unauthenticated.PERFORMANCE_MEASUREMENTS"
+const TOPIC string = "unauthenticated.VES_O_RAN_SC_HELLO_WORLD_PM_STREAMING_OUTPUT"
var configuration *config.Config
a := sliceassurance.App{}
a.Initialize(dmaapUrl, configuration.SDNRAddress)
- a.Run(TOPIC, configuration.Polltime)
+ go a.Run(TOPIC, configuration.Polltime)
+ http.HandleFunc("/status", statusHandler)
+
+ log.Fatal(http.ListenAndServe(":40936", nil))
}
func validateConfiguration(configuration *config.Config) error {
}
return nil
}
+
+func statusHandler(w http.ResponseWriter, r *http.Request) { // liveness endpoint registered on /status
+ // Empty body: net/http then replies 200 OK with no payload, which is enough to show the service is alive for now. Might be extended later.
+}
--- /dev/null
+// -
+// ========================LICENSE_START=================================
+// O-RAN-SC
+// %%
+// Copyright (C) 2021: Nordix Foundation
+// %%
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// ========================LICENSE_END===================================
+//
+
+package messages
+
+import (
+ "testing"
+)
+
+func TestGetMeasurements(t *testing.T) { // table-driven check that StdDefinedMessage.GetMeasurements returns the event's measurements
+ type fields struct {
+ Event Event
+ }
+ tests := []struct {
+ name string
+ fields fields
+ want []Measurement
+ }{
+ {
+ name: "get measurements message",
+ fields: fields{
+ Event: Event{
+ CommonEventHeader: CommonEventHeader{
+ Domain: "stndDefined",
+ StndDefinedNamespace: "o-ran-sc-du-hello-world-pm-streaming-oas3",
+ },
+ StndDefinedFields: StndDefinedFields{
+ StndDefinedFieldsVersion: "1.0",
+ SchemaReference: "https://gerrit.o-ran-sc.org/r/gitweb?p=scp/oam/modeling.git;a=blob_plain;f=data-model/oas3/experimental/o-ran-sc-du-hello-world-oas3.json;hb=refs/heads/master",
+ Data: Data{
+ DataId: "id",
+ Measurements: []Measurement{{ // the single measurement the getter is expected to surface
+ MeasurementTypeInstanceReference: "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements[performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
+ Value: 51232,
+ Unit: "kbit/s",
+ }},
+ },
+ },
+ },
+ },
+ want: []Measurement{{ // mirrors the input measurement above
+ MeasurementTypeInstanceReference: "/o-ran-sc-du-hello-world:network-function/distributed-unit-functions[id='O-DU-1211']/cell[id='cell-1']/supported-measurements[performance-measurement-type='user-equipment-average-throughput-uplink']/supported-snssai-subcounter-instances[slice-differentiator='1'][slice-service-type='1']",
+ Value: 51232,
+ Unit: "kbit/s",
+ }},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ message := StdDefinedMessage{
+ Event: tt.fields.Event,
+ }
+ if got := message.GetMeasurements(); len(got) != len(tt.want) { // NOTE(review): only slice lengths are compared, not element values — consider reflect.DeepEqual if content matters
+ t.Errorf("Message.GetMeasurements() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+++ /dev/null
-<!--
- ============LICENSE_START=======================================================
- Copyright (C) 2021 Nordix Foundation.
- ================================================================================
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
- SPDX-License-Identifier: Apache-2.0
- ============LICENSE_END=========================================================
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
- <modelVersion>4.0.0</modelVersion>
-
- <groupId>oransc.org</groupId>
- <artifactId>o-du-slice-assurance</artifactId>
- <version>1.0.0</version>
- <properties>
- <docker-maven-plugin.version>0.30.0</docker-maven-plugin.version>
- </properties>
-
- <build>
- <plugins>
- <plugin>
- <groupId>io.fabric8</groupId>
- <artifactId>docker-maven-plugin</artifactId>
- <version>${docker-maven-plugin.version}</version>
- <inherited>false</inherited>
- <executions>
- <execution>
- <id>generate-nonrtric-o-du-slice-assurance-image</id>
- <phase>package</phase>
- <goals>
- <goal>build</goal>
- </goals>
- <configuration>
- <pullRegistry>${env.CONTAINER_PULL_REGISTRY}</pullRegistry>
- <images>
- <image>
- <name>o-ran-sc/nonrtric-o-du-slice-assurance:${project.version}</name>
- <build>
- <cleanup>try</cleanup>
- <contextDir>${basedir}</contextDir>
- <dockerFile>Dockerfile</dockerFile>
- <args>
- <JAR>${project.build.finalName}.jar</JAR>
- </args>
- <tags>
- <tag>${project.version}</tag>
- </tags>
- </build>
- </image>
- </images>
- </configuration>
- </execution>
- <execution>
- <id>push-nonrtric-o-du-slice-assurance-image</id>
- <goals>
- <goal>build</goal>
- <goal>push</goal>
- </goals>
- <configuration>
- <pullRegistry>${env.CONTAINER_PULL_REGISTRY}</pullRegistry>
- <pushRegistry>${env.CONTAINER_PUSH_REGISTRY}</pushRegistry>
- <images>
- <image>
- <name>o-ran-sc/nonrtric-o-du-slice-assurance:${project.version}</name>
- <build>
- <contextDir>${basedir}</contextDir>
- <dockerFile>Dockerfile</dockerFile>
- <args>
- <JAR>${project.build.finalName}.jar</JAR>
- </args>
- <tags>
- <tag>${project.version}</tag>
- <tag>latest</tag>
- </tags>
- </build>
- </image>
- </images>
- </configuration>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
-</project>
AdmState: "locked",
UserLabel: "rrm-pol-1",
RRMPolicyMaxRatio: 100,
- RRMPolicyMinRatio: "0",
- RRMPolicyDedicatedRatio: "0",
+ RRMPolicyMinRatio: 0,
+ RRMPolicyDedicatedRatio: 0,
ResourceType: "prb",
RRMPolicyMembers: []messages.RRMPolicyMember{
{
AdmState: "unlocked",
UserLabel: "rrm-pol-2",
RRMPolicyMaxRatio: 20,
- RRMPolicyMinRatio: "10",
- RRMPolicyDedicatedRatio: "15",
+ RRMPolicyMinRatio: 10,
+ RRMPolicyDedicatedRatio: 15,
ResourceType: "prb",
RRMPolicyMembers: []messages.RRMPolicyMember{
{
AdmState: "unlocked",
UserLabel: "rrm-pol-3",
RRMPolicyMaxRatio: 30,
- RRMPolicyMinRatio: "10",
- RRMPolicyDedicatedRatio: "5",
+ RRMPolicyMinRatio: 10,
+ RRMPolicyDedicatedRatio: 5,
ResourceType: "prb",
RRMPolicyMembers: []messages.RRMPolicyMember{
{
}
func updateRRMPolicyDedicatedRatio(w http.ResponseWriter, r *http.Request) {
- //vars := mux.Vars(r)
- fmt.Println("::updateRRMPolicyDedicatedRatio::")
var prMessage messages.DistributedUnitFunction
decoder := json.NewDecoder(r.Body)
defer r.Body.Close()
fmt.Println("prMessage: ", prMessage)
- //prMessage.Id = vars["POLICY-ID"]
respondWithJSON(w, http.StatusOK, map[string]string{"status": "200"})
}
"taskParameters": [
{
"key": "ORU-ODU-Map",
- "value": "{\"ERICSSON-O-RU-11220\": \"HCL-O-DU-1122\",
- \"ERICSSON-O-RU-11221\": \"HCL-O-DU-1122\",
- \"ERICSSON-O-RU-11222\": \"HCL-O-DU-1122\",
- \"ERICSSON-O-RU-11223\": \"HCL-O-DU-1122\",
- \"ERICSSON-O-RU-11224\": \"HCL-O-DU-1123\",
- \"ERICSSON-O-RU-11225\": \"HCL-O-DU-1123\",
- \"ERICSSON-O-RU-11226\": \"HCL-O-DU-1123\",
- \"ERICSSON-O-RU-11227\": \"HCL-O-DU-1124\",
- \"ERICSSON-O-RU-11228\": \"HCL-O-DU-1125\",
- \"ERICSSON-O-RU-11229\": \"HCL-O-DU-1125\"}"
+ "value": "{\"ERICSSON-O-RU-11220\": \"O-DU-1122\",
+ \"ERICSSON-O-RU-11221\": \"O-DU-1122\",
+ \"ERICSSON-O-RU-11222\": \"O-DU-1122\",
+ \"ERICSSON-O-RU-11223\": \"O-DU-1122\",
+ \"ERICSSON-O-RU-11224\": \"O-DU-1123\",
+ \"ERICSSON-O-RU-11225\": \"O-DU-1123\",
+ \"ERICSSON-O-RU-11226\": \"O-DU-1123\",
+ \"ERICSSON-O-RU-11227\": \"O-DU-1124\",
+ \"ERICSSON-O-RU-11228\": \"O-DU-1125\",
+ \"ERICSSON-O-RU-11229\": \"O-DU-1125\"}"
}
]
}
"carrierTechnology": "RESTCLIENT",
"parameterClassName": "org.onap.policy.apex.plugins.event.carrier.restclient.RestClientCarrierTechnologyParameters",
"parameters": {
- "url": "http://sdnr-sim:9990/rests/data/network-topology:network-topology/topology=topology-netconf/node={OduId}/yang-ext:mount/o-ran-sc-du-hello-world:network-function/du-to-ru-connection={OruId}",
+ "url": "http://sdnr-sim:9990/rests/data/network-topology:network-topology/topology=topology-netconf/node={OduId}/yang-ext:mount/o-ran-sc-du-hello-world:network-function/distributed-unit-functions={OduId}/radio-resource-management-policy-ratio=rrm-pol-1",
"httpMethod" : "PUT",
"httpHeaders" : [
["Authorization", "Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="]
-{"tosca_definitions_version":"tosca_simple_yaml_1_1_0","topology_template":{"policies":[{"onap.policies.native.apex.LinkMonitor":{"type":"onap.policies.native.Apex","type_version":"1.0.0","name":"onap.policies.native.apex.LinkMonitor","version":"1.0.0","properties":{"engineServiceParameters":{"name":"LinkMonitorApexEngine","version":"0.0.1","id":101,"instanceCount":1,"deploymentPort":12345,"engineParameters":{"executorParameters":{"JAVASCRIPT":{"parameterClassName":"org.onap.policy.apex.plugins.executor.javascript.JavascriptExecutorParameters"}},"contextParameters":{"parameterClassName":"org.onap.policy.apex.context.parameters.ContextParameters","schemaParameters":{"Avro":{"parameterClassName":"org.onap.policy.apex.plugins.context.schema.avro.AvroSchemaHelperParameters"}}},"taskParameters":[{"key":"ORU-ODU-Map","value":"{\"ERICSSON-O-RU-11220\": \"HCL-O-DU-1122\",\n \"ERICSSON-O-RU-11221\": \"HCL-O-DU-1122\",\n \"ERICSSON-O-RU-11222\": \"HCL-O-DU-1122\",\n \"ERICSSON-O-RU-11223\": \"HCL-O-DU-1122\",\n \"ERICSSON-O-RU-11224\": \"HCL-O-DU-1123\",\n \"ERICSSON-O-RU-11225\": \"HCL-O-DU-1123\",\n \"ERICSSON-O-RU-11226\": \"HCL-O-DU-1123\",\n \"ERICSSON-O-RU-11227\": \"HCL-O-DU-1124\",\n \"ERICSSON-O-RU-11228\": \"HCL-O-DU-1125\",\n \"ERICSSON-O-RU-11229\": \"HCL-O-DU-1125\"}"}]},"policy_type_impl":{"apexPolicyModel":{"key":{"name":"LinkMonitorModel","version":"0.0.1"},"keyInformation":{"key":{"name":"LinkMonitorModel_KeyInfo","version":"0.0.1"},"keyInfoMap":{"entry":[{"key":{"name":"ApexMessageOutputEvent","version":"0.0.1"},"value":{"key":{"name":"ApexMessageOutputEvent","version":"0.0.1"},"UUID":"cca47d74-7754-4a61-b163-ca31f66b157b","description":"Generated description for concept referred to by key \"ApexMessageOutputEvent:0.0.1\""}},{"key":{"name":"CreateLinkClearedOutfieldsEvent","version":"0.0.1"},"value":{"key":{"name":"CreateLinkClearedOutfieldsEvent","version":"0.0.1"},"UUID":"a295d6a3-1b73-387e-abba-b41e9b608802","description":"Generated description for 
concept referred to by key \"CreateLinkClearedOutfieldsEvent:0.0.1\""}},{"key":{"name":"CreateLinkClearedOutfieldsTask","version":"0.0.1"},"value":{"key":{"name":"CreateLinkClearedOutfieldsTask","version":"0.0.1"},"UUID":"fd594e88-411d-4a94-b2be-697b3a0d7adf","description":"This task creates the output fields when link failure is cleared."}},{"key":{"name":"CreateLinkFailureOutfieldsEvent","version":"0.0.1"},"value":{"key":{"name":"CreateLinkFailureOutfieldsEvent","version":"0.0.1"},"UUID":"02be2b5d-45b7-3c54-ae54-97f2b5c30125","description":"Generated description for concept referred to by key \"CreateLinkFailureOutfieldsEvent:0.0.1\""}},{"key":{"name":"CreateLinkFailureOutfieldsTask","version":"0.0.1"},"value":{"key":{"name":"CreateLinkFailureOutfieldsTask","version":"0.0.1"},"UUID":"ac3d9842-80af-4a98-951c-bd79a431c613","description":"This task the output fields when link failure is detected."}},{"key":{"name":"LinkClearedTask","version":"0.0.1"},"value":{"key":{"name":"LinkClearedTask","version":"0.0.1"},"UUID":"eecfde90-896c-4343-8f9c-2603ced94e2d","description":"This task sends a message to the output when link failure is cleared."}},{"key":{"name":"LinkFailureInputEvent","version":"0.0.1"},"value":{"key":{"name":"LinkFailureInputEvent","version":"0.0.1"},"UUID":"c4500941-3f98-4080-a9cc-5b9753ed050b","description":"Generated description for concept referred to by key \"LinkFailureInputEvent:0.0.1\""}},{"key":{"name":"LinkFailureInputSchema","version":"0.0.1"},"value":{"key":{"name":"LinkFailureInputSchema","version":"0.0.1"},"UUID":"3b3974fc-3012-3b02-9f33-c9d8eefe4dc1","description":"Generated description for concept referred to by key \"LinkFailureInputSchema:0.0.1\""}},{"key":{"name":"LinkFailureOutputEvent","version":"0.0.1"},"value":{"key":{"name":"LinkFailureOutputEvent","version":"0.0.1"},"UUID":"4f04aa98-e917-4f4a-882a-c75ba5a99374","description":"Generated description for concept referred to by key 
\"LinkFailureOutputEvent:0.0.1\""}},{"key":{"name":"LinkFailureOutputSchema","version":"0.0.1"},"value":{"key":{"name":"LinkFailureOutputSchema","version":"0.0.1"},"UUID":"2d1a7f6e-eb9a-3984-be1f-283d98111b84","description":"Generated description for concept referred to by key \"LinkFailureOutputSchema:0.0.1\""}},{"key":{"name":"LinkFailureTask","version":"0.0.1"},"value":{"key":{"name":"LinkFailureTask","version":"0.0.1"},"UUID":"3351b0f4-cf06-4fa2-8823-edf67bd30223","description":"This task updates the config for O-RU when link failure is detected."}},{"key":{"name":"LinkMonitorModel","version":"0.0.1"},"value":{"key":{"name":"LinkMonitorModel","version":"0.0.1"},"UUID":"540226fb-55ee-4f0e-a444-983a0494818e","description":"This is the Apex Policy Model for link monitoring."}},{"key":{"name":"LinkMonitorModel_Events","version":"0.0.1"},"value":{"key":{"name":"LinkMonitorModel_Events","version":"0.0.1"},"UUID":"27ad3e7e-fe3b-3bd6-9081-718705c2bcea","description":"Generated description for concept referred to by key \"LinkMonitorModel_Events:0.0.1\""}},{"key":{"name":"LinkMonitorModel_KeyInfo","version":"0.0.1"},"value":{"key":{"name":"LinkMonitorModel_KeyInfo","version":"0.0.1"},"UUID":"ea0b5f58-eefd-358a-9660-840c640bf981","description":"Generated description for concept referred to by key \"LinkMonitorModel_KeyInfo:0.0.1\""}},{"key":{"name":"LinkMonitorModel_Policies","version":"0.0.1"},"value":{"key":{"name":"LinkMonitorModel_Policies","version":"0.0.1"},"UUID":"ee9e0b0f-2b7d-3ab7-9a98-c5ec05ed823d","description":"Generated description for concept referred to by key \"LinkMonitorModel_Policies:0.0.1\""}},{"key":{"name":"LinkMonitorModel_Schemas","version":"0.0.1"},"value":{"key":{"name":"LinkMonitorModel_Schemas","version":"0.0.1"},"UUID":"fa5f9b8f-796c-3c70-84e9-5140c958c4bb","description":"Generated description for concept referred to by key 
\"LinkMonitorModel_Schemas:0.0.1\""}},{"key":{"name":"LinkMonitorModel_Tasks","version":"0.0.1"},"value":{"key":{"name":"LinkMonitorModel_Tasks","version":"0.0.1"},"UUID":"eec592f7-69d5-39a9-981a-e552f787ed01","description":"Generated description for concept referred to by key \"LinkMonitorModel_Tasks:0.0.1\""}},{"key":{"name":"LinkMonitorPolicy","version":"0.0.1"},"value":{"key":{"name":"LinkMonitorPolicy","version":"0.0.1"},"UUID":"6c5e410f-489a-46ff-964e-982ce6e8b6d0","description":"Generated description for concept referred to by key \"LinkMonitorPolicy:0.0.1\""}},{"key":{"name":"MessageSchema","version":"0.0.1"},"value":{"key":{"name":"MessageSchema","version":"0.0.1"},"UUID":"ac4b34ac-39d6-3393-a267-8d5b84854018","description":"A schema for messages from apex"}},{"key":{"name":"NoPolicyDefinedTask","version":"0.0.1"},"value":{"key":{"name":"NoPolicyDefinedTask","version":"0.0.1"},"UUID":"d48b619e-d00d-4008-b884-02d76ea4350b","description":"This task sends a message to the output when an event is received for which no policy has been defined."}},{"key":{"name":"OduIdSchema","version":"0.0.1"},"value":{"key":{"name":"OduIdSchema","version":"0.0.1"},"UUID":"50662174-a88b-3cbd-91bd-8e91b40b2660","description":"A schema for O-DU-ID"}},{"key":{"name":"OruIdSchema","version":"0.0.1"},"value":{"key":{"name":"OruIdSchema","version":"0.0.1"},"UUID":"54daf32b-015f-39cd-8530-a1175c5553e9","description":"A schema for 
O-RU-ID"}}]}},"policies":{"key":{"name":"LinkMonitorModel_Policies","version":"0.0.1"},"policyMap":{"entry":[{"key":{"name":"LinkMonitorPolicy","version":"0.0.1"},"value":{"policyKey":{"name":"LinkMonitorPolicy","version":"0.0.1"},"template":"Freestyle","state":{"entry":[{"key":"LinkClearedState","value":{"stateKey":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"NULL","localName":"LinkClearedState"},"trigger":{"name":"CreateLinkClearedOutfieldsEvent","version":"0.0.1"},"stateOutputs":{"entry":[{"key":"LinkClearedLogic_Output_Direct","value":{"key":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkClearedState","localName":"LinkClearedLogic_Output_Direct"},"outgoingEvent":{"name":"ApexMessageOutputEvent","version":"0.0.1"},"nextState":{"parentKeyName":"NULL","parentKeyVersion":"0.0.0","parentLocalName":"NULL","localName":"NULL"}}}]},"contextAlbumReference":[],"taskSelectionLogic":{"key":"NULL","logicFlavour":"UNDEFINED","logic":""},"stateFinalizerLogicMap":{"entry":[]},"defaultTask":{"name":"LinkClearedTask","version":"0.0.1"},"taskReferences":{"entry":[{"key":{"name":"LinkClearedTask","version":"0.0.1"},"value":{"key":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkClearedState","localName":"LinkClearedTask"},"outputType":"DIRECT","output":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkClearedState","localName":"LinkClearedLogic_Output_Direct"}}}]}}},{"key":"LinkFailureOrClearedState","value":{"stateKey":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"NULL","localName":"LinkFailureOrClearedState"},"trigger":{"name":"LinkFailureInputEvent","version":"0.0.1"},"stateOutputs":{"entry":[{"key":"CreateLinkClearedOutfieldsLogic_Output_Direct","value":{"key":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureOrClearedState","localName":"CreateLinkC
learedOutfieldsLogic_Output_Direct"},"outgoingEvent":{"name":"CreateLinkClearedOutfieldsEvent","version":"0.0.1"},"nextState":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"NULL","localName":"LinkClearedState"}}},{"key":"CreateLinkFailureOutfieldsLogic_Output_Direct","value":{"key":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureOrClearedState","localName":"CreateLinkFailureOutfieldsLogic_Output_Direct"},"outgoingEvent":{"name":"CreateLinkFailureOutfieldsEvent","version":"0.0.1"},"nextState":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"NULL","localName":"LinkFailureState"}}},{"key":"NoPolicyDefinedLogic_Output_Direct","value":{"key":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureOrClearedState","localName":"NoPolicyDefinedLogic_Output_Direct"},"outgoingEvent":{"name":"ApexMessageOutputEvent","version":"0.0.1"},"nextState":{"parentKeyName":"NULL","parentKeyVersion":"0.0.0","parentLocalName":"NULL","localName":"NULL"}}}]},"contextAlbumReference":[],"taskSelectionLogic":{"key":"TaskSelectionLogic","logicFlavour":"JAVASCRIPT","logic":"/*\n * ============LICENSE_START=======================================================\n * Copyright (C) 2021 Nordix Foundation.\n * ================================================================================\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n 
*\n * SPDX-License-Identifier: Apache-2.0\n * ============LICENSE_END=========================================================\n */\n\nexecutor.logger.info(\"Task Selection Execution: '\"+executor.subject.id+\n \"'. InputFields: '\"+executor.inFields+\"'\");\n\nvar linkFailureInput = executor.inFields.get(\"LinkFailureInput\");\nvar commonEventHeader = linkFailureInput.get(\"event\").get(\"commonEventHeader\");\nvar domain = commonEventHeader.get(\"domain\");\n\ntaskFailure = executor.subject.getTaskKey(\"CreateLinkFailureOutfieldsTask\");\ntaskCleared = executor.subject.getTaskKey(\"CreateLinkClearedOutfieldsTask\");\ntaskDefault = executor.subject.getDefaultTaskKey();\n\nif (domain == \"fault\") {\n var faultFields = linkFailureInput.get(\"event\").get(\"faultFields\");\n var alarmCondition = faultFields.get(\"alarmCondition\");\n var eventSeverity = faultFields.get(\"eventSeverity\");\n if (alarmCondition == \"28\" && eventSeverity != \"NORMAL\") {\n taskFailure.copyTo(executor.selectedTask);\n } else if (alarmCondition == \"28\" && eventSeverity == \"NORMAL\") {\n taskCleared.copyTo(executor.selectedTask);\n } else {\n taskDefault.copyTo(executor.selectedTask);\n }\n} else {\n 
taskDefault.copyTo(executor.selectedTask);\n}\n\ntrue;"},"stateFinalizerLogicMap":{"entry":[]},"defaultTask":{"name":"NoPolicyDefinedTask","version":"0.0.1"},"taskReferences":{"entry":[{"key":{"name":"CreateLinkClearedOutfieldsTask","version":"0.0.1"},"value":{"key":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureOrClearedState","localName":"CreateLinkClearedOutfieldsTask"},"outputType":"DIRECT","output":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureOrClearedState","localName":"CreateLinkClearedOutfieldsLogic_Output_Direct"}}},{"key":{"name":"CreateLinkFailureOutfieldsTask","version":"0.0.1"},"value":{"key":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureOrClearedState","localName":"CreateLinkFailureOutfieldsTask"},"outputType":"DIRECT","output":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureOrClearedState","localName":"CreateLinkFailureOutfieldsLogic_Output_Direct"}}},{"key":{"name":"NoPolicyDefinedTask","version":"0.0.1"},"value":{"key":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureOrClearedState","localName":"NoPolicyDefinedTask"},"outputType":"DIRECT","output":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureOrClearedState","localName":"NoPolicyDefinedLogic_Output_Direct"}}}]}}},{"key":"LinkFailureState","value":{"stateKey":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"NULL","localName":"LinkFailureState"},"trigger":{"name":"CreateLinkFailureOutfieldsEvent","version":"0.0.1"},"stateOutputs":{"entry":[{"key":"LinkFailureLogic_Output_Direct","value":{"key":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureState","localName":"LinkFailureLogic_Output_Direct"},"outgoingEvent":{"name":"LinkFailureOutputEvent","ver
sion":"0.0.1"},"nextState":{"parentKeyName":"NULL","parentKeyVersion":"0.0.0","parentLocalName":"NULL","localName":"NULL"}}}]},"contextAlbumReference":[],"taskSelectionLogic":{"key":"NULL","logicFlavour":"UNDEFINED","logic":""},"stateFinalizerLogicMap":{"entry":[]},"defaultTask":{"name":"LinkFailureTask","version":"0.0.1"},"taskReferences":{"entry":[{"key":{"name":"LinkFailureTask","version":"0.0.1"},"value":{"key":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureState","localName":"LinkFailureTask"},"outputType":"DIRECT","output":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureState","localName":"LinkFailureLogic_Output_Direct"}}}]}}}]},"firstState":"LinkFailureOrClearedState"}}]}},"tasks":{"key":{"name":"LinkMonitorModel_Tasks","version":"0.0.1"},"taskMap":{"entry":[{"key":{"name":"CreateLinkClearedOutfieldsTask","version":"0.0.1"},"value":{"key":{"name":"CreateLinkClearedOutfieldsTask","version":"0.0.1"},"inputFields":{"entry":[{"key":"LinkFailureInput","value":{"key":"LinkFailureInput","fieldSchemaKey":{"name":"LinkFailureInputSchema","version":"0.0.1"},"optional":false}}]},"outputFields":{"entry":[{"key":"OruId","value":{"key":"OruId","fieldSchemaKey":{"name":"OruIdSchema","version":"0.0.1"},"optional":false}}]},"taskParameters":{"entry":[]},"contextAlbumReference":[],"taskLogic":{"key":"TaskLogic","logicFlavour":"JAVASCRIPT","logic":"/*\n * ============LICENSE_START=======================================================\n * Copyright (C) 2021 Nordix Foundation.\n * ================================================================================\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under 
the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n * ============LICENSE_END=========================================================\n */\n\nexecutor.logger.info(\"Task Execution: '\"+executor.subject.id+\"'. Input Fields: '\"+executor.inFields+\"'\");\n\nvar linkFailureInput = executor.inFields.get(\"LinkFailureInput\");\nvar oruId = linkFailureInput.get(\"event\").get(\"commonEventHeader\").get(\"sourceName\");\n\nexecutor.outFields.put(\"OruId\", oruId);\n\nexecutor.logger.info(executor.outFields);\n\ntrue;"}}},{"key":{"name":"CreateLinkFailureOutfieldsTask","version":"0.0.1"},"value":{"key":{"name":"CreateLinkFailureOutfieldsTask","version":"0.0.1"},"inputFields":{"entry":[{"key":"LinkFailureInput","value":{"key":"LinkFailureInput","fieldSchemaKey":{"name":"LinkFailureInputSchema","version":"0.0.1"},"optional":false}}]},"outputFields":{"entry":[{"key":"OduId","value":{"key":"OduId","fieldSchemaKey":{"name":"OduIdSchema","version":"0.0.1"},"optional":false}},{"key":"OruId","value":{"key":"OruId","fieldSchemaKey":{"name":"OruIdSchema","version":"0.0.1"},"optional":false}}]},"taskParameters":{"entry":[]},"contextAlbumReference":[],"taskLogic":{"key":"TaskLogic","logicFlavour":"JAVASCRIPT","logic":"/*\n * ============LICENSE_START=======================================================\n * Copyright (C) 2021 Nordix Foundation.\n * ================================================================================\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the 
License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n * ============LICENSE_END=========================================================\n */\n\nexecutor.logger.info(\"Task Execution: '\"+executor.subject.id+\"'. Input Fields: '\"+executor.inFields+\"'\");\n\nvar returnValue = true;\nvar linkFailureInput = executor.inFields.get(\"LinkFailureInput\");\nvar oruId = linkFailureInput.get(\"event\").get(\"commonEventHeader\").get(\"sourceName\");\nvar oruOduMap = JSON.parse(executor.parameters.get(\"ORU-ODU-Map\"));\n\nif (oruId in oruOduMap) {\n var oduId = oruOduMap[oruId];\n executor.outFields.put(\"OruId\", oruId);\n executor.outFields.put(\"OduId\", oduId);\n executor.logger.info(executor.outFields);\n} else {\n executor.message = \"No O-RU found in the config with this ID: \" + oruId;\n returnValue = false;\n}\n\nreturnValue;"}}},{"key":{"name":"LinkClearedTask","version":"0.0.1"},"value":{"key":{"name":"LinkClearedTask","version":"0.0.1"},"inputFields":{"entry":[{"key":"OruId","value":{"key":"OruId","fieldSchemaKey":{"name":"OruIdSchema","version":"0.0.1"},"optional":false}}]},"outputFields":{"entry":[{"key":"message","value":{"key":"message","fieldSchemaKey":{"name":"MessageSchema","version":"0.0.1"},"optional":false}}]},"taskParameters":{"entry":[]},"contextAlbumReference":[],"taskLogic":{"key":"TaskLogic","logicFlavour":"JAVASCRIPT","logic":"/*\n * ============LICENSE_START=======================================================\n * Copyright (C) 2021 Nordix Foundation.\n * ================================================================================\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * 
http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n * ============LICENSE_END=========================================================\n */\n\nexecutor.logger.info(\"Task Execution: '\"+executor.subject.id+\"'. Input Fields: '\"+executor.inFields+\"'\");\n\nvar oruId = executor.inFields.get(\"OruId\");\n\nexecutor.outFields.put(\"message\", \"CLEARED link failure for O-RU: \" + oruId);\n\nexecutor.logger.info(executor.outFields);\n\ntrue;"}}},{"key":{"name":"LinkFailureTask","version":"0.0.1"},"value":{"key":{"name":"LinkFailureTask","version":"0.0.1"},"inputFields":{"entry":[{"key":"OduId","value":{"key":"OduId","fieldSchemaKey":{"name":"OduIdSchema","version":"0.0.1"},"optional":false}},{"key":"OruId","value":{"key":"OruId","fieldSchemaKey":{"name":"OruIdSchema","version":"0.0.1"},"optional":false}}]},"outputFields":{"entry":[{"key":"LinkFailureOutput","value":{"key":"LinkFailureOutput","fieldSchemaKey":{"name":"LinkFailureOutputSchema","version":"0.0.1"},"optional":false}}]},"taskParameters":{"entry":[]},"contextAlbumReference":[],"taskLogic":{"key":"TaskLogic","logicFlavour":"JAVASCRIPT","logic":"/*\n * ============LICENSE_START=======================================================\n * Copyright (C) 2021 Nordix Foundation.\n * ================================================================================\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in 
writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n * ============LICENSE_END=========================================================\n */\n\nexecutor.logger.info(\"Task Execution: '\"+executor.subject.id+\"'. Input Fields: '\"+executor.inFields+\"'\");\n\nvar linkFailureOutput = executor.subject.getOutFieldSchemaHelper(\"LinkFailureOutput\").createNewInstance();\n\nvar oruId = executor.inFields.get(\"OruId\");\nvar oduId = executor.inFields.get(\"OduId\");\n\nvar unlockMessageArray = new java.util.ArrayList();\nfor (var i = 0; i < 1; i++) {\n unlockMessageArray.add({\n \"name\" : oruId,\n \"administrative_DasH_state\" : \"UNLOCKED\"\n });\n}\n\nlinkFailureOutput.put(\"o_DasH_ran_DasH_sc_DasH_du_DasH_hello_DasH_world_ColoN_du_DasH_to_DasH_ru_DasH_connection\", unlockMessageArray);\nexecutor.outFields.put(\"LinkFailureOutput\", linkFailureOutput.toString());\n\nexecutor.getExecutionProperties().setProperty(\"OduId\", oduId);\nexecutor.getExecutionProperties().setProperty(\"OruId\", oruId);\n\nexecutor.logger.info(executor.outFields);\n\ntrue;"}}},{"key":{"name":"NoPolicyDefinedTask","version":"0.0.1"},"value":{"key":{"name":"NoPolicyDefinedTask","version":"0.0.1"},"inputFields":{"entry":[{"key":"LinkFailureInput","value":{"key":"LinkFailureInput","fieldSchemaKey":{"name":"LinkFailureInputSchema","version":"0.0.1"},"optional":false}}]},"outputFields":{"entry":[{"key":"message","value":{"key":"message","fieldSchemaKey":{"name":"MessageSchema","version":"0.0.1"},"optional":false}}]},"taskParameters":{"entry":[]},"contextAlbumReference":[],"taskLogic":{"key":"TaskLogic","logicFlavour":"JAVASCRIPT","logic":"/*\n * ============LICENSE_START=======================================================\n * Copyright 
(C) 2021 Nordix Foundation.\n * ================================================================================\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n * ============LICENSE_END=========================================================\n */\n\nexecutor.logger.info(\"Task Execution: '\"+executor.subject.id+\"'. Input Fields: '\"+executor.inFields+\"'\");\n\nexecutor.outFields.put(\"message\", \"No policy defined for this event\");\n\nexecutor.logger.info(executor.outFields);\n\ntrue;"}}}]}},"events":{"key":{"name":"LinkMonitorModel_Events","version":"0.0.1"},"eventMap":{"entry":[{"key":{"name":"ApexMessageOutputEvent","version":"0.0.1"},"value":{"key":{"name":"ApexMessageOutputEvent","version":"0.0.1"},"nameSpace":"org.onap.policy.apex.auth.clieditor","source":"APEX","target":"APEX","parameter":{"entry":[{"key":"message","value":{"key":"message","fieldSchemaKey":{"name":"MessageSchema","version":"0.0.1"},"optional":false}}]}}},{"key":{"name":"CreateLinkClearedOutfieldsEvent","version":"0.0.1"},"value":{"key":{"name":"CreateLinkClearedOutfieldsEvent","version":"0.0.1"},"nameSpace":"org.onap.policy.apex.auth.clieditor","source":"APEX","target":"APEX","parameter":{"entry":[{"key":"OruId","value":{"key":"OruId","fieldSchemaKey":{"name":"OruIdSchema","version":"0.0.1"},"optional":false}}]}}},{"key":{"name":"CreateLinkFailureOutfieldsEvent","version":"0.0.1"},"value":{"key":{"name":"CreateLinkFailureOutfieldsEve
nt","version":"0.0.1"},"nameSpace":"org.onap.policy.apex.auth.clieditor","source":"APEX","target":"APEX","parameter":{"entry":[{"key":"OduId","value":{"key":"OduId","fieldSchemaKey":{"name":"OduIdSchema","version":"0.0.1"},"optional":false}},{"key":"OruId","value":{"key":"OruId","fieldSchemaKey":{"name":"OruIdSchema","version":"0.0.1"},"optional":false}}]}}},{"key":{"name":"LinkFailureInputEvent","version":"0.0.1"},"value":{"key":{"name":"LinkFailureInputEvent","version":"0.0.1"},"nameSpace":"org.onap.policy.apex.auth.clieditor","source":"DMAAP","target":"APEX","parameter":{"entry":[{"key":"LinkFailureInput","value":{"key":"LinkFailureInput","fieldSchemaKey":{"name":"LinkFailureInputSchema","version":"0.0.1"},"optional":false}}]}}},{"key":{"name":"LinkFailureOutputEvent","version":"0.0.1"},"value":{"key":{"name":"LinkFailureOutputEvent","version":"0.0.1"},"nameSpace":"org.onap.policy.apex.auth.clieditor","source":"APEX","target":"OAM","parameter":{"entry":[{"key":"LinkFailureOutput","value":{"key":"LinkFailureOutput","fieldSchemaKey":{"name":"LinkFailureOutputSchema","version":"0.0.1"},"optional":false}}]}}}]}},"schemas":{"key":{"name":"LinkMonitorModel_Schemas","version":"0.0.1"},"schemas":{"entry":[{"key":{"name":"LinkFailureInputSchema","version":"0.0.1"},"value":{"key":{"name":"LinkFailureInputSchema","version":"0.0.1"},"schemaFlavour":"Avro","schemaDefinition":"{\n \"type\": \"record\",\n \"name\": \"Link_Failure_Input\",\n \"fields\": [\n {\n \"name\": \"event\",\n \"type\": {\n \"type\": \"record\",\n \"name\": \"Event_Type\",\n \"fields\": [\n {\n \"name\": \"commonEventHeader\",\n \"type\": {\n \"type\": \"record\",\n \"name\": \"Common_Event_Header_Type\",\n \"fields\": [\n {\n \"name\": \"domain\",\n \"type\": \"string\"\n },\n {\n \"name\": \"eventId\",\n \"type\": \"string\"\n },\n {\n \"name\": \"eventName\",\n \"type\": \"string\"\n },\n {\n \"name\": \"eventType\",\n \"type\": \"string\"\n },\n {\n \"name\": \"sequence\",\n \"type\": \"int\"\n },\n 
{\n \"name\": \"priority\",\n \"type\": \"string\"\n },\n {\n \"name\": \"reportingEntityId\",\n \"type\": \"string\"\n },\n {\n \"name\": \"reportingEntityName\",\n \"type\": \"string\"\n },\n {\n \"name\": \"sourceId\",\n \"type\": \"string\"\n },\n {\n \"name\": \"sourceName\",\n \"type\": \"string\"\n },\n {\n \"name\": \"startEpochMicrosec\",\n \"type\": \"string\"\n },\n {\n \"name\": \"lastEpochMicrosec\",\n \"type\": \"string\"\n },\n {\n \"name\": \"nfNamingCode\",\n \"type\": \"string\"\n },\n {\n \"name\": \"nfVendorName\",\n \"type\": \"string\"\n },\n {\n \"name\": \"timeZoneOffset\",\n \"type\": \"string\"\n },\n {\n \"name\": \"version\",\n \"type\": \"string\"\n },\n {\n \"name\": \"vesEventListenerVersion\",\n \"type\": \"string\"\n }\n ]\n }\n },\n {\n \"name\": \"faultFields\",\n \"type\": {\n \"type\": \"record\",\n \"name\": \"Fault_Fields_Type\",\n \"fields\": [\n {\n \"name\": \"faultFieldsVersion\",\n \"type\": \"string\"\n },\n {\n \"name\": \"alarmCondition\",\n \"type\": \"string\"\n },\n {\n \"name\": \"alarmInterfaceA\",\n \"type\": \"string\"\n },\n {\n \"name\": \"eventSourceType\",\n \"type\": \"string\"\n },\n {\n \"name\": \"specificProblem\",\n \"type\": \"string\"\n },\n {\n \"name\": \"eventSeverity\",\n \"type\": \"string\"\n },\n {\n \"name\": \"vfStatus\",\n \"type\": \"string\"\n },\n {\n \"name\": \"alarmAdditionalInformation\",\n \"type\": {\n \"type\": \"record\",\n \"name\": \"Alarm_Additional_Information_Type\",\n \"fields\": [\n {\n \"name\": \"eventTime\",\n \"type\": \"string\"\n },\n {\n \"name\": \"equipType\",\n \"type\": \"string\"\n },\n {\n \"name\": \"vendor\",\n \"type\": \"string\"\n },\n {\n \"name\": \"model\",\n \"type\": \"string\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n }\n }\n ]\n}"}},{"key":{"name":"LinkFailureOutputSchema","version":"0.0.1"},"value":{"key":{"name":"LinkFailureOutputSchema","version":"0.0.1"},"schemaFlavour":"Avro","schemaDefinition":"{\n \"type\": \"record\",\n \"name\": 
\"Link_Failure_Output\",\n \"fields\": [\n {\n \"name\": \"o_DasH_ran_DasH_sc_DasH_du_DasH_hello_DasH_world_ColoN_du_DasH_to_DasH_ru_DasH_connection\",\n \"type\": {\n \t\"type\": \"array\",\n \t\"items\": {\n\t\t \"name\": \"Config_Change_Message\",\n \"type\": \"record\",\n \"fields\": [\n {\n \"name\": \"name\",\n \"type\": \"string\"\n },\n\t\t\t{\n \"name\": \"administrative_DasH_state\",\n \"type\": \"string\"\n }\n ]\n }\n\t }\n }\n ]\n}"}},{"key":{"name":"MessageSchema","version":"0.0.1"},"value":{"key":{"name":"MessageSchema","version":"0.0.1"},"schemaFlavour":"Java","schemaDefinition":"java.lang.String"}},{"key":{"name":"OduIdSchema","version":"0.0.1"},"value":{"key":{"name":"OduIdSchema","version":"0.0.1"},"schemaFlavour":"Java","schemaDefinition":"java.lang.String"}},{"key":{"name":"OruIdSchema","version":"0.0.1"},"value":{"key":{"name":"OruIdSchema","version":"0.0.1"},"schemaFlavour":"Java","schemaDefinition":"java.lang.String"}}]}}}}},"eventOutputParameters":{"RestProducer":{"carrierTechnologyParameters":{"carrierTechnology":"RESTCLIENT","parameterClassName":"org.onap.policy.apex.plugins.event.carrier.restclient.RestClientCarrierTechnologyParameters","parameters":{"url":"http://sdnr-sim:9990/rests/data/network-topology:network-topology/topology=topology-netconf/node={OduId}/yang-ext:mount/o-ran-sc-du-hello-world:network-function/du-to-ru-connection={OruId}","httpMethod":"PUT","httpHeaders":[["Authorization","Basic 
YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="]]}},"eventProtocolParameters":{"eventProtocol":"JSON","parameters":{"pojoField":"LinkFailureOutput"}},"eventNameFilter":"LinkFailureOutputEvent"},"StdOutProducer":{"carrierTechnologyParameters":{"carrierTechnology":"FILE","parameters":{"standardIo":true}},"eventProtocolParameters":{"eventProtocol":"JSON","parameters":{"pojoField":"message"}},"eventNameFilter":"ApexMessageOutputEvent"}},"eventInputParameters":{"DMaaPConsumer":{"carrierTechnologyParameters":{"carrierTechnology":"RESTCLIENT","parameterClassName":"org.onap.policy.apex.plugins.event.carrier.restclient.RestClientCarrierTechnologyParameters","parameters":{"url":"http://onap-dmaap:3904/events/unauthenticated.SEC_FAULT_OUTPUT/users/link-monitor-nonrtric?timeout=15000&limit=100"}},"eventProtocolParameters":{"eventProtocol":"JSON","parameters":{"versionAlias":"version","pojoField":"LinkFailureInput"}},"eventName":"LinkFailureInputEvent"}}}}}]}}
\ No newline at end of file
+{"tosca_definitions_version":"tosca_simple_yaml_1_1_0","topology_template":{"policies":[{"onap.policies.native.apex.LinkMonitor":{"type":"onap.policies.native.Apex","type_version":"1.0.0","name":"onap.policies.native.apex.LinkMonitor","version":"1.0.0","properties":{"engineServiceParameters":{"name":"LinkMonitorApexEngine","version":"0.0.1","id":101,"instanceCount":1,"deploymentPort":12345,"engineParameters":{"executorParameters":{"JAVASCRIPT":{"parameterClassName":"org.onap.policy.apex.plugins.executor.javascript.JavascriptExecutorParameters"}},"contextParameters":{"parameterClassName":"org.onap.policy.apex.context.parameters.ContextParameters","schemaParameters":{"Avro":{"parameterClassName":"org.onap.policy.apex.plugins.context.schema.avro.AvroSchemaHelperParameters"}}},"taskParameters":[{"key":"ORU-ODU-Map","value":"{\"ERICSSON-O-RU-11220\": \"O-DU-1122\",\r\n \"ERICSSON-O-RU-11221\": \"O-DU-1122\",\r\n \"ERICSSON-O-RU-11222\": \"O-DU-1122\",\r\n \"ERICSSON-O-RU-11223\": \"O-DU-1122\",\r\n \"ERICSSON-O-RU-11224\": \"O-DU-1123\",\r\n \"ERICSSON-O-RU-11225\": \"O-DU-1123\",\r\n \"ERICSSON-O-RU-11226\": \"O-DU-1123\",\r\n \"ERICSSON-O-RU-11227\": \"O-DU-1124\",\r\n \"ERICSSON-O-RU-11228\": \"O-DU-1125\",\r\n \"ERICSSON-O-RU-11229\": \"O-DU-1125\"}"}]},"policy_type_impl":{"apexPolicyModel":{"key":{"name":"LinkMonitorModel","version":"0.0.1"},"keyInformation":{"key":{"name":"LinkMonitorModel_KeyInfo","version":"0.0.1"},"keyInfoMap":{"entry":[{"key":{"name":"ApexMessageOutputEvent","version":"0.0.1"},"value":{"key":{"name":"ApexMessageOutputEvent","version":"0.0.1"},"UUID":"cca47d74-7754-4a61-b163-ca31f66b157b","description":"Generated description for concept referred to by key \"ApexMessageOutputEvent:0.0.1\""}},{"key":{"name":"CreateLinkClearedOutfieldsEvent","version":"0.0.1"},"value":{"key":{"name":"CreateLinkClearedOutfieldsEvent","version":"0.0.1"},"UUID":"a295d6a3-1b73-387e-abba-b41e9b608802","description":"Generated description for concept referred to by key 
\"CreateLinkClearedOutfieldsEvent:0.0.1\""}},{"key":{"name":"CreateLinkClearedOutfieldsTask","version":"0.0.1"},"value":{"key":{"name":"CreateLinkClearedOutfieldsTask","version":"0.0.1"},"UUID":"fd594e88-411d-4a94-b2be-697b3a0d7adf","description":"This task creates the output fields when link failure is cleared."}},{"key":{"name":"CreateLinkFailureOutfieldsEvent","version":"0.0.1"},"value":{"key":{"name":"CreateLinkFailureOutfieldsEvent","version":"0.0.1"},"UUID":"02be2b5d-45b7-3c54-ae54-97f2b5c30125","description":"Generated description for concept referred to by key \"CreateLinkFailureOutfieldsEvent:0.0.1\""}},{"key":{"name":"CreateLinkFailureOutfieldsTask","version":"0.0.1"},"value":{"key":{"name":"CreateLinkFailureOutfieldsTask","version":"0.0.1"},"UUID":"ac3d9842-80af-4a98-951c-bd79a431c613","description":"This task the output fields when link failure is detected."}},{"key":{"name":"LinkClearedTask","version":"0.0.1"},"value":{"key":{"name":"LinkClearedTask","version":"0.0.1"},"UUID":"eecfde90-896c-4343-8f9c-2603ced94e2d","description":"This task sends a message to the output when link failure is cleared."}},{"key":{"name":"LinkFailureInputEvent","version":"0.0.1"},"value":{"key":{"name":"LinkFailureInputEvent","version":"0.0.1"},"UUID":"c4500941-3f98-4080-a9cc-5b9753ed050b","description":"Generated description for concept referred to by key \"LinkFailureInputEvent:0.0.1\""}},{"key":{"name":"LinkFailureInputSchema","version":"0.0.1"},"value":{"key":{"name":"LinkFailureInputSchema","version":"0.0.1"},"UUID":"3b3974fc-3012-3b02-9f33-c9d8eefe4dc1","description":"Generated description for concept referred to by key \"LinkFailureInputSchema:0.0.1\""}},{"key":{"name":"LinkFailureOutputEvent","version":"0.0.1"},"value":{"key":{"name":"LinkFailureOutputEvent","version":"0.0.1"},"UUID":"4f04aa98-e917-4f4a-882a-c75ba5a99374","description":"Generated description for concept referred to by key 
\"LinkFailureOutputEvent:0.0.1\""}},{"key":{"name":"LinkFailureOutputSchema","version":"0.0.1"},"value":{"key":{"name":"LinkFailureOutputSchema","version":"0.0.1"},"UUID":"2d1a7f6e-eb9a-3984-be1f-283d98111b84","description":"Generated description for concept referred to by key \"LinkFailureOutputSchema:0.0.1\""}},{"key":{"name":"LinkFailureTask","version":"0.0.1"},"value":{"key":{"name":"LinkFailureTask","version":"0.0.1"},"UUID":"3351b0f4-cf06-4fa2-8823-edf67bd30223","description":"This task updates the config for O-RU when link failure is detected."}},{"key":{"name":"LinkMonitorModel","version":"0.0.1"},"value":{"key":{"name":"LinkMonitorModel","version":"0.0.1"},"UUID":"540226fb-55ee-4f0e-a444-983a0494818e","description":"This is the Apex Policy Model for link monitoring."}},{"key":{"name":"LinkMonitorModel_Events","version":"0.0.1"},"value":{"key":{"name":"LinkMonitorModel_Events","version":"0.0.1"},"UUID":"27ad3e7e-fe3b-3bd6-9081-718705c2bcea","description":"Generated description for concept referred to by key \"LinkMonitorModel_Events:0.0.1\""}},{"key":{"name":"LinkMonitorModel_KeyInfo","version":"0.0.1"},"value":{"key":{"name":"LinkMonitorModel_KeyInfo","version":"0.0.1"},"UUID":"ea0b5f58-eefd-358a-9660-840c640bf981","description":"Generated description for concept referred to by key \"LinkMonitorModel_KeyInfo:0.0.1\""}},{"key":{"name":"LinkMonitorModel_Policies","version":"0.0.1"},"value":{"key":{"name":"LinkMonitorModel_Policies","version":"0.0.1"},"UUID":"ee9e0b0f-2b7d-3ab7-9a98-c5ec05ed823d","description":"Generated description for concept referred to by key \"LinkMonitorModel_Policies:0.0.1\""}},{"key":{"name":"LinkMonitorModel_Schemas","version":"0.0.1"},"value":{"key":{"name":"LinkMonitorModel_Schemas","version":"0.0.1"},"UUID":"fa5f9b8f-796c-3c70-84e9-5140c958c4bb","description":"Generated description for concept referred to by key 
\"LinkMonitorModel_Schemas:0.0.1\""}},{"key":{"name":"LinkMonitorModel_Tasks","version":"0.0.1"},"value":{"key":{"name":"LinkMonitorModel_Tasks","version":"0.0.1"},"UUID":"eec592f7-69d5-39a9-981a-e552f787ed01","description":"Generated description for concept referred to by key \"LinkMonitorModel_Tasks:0.0.1\""}},{"key":{"name":"LinkMonitorPolicy","version":"0.0.1"},"value":{"key":{"name":"LinkMonitorPolicy","version":"0.0.1"},"UUID":"6c5e410f-489a-46ff-964e-982ce6e8b6d0","description":"Generated description for concept referred to by key \"LinkMonitorPolicy:0.0.1\""}},{"key":{"name":"MessageSchema","version":"0.0.1"},"value":{"key":{"name":"MessageSchema","version":"0.0.1"},"UUID":"ac4b34ac-39d6-3393-a267-8d5b84854018","description":"A schema for messages from apex"}},{"key":{"name":"NoPolicyDefinedTask","version":"0.0.1"},"value":{"key":{"name":"NoPolicyDefinedTask","version":"0.0.1"},"UUID":"d48b619e-d00d-4008-b884-02d76ea4350b","description":"This task sends a message to the output when an event is received for which no policy has been defined."}},{"key":{"name":"OduIdSchema","version":"0.0.1"},"value":{"key":{"name":"OduIdSchema","version":"0.0.1"},"UUID":"50662174-a88b-3cbd-91bd-8e91b40b2660","description":"A schema for O-DU-ID"}},{"key":{"name":"OruIdSchema","version":"0.0.1"},"value":{"key":{"name":"OruIdSchema","version":"0.0.1"},"UUID":"54daf32b-015f-39cd-8530-a1175c5553e9","description":"A schema for 
O-RU-ID"}}]}},"policies":{"key":{"name":"LinkMonitorModel_Policies","version":"0.0.1"},"policyMap":{"entry":[{"key":{"name":"LinkMonitorPolicy","version":"0.0.1"},"value":{"policyKey":{"name":"LinkMonitorPolicy","version":"0.0.1"},"template":"Freestyle","state":{"entry":[{"key":"LinkClearedState","value":{"stateKey":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"NULL","localName":"LinkClearedState"},"trigger":{"name":"CreateLinkClearedOutfieldsEvent","version":"0.0.1"},"stateOutputs":{"entry":[{"key":"LinkClearedLogic_Output_Direct","value":{"key":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkClearedState","localName":"LinkClearedLogic_Output_Direct"},"outgoingEvent":{"name":"ApexMessageOutputEvent","version":"0.0.1"},"nextState":{"parentKeyName":"NULL","parentKeyVersion":"0.0.0","parentLocalName":"NULL","localName":"NULL"}}}]},"contextAlbumReference":[],"taskSelectionLogic":{"key":"NULL","logicFlavour":"UNDEFINED","logic":""},"stateFinalizerLogicMap":{"entry":[]},"defaultTask":{"name":"LinkClearedTask","version":"0.0.1"},"taskReferences":{"entry":[{"key":{"name":"LinkClearedTask","version":"0.0.1"},"value":{"key":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkClearedState","localName":"LinkClearedTask"},"outputType":"DIRECT","output":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkClearedState","localName":"LinkClearedLogic_Output_Direct"}}}]}}},{"key":"LinkFailureOrClearedState","value":{"stateKey":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"NULL","localName":"LinkFailureOrClearedState"},"trigger":{"name":"LinkFailureInputEvent","version":"0.0.1"},"stateOutputs":{"entry":[{"key":"CreateLinkClearedOutfieldsLogic_Output_Direct","value":{"key":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureOrClearedState","localName":"CreateLinkC
learedOutfieldsLogic_Output_Direct"},"outgoingEvent":{"name":"CreateLinkClearedOutfieldsEvent","version":"0.0.1"},"nextState":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"NULL","localName":"LinkClearedState"}}},{"key":"CreateLinkFailureOutfieldsLogic_Output_Direct","value":{"key":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureOrClearedState","localName":"CreateLinkFailureOutfieldsLogic_Output_Direct"},"outgoingEvent":{"name":"CreateLinkFailureOutfieldsEvent","version":"0.0.1"},"nextState":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"NULL","localName":"LinkFailureState"}}},{"key":"NoPolicyDefinedLogic_Output_Direct","value":{"key":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureOrClearedState","localName":"NoPolicyDefinedLogic_Output_Direct"},"outgoingEvent":{"name":"ApexMessageOutputEvent","version":"0.0.1"},"nextState":{"parentKeyName":"NULL","parentKeyVersion":"0.0.0","parentLocalName":"NULL","localName":"NULL"}}}]},"contextAlbumReference":[],"taskSelectionLogic":{"key":"TaskSelectionLogic","logicFlavour":"JAVASCRIPT","logic":"/*\n * ============LICENSE_START=======================================================\n * Copyright (C) 2021 Nordix Foundation.\n * ================================================================================\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n 
*\n * SPDX-License-Identifier: Apache-2.0\n * ============LICENSE_END=========================================================\n */\n\nexecutor.logger.info(\"Task Selection Execution: '\"+executor.subject.id+\n \"'. InputFields: '\"+executor.inFields+\"'\");\n\nvar linkFailureInput = executor.inFields.get(\"LinkFailureInput\");\nvar commonEventHeader = linkFailureInput.get(\"event\").get(\"commonEventHeader\");\nvar domain = commonEventHeader.get(\"domain\");\n\ntaskFailure = executor.subject.getTaskKey(\"CreateLinkFailureOutfieldsTask\");\ntaskCleared = executor.subject.getTaskKey(\"CreateLinkClearedOutfieldsTask\");\ntaskDefault = executor.subject.getDefaultTaskKey();\n\nif (domain == \"fault\") {\n var faultFields = linkFailureInput.get(\"event\").get(\"faultFields\");\n var alarmCondition = faultFields.get(\"alarmCondition\");\n var eventSeverity = faultFields.get(\"eventSeverity\");\n if (alarmCondition == \"28\" && eventSeverity != \"NORMAL\") {\n taskFailure.copyTo(executor.selectedTask);\n } else if (alarmCondition == \"28\" && eventSeverity == \"NORMAL\") {\n taskCleared.copyTo(executor.selectedTask);\n } else {\n taskDefault.copyTo(executor.selectedTask);\n }\n} else {\n 
taskDefault.copyTo(executor.selectedTask);\n}\n\ntrue;"},"stateFinalizerLogicMap":{"entry":[]},"defaultTask":{"name":"NoPolicyDefinedTask","version":"0.0.1"},"taskReferences":{"entry":[{"key":{"name":"CreateLinkClearedOutfieldsTask","version":"0.0.1"},"value":{"key":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureOrClearedState","localName":"CreateLinkClearedOutfieldsTask"},"outputType":"DIRECT","output":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureOrClearedState","localName":"CreateLinkClearedOutfieldsLogic_Output_Direct"}}},{"key":{"name":"CreateLinkFailureOutfieldsTask","version":"0.0.1"},"value":{"key":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureOrClearedState","localName":"CreateLinkFailureOutfieldsTask"},"outputType":"DIRECT","output":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureOrClearedState","localName":"CreateLinkFailureOutfieldsLogic_Output_Direct"}}},{"key":{"name":"NoPolicyDefinedTask","version":"0.0.1"},"value":{"key":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureOrClearedState","localName":"NoPolicyDefinedTask"},"outputType":"DIRECT","output":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureOrClearedState","localName":"NoPolicyDefinedLogic_Output_Direct"}}}]}}},{"key":"LinkFailureState","value":{"stateKey":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"NULL","localName":"LinkFailureState"},"trigger":{"name":"CreateLinkFailureOutfieldsEvent","version":"0.0.1"},"stateOutputs":{"entry":[{"key":"LinkFailureLogic_Output_Direct","value":{"key":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureState","localName":"LinkFailureLogic_Output_Direct"},"outgoingEvent":{"name":"LinkFailureOutputEvent","ver
sion":"0.0.1"},"nextState":{"parentKeyName":"NULL","parentKeyVersion":"0.0.0","parentLocalName":"NULL","localName":"NULL"}}}]},"contextAlbumReference":[],"taskSelectionLogic":{"key":"NULL","logicFlavour":"UNDEFINED","logic":""},"stateFinalizerLogicMap":{"entry":[]},"defaultTask":{"name":"LinkFailureTask","version":"0.0.1"},"taskReferences":{"entry":[{"key":{"name":"LinkFailureTask","version":"0.0.1"},"value":{"key":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureState","localName":"LinkFailureTask"},"outputType":"DIRECT","output":{"parentKeyName":"LinkMonitorPolicy","parentKeyVersion":"0.0.1","parentLocalName":"LinkFailureState","localName":"LinkFailureLogic_Output_Direct"}}}]}}}]},"firstState":"LinkFailureOrClearedState"}}]}},"tasks":{"key":{"name":"LinkMonitorModel_Tasks","version":"0.0.1"},"taskMap":{"entry":[{"key":{"name":"CreateLinkClearedOutfieldsTask","version":"0.0.1"},"value":{"key":{"name":"CreateLinkClearedOutfieldsTask","version":"0.0.1"},"inputFields":{"entry":[{"key":"LinkFailureInput","value":{"key":"LinkFailureInput","fieldSchemaKey":{"name":"LinkFailureInputSchema","version":"0.0.1"},"optional":false}}]},"outputFields":{"entry":[{"key":"OruId","value":{"key":"OruId","fieldSchemaKey":{"name":"OruIdSchema","version":"0.0.1"},"optional":false}}]},"taskParameters":{"entry":[]},"contextAlbumReference":[],"taskLogic":{"key":"TaskLogic","logicFlavour":"JAVASCRIPT","logic":"/*\n * ============LICENSE_START=======================================================\n * Copyright (C) 2021 Nordix Foundation.\n * ================================================================================\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under 
the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n * ============LICENSE_END=========================================================\n */\n\nexecutor.logger.info(\"Task Execution: '\"+executor.subject.id+\"'. Input Fields: '\"+executor.inFields+\"'\");\n\nvar linkFailureInput = executor.inFields.get(\"LinkFailureInput\");\nvar oruId = linkFailureInput.get(\"event\").get(\"commonEventHeader\").get(\"sourceName\");\n\nexecutor.outFields.put(\"OruId\", oruId);\n\nexecutor.logger.info(executor.outFields);\n\ntrue;"}}},{"key":{"name":"CreateLinkFailureOutfieldsTask","version":"0.0.1"},"value":{"key":{"name":"CreateLinkFailureOutfieldsTask","version":"0.0.1"},"inputFields":{"entry":[{"key":"LinkFailureInput","value":{"key":"LinkFailureInput","fieldSchemaKey":{"name":"LinkFailureInputSchema","version":"0.0.1"},"optional":false}}]},"outputFields":{"entry":[{"key":"OduId","value":{"key":"OduId","fieldSchemaKey":{"name":"OduIdSchema","version":"0.0.1"},"optional":false}},{"key":"OruId","value":{"key":"OruId","fieldSchemaKey":{"name":"OruIdSchema","version":"0.0.1"},"optional":false}}]},"taskParameters":{"entry":[]},"contextAlbumReference":[],"taskLogic":{"key":"TaskLogic","logicFlavour":"JAVASCRIPT","logic":"/*\n * ============LICENSE_START=======================================================\n * Copyright (C) 2021 Nordix Foundation.\n * ================================================================================\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the 
License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n * ============LICENSE_END=========================================================\n */\n\nexecutor.logger.info(\"Task Execution: '\"+executor.subject.id+\"'. Input Fields: '\"+executor.inFields+\"'\");\n\nvar returnValue = true;\nvar linkFailureInput = executor.inFields.get(\"LinkFailureInput\");\nvar oruId = linkFailureInput.get(\"event\").get(\"commonEventHeader\").get(\"sourceName\");\nvar oruOduMap = JSON.parse(executor.parameters.get(\"ORU-ODU-Map\"));\n\nif (oruId in oruOduMap) {\n var oduId = oruOduMap[oruId];\n executor.outFields.put(\"OruId\", oruId);\n executor.outFields.put(\"OduId\", oduId);\n executor.logger.info(executor.outFields);\n} else {\n executor.message = \"No O-RU found in the config with this ID: \" + oruId;\n returnValue = false;\n}\n\nreturnValue;"}}},{"key":{"name":"LinkClearedTask","version":"0.0.1"},"value":{"key":{"name":"LinkClearedTask","version":"0.0.1"},"inputFields":{"entry":[{"key":"OruId","value":{"key":"OruId","fieldSchemaKey":{"name":"OruIdSchema","version":"0.0.1"},"optional":false}}]},"outputFields":{"entry":[{"key":"message","value":{"key":"message","fieldSchemaKey":{"name":"MessageSchema","version":"0.0.1"},"optional":false}}]},"taskParameters":{"entry":[]},"contextAlbumReference":[],"taskLogic":{"key":"TaskLogic","logicFlavour":"JAVASCRIPT","logic":"/*\n * ============LICENSE_START=======================================================\n * Copyright (C) 2021 Nordix Foundation.\n * ================================================================================\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * 
http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n * ============LICENSE_END=========================================================\n */\n\nexecutor.logger.info(\"Task Execution: '\"+executor.subject.id+\"'. Input Fields: '\"+executor.inFields+\"'\");\n\nvar oruId = executor.inFields.get(\"OruId\");\n\nexecutor.outFields.put(\"message\", \"CLEARED link failure for O-RU: \" + oruId);\n\nexecutor.logger.info(executor.outFields);\n\ntrue;"}}},{"key":{"name":"LinkFailureTask","version":"0.0.1"},"value":{"key":{"name":"LinkFailureTask","version":"0.0.1"},"inputFields":{"entry":[{"key":"OduId","value":{"key":"OduId","fieldSchemaKey":{"name":"OduIdSchema","version":"0.0.1"},"optional":false}},{"key":"OruId","value":{"key":"OruId","fieldSchemaKey":{"name":"OruIdSchema","version":"0.0.1"},"optional":false}}]},"outputFields":{"entry":[{"key":"LinkFailureOutput","value":{"key":"LinkFailureOutput","fieldSchemaKey":{"name":"LinkFailureOutputSchema","version":"0.0.1"},"optional":false}}]},"taskParameters":{"entry":[]},"contextAlbumReference":[],"taskLogic":{"key":"TaskLogic","logicFlavour":"JAVASCRIPT","logic":"/*\n * ============LICENSE_START=======================================================\n * Copyright (C) 2021 Nordix Foundation.\n * ================================================================================\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in 
writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n * ============LICENSE_END=========================================================\n */\n\nexecutor.logger.info(\"Task Execution: '\"+executor.subject.id+\"'. Input Fields: '\"+executor.inFields+\"'\");\n\nvar linkFailureOutput = executor.subject.getOutFieldSchemaHelper(\"LinkFailureOutput\").createNewInstance();\n\nvar oruId = executor.inFields.get(\"OruId\");\nvar oduId = executor.inFields.get(\"OduId\");\n\nvar unlockMessageArray = new java.util.ArrayList();\nfor (var i = 0; i < 1; i++) {\n unlockMessageArray.add({\n \"id\":\"rrm-pol-1\",\n \"radio_DasH_resource_DasH_management_DasH_policy_DasH_max_DasH_ratio\":25,\n \"radio_DasH_resource_DasH_management_DasH_policy_DasH_members\":\n [\n {\n \"mobile_DasH_country_DasH_code\":\"310\",\n \"mobile_DasH_network_DasH_code\":\"150\",\n \"slice_DasH_differentiator\":1,\n \"slice_DasH_service_DasH_type\":1\n }\n ],\n \"radio_DasH_resource_DasH_management_DasH_policy_DasH_min_DasH_ratio\":15,\n \"user_DasH_label\":\"rrm-pol-1\",\n \"resource_DasH_type\":\"prb\",\n \"radio_DasH_resource_DasH_management_DasH_policy_DasH_dedicated_DasH_ratio\":20,\n \"administrative_DasH_state\":\"unlocked\"\n });\n}\n\nlinkFailureOutput.put(\"o_DasH_ran_DasH_sc_DasH_du_DasH_hello_DasH_world_ColoN_radio_DasH_resource_DasH_management_DasH_policy_DasH_ratio\", unlockMessageArray);\nexecutor.outFields.put(\"LinkFailureOutput\", linkFailureOutput.toString());\n\nexecutor.getExecutionProperties().setProperty(\"OduId\", oduId);\nexecutor.getExecutionProperties().setProperty(\"OruId\", 
oruId);\n\nexecutor.logger.info(executor.outFields);\n\ntrue;"}}},{"key":{"name":"NoPolicyDefinedTask","version":"0.0.1"},"value":{"key":{"name":"NoPolicyDefinedTask","version":"0.0.1"},"inputFields":{"entry":[{"key":"LinkFailureInput","value":{"key":"LinkFailureInput","fieldSchemaKey":{"name":"LinkFailureInputSchema","version":"0.0.1"},"optional":false}}]},"outputFields":{"entry":[{"key":"message","value":{"key":"message","fieldSchemaKey":{"name":"MessageSchema","version":"0.0.1"},"optional":false}}]},"taskParameters":{"entry":[]},"contextAlbumReference":[],"taskLogic":{"key":"TaskLogic","logicFlavour":"JAVASCRIPT","logic":"/*\n * ============LICENSE_START=======================================================\n * Copyright (C) 2021 Nordix Foundation.\n * ================================================================================\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * SPDX-License-Identifier: Apache-2.0\n * ============LICENSE_END=========================================================\n */\n\nexecutor.logger.info(\"Task Execution: '\"+executor.subject.id+\"'. 
Input Fields: '\"+executor.inFields+\"'\");\n\nexecutor.outFields.put(\"message\", \"No policy defined for this event\");\n\nexecutor.logger.info(executor.outFields);\n\ntrue;"}}}]}},"events":{"key":{"name":"LinkMonitorModel_Events","version":"0.0.1"},"eventMap":{"entry":[{"key":{"name":"ApexMessageOutputEvent","version":"0.0.1"},"value":{"key":{"name":"ApexMessageOutputEvent","version":"0.0.1"},"nameSpace":"org.onap.policy.apex.auth.clieditor","source":"APEX","target":"APEX","parameter":{"entry":[{"key":"message","value":{"key":"message","fieldSchemaKey":{"name":"MessageSchema","version":"0.0.1"},"optional":false}}]}}},{"key":{"name":"CreateLinkClearedOutfieldsEvent","version":"0.0.1"},"value":{"key":{"name":"CreateLinkClearedOutfieldsEvent","version":"0.0.1"},"nameSpace":"org.onap.policy.apex.auth.clieditor","source":"APEX","target":"APEX","parameter":{"entry":[{"key":"OruId","value":{"key":"OruId","fieldSchemaKey":{"name":"OruIdSchema","version":"0.0.1"},"optional":false}}]}}},{"key":{"name":"CreateLinkFailureOutfieldsEvent","version":"0.0.1"},"value":{"key":{"name":"CreateLinkFailureOutfieldsEvent","version":"0.0.1"},"nameSpace":"org.onap.policy.apex.auth.clieditor","source":"APEX","target":"APEX","parameter":{"entry":[{"key":"OduId","value":{"key":"OduId","fieldSchemaKey":{"name":"OduIdSchema","version":"0.0.1"},"optional":false}},{"key":"OruId","value":{"key":"OruId","fieldSchemaKey":{"name":"OruIdSchema","version":"0.0.1"},"optional":false}}]}}},{"key":{"name":"LinkFailureInputEvent","version":"0.0.1"},"value":{"key":{"name":"LinkFailureInputEvent","version":"0.0.1"},"nameSpace":"org.onap.policy.apex.auth.clieditor","source":"DMAAP","target":"APEX","parameter":{"entry":[{"key":"LinkFailureInput","value":{"key":"LinkFailureInput","fieldSchemaKey":{"name":"LinkFailureInputSchema","version":"0.0.1"},"optional":false}}]}}},{"key":{"name":"LinkFailureOutputEvent","version":"0.0.1"},"value":{"key":{"name":"LinkFailureOutputEvent","version":"0.0.1"},"nameSpace":"org
.onap.policy.apex.auth.clieditor","source":"APEX","target":"OAM","parameter":{"entry":[{"key":"LinkFailureOutput","value":{"key":"LinkFailureOutput","fieldSchemaKey":{"name":"LinkFailureOutputSchema","version":"0.0.1"},"optional":false}}]}}}]}},"schemas":{"key":{"name":"LinkMonitorModel_Schemas","version":"0.0.1"},"schemas":{"entry":[{"key":{"name":"LinkFailureInputSchema","version":"0.0.1"},"value":{"key":{"name":"LinkFailureInputSchema","version":"0.0.1"},"schemaFlavour":"Avro","schemaDefinition":"{\n \"type\": \"record\",\n \"name\": \"Link_Failure_Input\",\n \"fields\": [\n {\n \"name\": \"event\",\n \"type\": {\n \"type\": \"record\",\n \"name\": \"Event_Type\",\n \"fields\": [\n {\n \"name\": \"commonEventHeader\",\n \"type\": {\n \"type\": \"record\",\n \"name\": \"Common_Event_Header_Type\",\n \"fields\": [\n {\n \"name\": \"domain\",\n \"type\": \"string\"\n },\n {\n \"name\": \"eventId\",\n \"type\": \"string\"\n },\n {\n \"name\": \"eventName\",\n \"type\": \"string\"\n },\n {\n \"name\": \"eventType\",\n \"type\": \"string\"\n },\n {\n \"name\": \"sequence\",\n \"type\": \"int\"\n },\n {\n \"name\": \"priority\",\n \"type\": \"string\"\n },\n {\n \"name\": \"reportingEntityId\",\n \"type\": \"string\"\n },\n {\n \"name\": \"reportingEntityName\",\n \"type\": \"string\"\n },\n {\n \"name\": \"sourceId\",\n \"type\": \"string\"\n },\n {\n \"name\": \"sourceName\",\n \"type\": \"string\"\n },\n {\n \"name\": \"startEpochMicrosec\",\n \"type\": \"string\"\n },\n {\n \"name\": \"lastEpochMicrosec\",\n \"type\": \"string\"\n },\n {\n \"name\": \"nfNamingCode\",\n \"type\": \"string\"\n },\n {\n \"name\": \"nfVendorName\",\n \"type\": \"string\"\n },\n {\n \"name\": \"timeZoneOffset\",\n \"type\": \"string\"\n },\n {\n \"name\": \"version\",\n \"type\": \"string\"\n },\n {\n \"name\": \"vesEventListenerVersion\",\n \"type\": \"string\"\n }\n ]\n }\n },\n {\n \"name\": \"faultFields\",\n \"type\": {\n \"type\": \"record\",\n \"name\": \"Fault_Fields_Type\",\n 
\"fields\": [\n {\n \"name\": \"faultFieldsVersion\",\n \"type\": \"string\"\n },\n {\n \"name\": \"alarmCondition\",\n \"type\": \"string\"\n },\n {\n \"name\": \"alarmInterfaceA\",\n \"type\": \"string\"\n },\n {\n \"name\": \"eventSourceType\",\n \"type\": \"string\"\n },\n {\n \"name\": \"specificProblem\",\n \"type\": \"string\"\n },\n {\n \"name\": \"eventSeverity\",\n \"type\": \"string\"\n },\n {\n \"name\": \"vfStatus\",\n \"type\": \"string\"\n },\n {\n \"name\": \"alarmAdditionalInformation\",\n \"type\": {\n \"type\": \"record\",\n \"name\": \"Alarm_Additional_Information_Type\",\n \"fields\": [\n {\n \"name\": \"eventTime\",\n \"type\": \"string\"\n },\n {\n \"name\": \"equipType\",\n \"type\": \"string\"\n },\n {\n \"name\": \"vendor\",\n \"type\": \"string\"\n },\n {\n \"name\": \"model\",\n \"type\": \"string\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n }\n }\n ]\n}"}},{"key":{"name":"LinkFailureOutputSchema","version":"0.0.1"},"value":{"key":{"name":"LinkFailureOutputSchema","version":"0.0.1"},"schemaFlavour":"Avro","schemaDefinition":"{\n \"name\": \"Link_Failure_Output\",\n \"type\": \"record\",\n \"fields\": [\n {\n \"name\": \"o_DasH_ran_DasH_sc_DasH_du_DasH_hello_DasH_world_ColoN_radio_DasH_resource_DasH_management_DasH_policy_DasH_ratio\",\n \"type\": {\n \"type\": \"array\",\n \"items\": {\n \"name\": \"o_DasH_ran_DasH_sc_DasH_du_DasH_hello_DasH_world_ColoN_radio_DasH_resource_DasH_management_DasH_policy_DasH_ratio_record\",\n \"type\": \"record\",\n \"fields\": [\n {\n \"name\": \"id\",\n \"type\": \"string\"\n },\n {\n \"name\": \"radio_DasH_resource_DasH_management_DasH_policy_DasH_max_DasH_ratio\",\n \"type\": \"int\"\n },\n {\n \"name\": \"radio_DasH_resource_DasH_management_DasH_policy_DasH_members\",\n \"type\": {\n \"type\": \"array\",\n \"items\": {\n \"name\": \"radio_DasH_resource_DasH_management_DasH_policy_DasH_members_record\",\n \"type\": \"record\",\n \"fields\": [\n {\n \"name\": \"mobile_DasH_country_DasH_code\",\n \"type\": 
\"string\"\n },\n {\n \"name\": \"mobile_DasH_network_DasH_code\",\n \"type\": \"string\"\n },\n {\n \"name\": \"slice_DasH_differentiator\",\n \"type\": \"int\"\n },\n {\n \"name\": \"slice_DasH_service_DasH_type\",\n \"type\": \"int\"\n }\n ]\n }\n }\n },\n {\n \"name\": \"radio_DasH_resource_DasH_management_DasH_policy_DasH_min_DasH_ratio\",\n \"type\": \"int\"\n },\n {\n \"name\": \"user_DasH_label\",\n \"type\": \"string\"\n },\n {\n \"name\": \"resource_DasH_type\",\n \"type\": \"string\"\n },\n {\n \"name\": \"radio_DasH_resource_DasH_management_DasH_policy_DasH_dedicated_DasH_ratio\",\n \"type\": \"int\"\n },\n {\n \"name\": \"administrative_DasH_state\",\n \"type\": \"string\"\n }\n ]\n }\n }\n }\n ]\n}"}},{"key":{"name":"MessageSchema","version":"0.0.1"},"value":{"key":{"name":"MessageSchema","version":"0.0.1"},"schemaFlavour":"Java","schemaDefinition":"java.lang.String"}},{"key":{"name":"OduIdSchema","version":"0.0.1"},"value":{"key":{"name":"OduIdSchema","version":"0.0.1"},"schemaFlavour":"Java","schemaDefinition":"java.lang.String"}},{"key":{"name":"OruIdSchema","version":"0.0.1"},"value":{"key":{"name":"OruIdSchema","version":"0.0.1"},"schemaFlavour":"Java","schemaDefinition":"java.lang.String"}}]}}}}},"eventOutputParameters":{"RestProducer":{"carrierTechnologyParameters":{"carrierTechnology":"RESTCLIENT","parameterClassName":"org.onap.policy.apex.plugins.event.carrier.restclient.RestClientCarrierTechnologyParameters","parameters":{"url":"http://sdnr-sim:9990/rests/data/network-topology:network-topology/topology=topology-netconf/node={OduId}/yang-ext:mount/o-ran-sc-du-hello-world:network-function/distributed-unit-functions={OduId}/radio-resource-management-policy-ratio=rrm-pol-1","httpMethod":"PUT","httpHeaders":[["Authorization","Basic 
YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ=="]]}},"eventProtocolParameters":{"eventProtocol":"JSON","parameters":{"pojoField":"LinkFailureOutput"}},"eventNameFilter":"LinkFailureOutputEvent"},"StdOutProducer":{"carrierTechnologyParameters":{"carrierTechnology":"FILE","parameters":{"standardIo":true}},"eventProtocolParameters":{"eventProtocol":"JSON","parameters":{"pojoField":"message"}},"eventNameFilter":"ApexMessageOutputEvent"}},"eventInputParameters":{"DMaaPConsumer":{"carrierTechnologyParameters":{"carrierTechnology":"RESTCLIENT","parameterClassName":"org.onap.policy.apex.plugins.event.carrier.restclient.RestClientCarrierTechnologyParameters","parameters":{"url":"http://onap-dmaap:3904/events/unauthenticated.SEC_FAULT_OUTPUT/users/link-monitor-nonrtric?timeout=15000&limit=100"}},"eventProtocolParameters":{"eventProtocol":"JSON","parameters":{"versionAlias":"version","pojoField":"LinkFailureInput"}},"eventName":"LinkFailureInputEvent"}}}}}]}}
\ No newline at end of file
var unlockMessageArray = new java.util.ArrayList();
for (var i = 0; i < 1; i++) {
unlockMessageArray.add({
- "name" : oruId,
- "administrative_DasH_state" : "UNLOCKED"
- });
+ "id":"rrm-pol-1",
+ "radio_DasH_resource_DasH_management_DasH_policy_DasH_max_DasH_ratio":25,
+ "radio_DasH_resource_DasH_management_DasH_policy_DasH_members":
+ [
+ {
+ "mobile_DasH_country_DasH_code":"310",
+ "mobile_DasH_network_DasH_code":"150",
+ "slice_DasH_differentiator":1,
+ "slice_DasH_service_DasH_type":1
+ }
+ ],
+ "radio_DasH_resource_DasH_management_DasH_policy_DasH_min_DasH_ratio":15,
+ "user_DasH_label":"rrm-pol-1",
+ "resource_DasH_type":"prb",
+ "radio_DasH_resource_DasH_management_DasH_policy_DasH_dedicated_DasH_ratio":20,
+ "administrative_DasH_state":"unlocked"
+ });
}
-linkFailureOutput.put("o_DasH_ran_DasH_sc_DasH_du_DasH_hello_DasH_world_ColoN_du_DasH_to_DasH_ru_DasH_connection", unlockMessageArray);
+linkFailureOutput.put("o_DasH_ran_DasH_sc_DasH_du_DasH_hello_DasH_world_ColoN_radio_DasH_resource_DasH_management_DasH_policy_DasH_ratio", unlockMessageArray);
executor.outFields.put("LinkFailureOutput", linkFailureOutput.toString());
executor.getExecutionProperties().setProperty("OduId", oduId);
{
- "type": "record",
- "name": "Link_Failure_Output",
- "fields": [
- {
- "name": "o_DasH_ran_DasH_sc_DasH_du_DasH_hello_DasH_world_ColoN_du_DasH_to_DasH_ru_DasH_connection",
- "type": {
- "type": "array",
- "items": {
- "name": "Config_Change_Message",
- "type": "record",
- "fields": [
- {
- "name": "name",
- "type": "string"
- },
- {
- "name": "administrative_DasH_state",
- "type": "string"
- }
- ]
+ "name": "Link_Failure_Output",
+ "type": "record",
+ "fields": [
+ {
+ "name": "o_DasH_ran_DasH_sc_DasH_du_DasH_hello_DasH_world_ColoN_radio_DasH_resource_DasH_management_DasH_policy_DasH_ratio",
+ "type": {
+ "type": "array",
+ "items": {
+ "name": "o_DasH_ran_DasH_sc_DasH_du_DasH_hello_DasH_world_ColoN_radio_DasH_resource_DasH_management_DasH_policy_DasH_ratio_record",
+ "type": "record",
+ "fields": [
+ {
+ "name": "id",
+ "type": "string"
+ },
+ {
+ "name": "radio_DasH_resource_DasH_management_DasH_policy_DasH_max_DasH_ratio",
+ "type": "int"
+ },
+ {
+ "name": "radio_DasH_resource_DasH_management_DasH_policy_DasH_members",
+ "type": {
+ "type": "array",
+ "items": {
+ "name": "radio_DasH_resource_DasH_management_DasH_policy_DasH_members_record",
+ "type": "record",
+ "fields": [
+ {
+ "name": "mobile_DasH_country_DasH_code",
+ "type": "string"
+ },
+ {
+ "name": "mobile_DasH_network_DasH_code",
+ "type": "string"
+ },
+ {
+ "name": "slice_DasH_differentiator",
+ "type": "int"
+ },
+ {
+ "name": "slice_DasH_service_DasH_type",
+ "type": "int"
+ }
+ ]
}
- }
+ }
+ },
+ {
+ "name": "radio_DasH_resource_DasH_management_DasH_policy_DasH_min_DasH_ratio",
+ "type": "int"
+ },
+ {
+ "name": "user_DasH_label",
+ "type": "string"
+ },
+ {
+ "name": "resource_DasH_type",
+ "type": "string"
+ },
+ {
+ "name": "radio_DasH_resource_DasH_management_DasH_policy_DasH_dedicated_DasH_ratio",
+ "type": "int"
+ },
+ {
+ "name": "administrative_DasH_state",
+ "type": "string"
+ }
+ ]
}
- ]
-}
+ }
+ }
+ ]
+}
\ No newline at end of file
## Functionality
-The creation of the job is not done when the consumer is started. Instead the consumer provides a REST API where it can be started and stopped, described below.
+The creation of the job is not done when the consumer is started. Instead the consumer provides a REST API where it can be started and stopped, described below. The API is available on the host and port configured for the consumer
->- /start Creates the job in ICS.
->- /stop Deletes the job in ICS.
+>- /admin/start Creates the job in ICS.
+>- /admin/stop Deletes the job in ICS.
If the consumer is shut down with a SIGTERM, it will also delete the job before exiting.
+There is also a status call provided in the REST API. This will return the running status of the consumer as JSON.
+>- /status {"status": "started/stopped"}
+
## Development
To make it easy to test during development of the consumer, three stubs are provided in the `stub` folder.
# By default this file is in the docker build directory,
# but the location can configured in the JJB template.
---
-tag: 1.0.0
+tag: 1.0.1
}
func (c Config) String() string {
- return fmt.Sprintf("ConsumerHost: %v, ConsumerPort: %v, InfoCoordinatorAddress: %v, SDNRAddress: %v, SDNRUser: %v, SDNRPassword: %v, ORUToODUMapFile: %v, ConsumerCertPath: %v, ConsumerKeyPath: %v, LogLevel: %v", c.ConsumerHost, c.ConsumerPort, c.InfoCoordinatorAddress, c.SDNRAddress, c.SDNRUser, c.SDNPassword, c.ORUToODUMapFile, c.ConsumerCertPath, c.ConsumerKeyPath, c.LogLevel)
+ return fmt.Sprintf("{ConsumerHost: %v, ConsumerPort: %v, InfoCoordinatorAddress: %v, SDNRAddress: %v, SDNRUser: %v, SDNRPassword: %v, ORUToODUMapFile: %v, ConsumerCertPath: %v, ConsumerKeyPath: %v, LogLevel: %v}", c.ConsumerHost, c.ConsumerPort, c.InfoCoordinatorAddress, c.SDNRAddress, c.SDNRUser, c.SDNPassword, c.ORUToODUMapFile, c.ConsumerCertPath, c.ConsumerKeyPath, c.LogLevel)
}
func getEnv(key string, defaultVal string) string {
SDNRPassword string
}
-const rawSdnrPath = "/rests/data/network-topology:network-topology/topology=topology-netconf/node=[O-DU-ID]/yang-ext:mount/o-ran-sc-du-hello-world:network-function/du-to-ru-connection=[O-RU-ID]"
-const unlockMessage = `{"o-ran-sc-du-hello-world:du-to-ru-connection": [{"name":"[O-RU-ID]","administrative-state":"UNLOCKED"}]}`
+const rawSdnrPath = "/rests/data/network-topology:network-topology/topology=topology-netconf/node=[O-DU-ID]/yang-ext:mount/o-ran-sc-du-hello-world:network-function/distributed-unit-functions=[O-DU-ID]/radio-resource-management-policy-ratio=rrm-pol-1"
+const unlockMessage = `{"o-ran-sc-du-hello-world:radio-resource-management-policy-ratio":[{"id":"rrm-pol-1","radio-resource-management-policy-max-ratio":25,"radio-resource-management-policy-members":[{"mobile-country-code":"310","mobile-network-code":"150","slice-differentiator":1,"slice-service-type":1}],"radio-resource-management-policy-min-ratio":15,"user-label":"rrm-pol-1","resource-type":"prb","radio-resource-management-policy-dedicated-ratio":20,"administrative-state":"unlocked"}]}`
type LinkFailureHandler struct {
lookupService repository.LookupService
func (lfh LinkFailureHandler) sendUnlockMessage(oRuId string) {
if oDuId, err := lfh.lookupService.GetODuID(oRuId); err == nil {
- sdnrPath := getSdnrPath(oRuId, oDuId)
- unlockMessage := lfh.getUnlockMessage(oRuId)
+ sdnrPath := getSdnrPath(oDuId)
if error := restclient.Put(lfh.config.SDNRAddress+sdnrPath, unlockMessage, lfh.client, lfh.config.SDNRUser, lfh.config.SDNRPassword); error == nil {
log.Debugf("Sent unlock message for O-RU: %v to O-DU: %v.", oRuId, oDuId)
} else {
}
-func getSdnrPath(oRuId string, oDuId string) string {
- sdnrPath := strings.Replace(rawSdnrPath, "[O-DU-ID]", oDuId, 1)
- sdnrPath = strings.Replace(sdnrPath, "[O-RU-ID]", oRuId, 1)
+func getSdnrPath(oDuId string) string {
+ sdnrPath := strings.Replace(rawSdnrPath, "[O-DU-ID]", oDuId, -1)
return sdnrPath
}
-
-func (lfh LinkFailureHandler) getUnlockMessage(oRuId string) string {
- return strings.Replace(unlockMessage, "[O-RU-ID]", oRuId, 1)
-}
lookupServiceMock := mocks.LookupService{}
- lookupServiceMock.On("GetODuID", mock.Anything).Return("HCL-O-DU-1122", nil)
+ lookupServiceMock.On("GetODuID", mock.Anything).Return("O-DU-1122", nil)
handlerUnderTest := NewLinkFailureHandler(&lookupServiceMock, Configuration{
SDNRAddress: "http://localhost:9990",
assertions.Equal(http.MethodPut, actualRequest.Method)
assertions.Equal("http", actualRequest.URL.Scheme)
assertions.Equal("localhost:9990", actualRequest.URL.Host)
- expectedSdnrPath := "/rests/data/network-topology:network-topology/topology=topology-netconf/node=HCL-O-DU-1122/yang-ext:mount/o-ran-sc-du-hello-world:network-function/du-to-ru-connection=ERICSSON-O-RU-11220"
+ expectedSdnrPath := "/rests/data/network-topology:network-topology/topology=topology-netconf/node=O-DU-1122/yang-ext:mount/o-ran-sc-du-hello-world:network-function/distributed-unit-functions=O-DU-1122/radio-resource-management-policy-ratio=rrm-pol-1"
assertions.Equal(expectedSdnrPath, actualRequest.URL.Path)
assertions.Equal("application/json; charset=utf-8", actualRequest.Header.Get("Content-Type"))
tempRequest, _ := http.NewRequest("", "", nil)
tempRequest.SetBasicAuth("admin", "pwd")
assertions.Equal(tempRequest.Header.Get("Authorization"), actualRequest.Header.Get("Authorization"))
body, _ := ioutil.ReadAll(actualRequest.Body)
- expectedBody := []byte(`{"o-ran-sc-du-hello-world:du-to-ru-connection": [{"name":"ERICSSON-O-RU-11220","administrative-state":"UNLOCKED"}]}`)
+ expectedBody := []byte(`{"o-ran-sc-du-hello-world:radio-resource-management-policy-ratio":[{"id":"rrm-pol-1","radio-resource-management-policy-max-ratio":25,"radio-resource-management-policy-members":[{"mobile-country-code":"310","mobile-network-code":"150","slice-differentiator":1,"slice-service-type":1}],"radio-resource-management-policy-min-ratio":15,"user-label":"rrm-pol-1","resource-type":"prb","radio-resource-management-policy-dedicated-ratio":20,"administrative-state":"unlocked"}]}`)
assertions.Equal(expectedBody, body)
clientMock.AssertNumberOfCalls(t, "Do", 1)
logString := buf.String()
assertions.Contains(logString, "Sent unlock message")
assertions.Contains(logString, "O-RU: ERICSSON-O-RU-11220")
- assertions.Contains(logString, "O-DU: HCL-O-DU-1122")
+ assertions.Contains(logString, "O-DU: O-DU-1122")
}
func newRequest(method string, url string, bodyAsBytes []byte, t *testing.T) *http.Request {
lookupServiceMock := mocks.LookupService{}
- lookupServiceMock.On("GetODuID", mock.Anything).Return("HCL-O-DU-1122", nil)
+ lookupServiceMock.On("GetODuID", mock.Anything).Return("O-DU-1122", nil)
handlerUnderTest := NewLinkFailureHandler(&lookupServiceMock, Configuration{}, nil)
var linkfailureConfig linkfailure.Configuration
var lookupService repository.LookupService
var consumerPort string
+var started bool
func init() {
doInit()
r := mux.NewRouter()
r.HandleFunc("/", messageHandler.MessagesHandler).Methods(http.MethodPost).Name("messageHandler")
+ r.HandleFunc("/status", statusHandler).Methods(http.MethodGet).Name("status")
r.HandleFunc("/admin/start", startHandler).Methods(http.MethodPost).Name("start")
r.HandleFunc("/admin/stop", stopHandler).Methods(http.MethodPost).Name("stop")
return
}
log.Debug("Registered job.")
+ started = true
}
func stopHandler(w http.ResponseWriter, r *http.Request) {
return
}
log.Debug("Deleted job.")
+ started = false
+}
+
+func statusHandler(w http.ResponseWriter, r *http.Request) {
+ runStatus := "started"
+ if !started {
+ runStatus = "stopped"
+ }
+ fmt.Fprintf(w, `{"status": "%v"}`, runStatus)
}
func deleteOnShutdown(s chan os.Signal) {
assertions.Nil(err)
path, _ = stopHandlerRoute.GetPathTemplate()
assertions.Equal("/admin/stop", path)
+
+ statusHandlerRoute := r.Get("status")
+ assertions.NotNil(statusHandlerRoute)
+ supportedMethods, err = statusHandlerRoute.GetMethods()
+ assertions.Equal([]string{http.MethodGet}, supportedMethods)
+ assertions.Nil(err)
+ path, _ = statusHandlerRoute.GetPathTemplate()
+ assertions.Equal("/status", path)
}
func Test_startHandler(t *testing.T) {
expectedBody := wantedBody
assertions.Equal(expectedBody, body)
clientMock.AssertNumberOfCalls(t, "Do", 1)
+
+ // Check that the running status is "started"
+ statusHandler := http.HandlerFunc(statusHandler)
+ statusResponseRecorder := httptest.NewRecorder()
+ statusRequest, _ := http.NewRequest(http.MethodGet, "/status", nil)
+
+ statusHandler.ServeHTTP(statusResponseRecorder, statusRequest)
+
+ assertions.Equal(http.StatusOK, statusResponseRecorder.Code)
+ assertions.Equal(`{"status": "started"}`, statusResponseRecorder.Body.String())
})
}
}
assertions.Equal("enrichmentservice:8083", actualRequest.URL.Host)
assertions.Equal("/data-consumer/v1/info-jobs/14e7bb84-a44d-44c1-90b7-6995a92ad43c", actualRequest.URL.Path)
clientMock.AssertNumberOfCalls(t, "Do", 1)
+
+ // Check that the running status is "stopped"
+ statusHandler := http.HandlerFunc(statusHandler)
+ statusResponseRecorder := httptest.NewRecorder()
+ statusRequest, _ := http.NewRequest(http.MethodGet, "/status", nil)
+
+ statusHandler.ServeHTTP(statusResponseRecorder, statusRequest)
+
+ assertions.Equal(http.StatusOK, statusResponseRecorder.Code)
+ assertions.Equal(`{"status": "stopped"}`, statusResponseRecorder.Body.String())
})
}
}
-ERICSSON-O-RU-11220,HCL-O-DU-1122
-ERICSSON-O-RU-11221,HCL-O-DU-1122
-ERICSSON-O-RU-11222,HCL-O-DU-1122
-ERICSSON-O-RU-11223,HCL-O-DU-1122
-ERICSSON-O-RU-11223,HCL-O-DU-1122
-ERICSSON-O-RU-11224,HCL-O-DU-1123
-ERICSSON-O-RU-11225,HCL-O-DU-1123
-ERICSSON-O-RU-11226,HCL-O-DU-1123
-ERICSSON-O-RU-11227,HCL-O-DU-1124
-ERICSSON-O-RU-11228,HCL-O-DU-1125
-ERICSSON-O-RU-11229,HCL-O-DU-1125
\ No newline at end of file
+ERICSSON-O-RU-11220,O-DU-1122
+ERICSSON-O-RU-11221,O-DU-1122
+ERICSSON-O-RU-11222,O-DU-1122
+ERICSSON-O-RU-11223,O-DU-1122
+ERICSSON-O-RU-11223,O-DU-1122
+ERICSSON-O-RU-11224,O-DU-1123
+ERICSSON-O-RU-11225,O-DU-1123
+ERICSSON-O-RU-11226,O-DU-1123
+ERICSSON-O-RU-11227,O-DU-1124
+ERICSSON-O-RU-11228,O-DU-1125
+ERICSSON-O-RU-11229,O-DU-1125
\ No newline at end of file
+++ /dev/null
-<!--
- ============LICENSE_START=======================================================
- Copyright (C) 2021 Nordix Foundation.
- ================================================================================
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
- SPDX-License-Identifier: Apache-2.0
- ============LICENSE_END=========================================================
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
- <modelVersion>4.0.0</modelVersion>
-
- <groupId>oransc.org</groupId>
- <artifactId>o-ru-closed-loop-consumer</artifactId>
- <version>1.0.0</version>
- <properties>
- <docker-maven-plugin.version>0.30.0</docker-maven-plugin.version>
- </properties>
-
- <build>
- <plugins>
- <plugin>
- <artifactId>exec-maven-plugin</artifactId>
- <groupId>org.codehaus.mojo</groupId>
- <executions>
- <execution>
- <id>Build Go binary</id>
- <phase>generate-sources</phase>
- <goals>
- <goal>exec</goal>
- </goals>
- <configuration>
- <executable>${basedir}/build_and_test.sh</executable>
- </configuration>
- </execution>
- </executions>
- </plugin>
- <plugin>
- <groupId>io.fabric8</groupId>
- <artifactId>docker-maven-plugin</artifactId>
- <version>${docker-maven-plugin.version}</version>
- <inherited>false</inherited>
- <executions>
- <execution>
- <id>generate-nonrtric-o-ru-closed-loop-consumer-image</id>
- <phase>package</phase>
- <goals>
- <goal>build</goal>
- </goals>
- <configuration>
- <pullRegistry>${env.CONTAINER_PULL_REGISTRY}</pullRegistry>
- <images>
- <image>
- <name>o-ran-sc/nonrtric-o-ru-closed-loop-consumer:${project.version}</name>
- <build>
- <cleanup>try</cleanup>
- <contextDir>${basedir}</contextDir>
- <dockerFile>Dockerfile</dockerFile>
- <args>
- <JAR>${project.build.finalName}.jar</JAR>
- </args>
- <tags>
- <tag>${project.version}</tag>
- </tags>
- </build>
- </image>
- </images>
- </configuration>
- </execution>
- <execution>
- <id>push-nonrtric-o-ru-closed-loop-consumer-image</id>
- <goals>
- <goal>build</goal>
- <goal>push</goal>
- </goals>
- <configuration>
- <pullRegistry>${env.CONTAINER_PULL_REGISTRY}</pullRegistry>
- <pushRegistry>${env.CONTAINER_PUSH_REGISTRY}</pushRegistry>
- <images>
- <image>
- <name>o-ran-sc/nonrtric-o-ru-closed-loop-consumer:${project.version}</name>
- <build>
- <contextDir>${basedir}</contextDir>
- <dockerFile>Dockerfile</dockerFile>
- <args>
- <JAR>${project.build.finalName}.jar</JAR>
- </args>
- <tags>
- <tag>${project.version}</tag>
- <tag>latest</tag>
- </tags>
- </build>
- </image>
- </images>
- </configuration>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
-</project>
func main() {
port := flag.Int("port", 8083, "The port this consumer will listen on")
flag.Parse()
- fmt.Println("Starting SDNR stub on port ", *port)
+ fmt.Println("Starting ICS stub on port ", *port)
r := mux.NewRouter()
r.HandleFunc("/data-consumer/v1/info-jobs/{jobId}", handleCalls).Methods(http.MethodPut, http.MethodDelete)
started = true
fmt.Println("Start pushing messages for job: ", id)
- startPushingMessages()
+ go startPushingMessages()
}
func deleteJobHandler(w http.ResponseWriter, r *http.Request) {
flag.Parse()
r := mux.NewRouter()
- r.HandleFunc("/rests/data/network-topology:network-topology/topology=topology-netconf/node={O-DU-ID}/yang-ext:mount/o-ran-sc-du-hello-world:network-function/du-to-ru-connection={O-RU-ID}", handleData)
+ r.HandleFunc("/rests/data/network-topology:network-topology/topology=topology-netconf/node={O-DU-ID}/yang-ext:mount/o-ran-sc-du-hello-world:network-function/distributed-unit-functions={O-DU-ID}/radio-resource-management-policy-ratio=rrm-pol-1", handleData)
fmt.Println("Starting SDNR on port: ", *port)
fmt.Println(http.ListenAndServe(fmt.Sprintf(":%v", *port), r))
RUN pip install -r requirements.txt
+ARG user=nonrtric
+ARG group=nonrtric
+
+RUN groupadd $user && \
+ useradd -r -g $group $user
+RUN chown -R $user:$group /usr/src/app/
+
+USER ${user}
+
CMD [ "python3", "-u", "main.py" ]
import time
MR_PATH = "/events/[TOPIC]/users/test/"
-SDNR_PATH = "/rests/data/network-topology:network-topology/topology=topology-netconf/node=[O-DU-ID]/yang-ext:mount/o-ran-sc-du-hello-world:network-function/du-to-ru-connection=[O-RU-ID]"
+SDNR_PATH = "/rests/data/network-topology:network-topology/topology=topology-netconf/node=[O-DU-ID]/yang-ext:mount/o-ran-sc-du-hello-world:network-function/distributed-unit-functions=[O-DU-ID]/radio-resource-management-policy-ratio=rrm-pol-1"
FAUILT_ID = "28"
UNLOCK_MESSAGE = {
- "o-ran-sc-du-hello-world:du-to-ru-connection": [
+ "o-ran-sc-du-hello-world:radio-resource-management-policy-ratio":
+ [
{
- "name":"",
- "administrative-state":"UNLOCKED"
+ "id":"rrm-pol-1",
+ "radio-resource-management-policy-max-ratio":25,
+ "radio-resource-management-policy-members":
+ [
+ {
+ "mobile-country-code":"310",
+ "mobile-network-code":"150",
+ "slice-differentiator":1,
+ "slice-service-type":1
+ }
+ ],
+ "radio-resource-management-policy-min-ratio":15,
+ "user-label":"rrm-pol-1",
+ "resource-type":"prb",
+ "radio-resource-management-policy-dedicated-ratio":20,
+ "administrative-state":"unlocked"
}
]
}
o_du_id = o_ru_to_o_du_map[o_ru_id]
verboseprint("O-DU ID: " + o_du_id)
unlock_msg = json.loads(json.dumps(UNLOCK_MESSAGE))
- unlock_msg["o-ran-sc-du-hello-world:du-to-ru-connection"][0]["name"] = o_ru_id
- send_path = SDNR_PATH.replace("[O-DU-ID]", o_du_id).replace("[O-RU-ID]", o_ru_id)
+ send_path = SDNR_PATH.replace("[O-DU-ID]", o_du_id)
requests.put(sdnr_address + send_path, auth=(sdnr_user, sdnr_pwd), json=unlock_msg)
else:
print("ERROR: No mapping for O-RU ID: " + o_ru_id)
{
- "ERICSSON-O-RU-11220": "HCL-O-DU-1122",
- "ERICSSON-O-RU-11221": "HCL-O-DU-1122",
- "ERICSSON-O-RU-11222": "HCL-O-DU-1122",
- "ERICSSON-O-RU-11223": "HCL-O-DU-1122",
- "ERICSSON-O-RU-11223": "HCL-O-DU-1122",
- "ERICSSON-O-RU-11224": "HCL-O-DU-1123",
- "ERICSSON-O-RU-11225": "HCL-O-DU-1123",
- "ERICSSON-O-RU-11226": "HCL-O-DU-1123",
- "ERICSSON-O-RU-11227": "HCL-O-DU-1124",
- "ERICSSON-O-RU-11228": "HCL-O-DU-1125",
- "ERICSSON-O-RU-11229": "HCL-O-DU-1125",
+ "ERICSSON-O-RU-11220": "O-DU-1122",
+ "ERICSSON-O-RU-11221": "O-DU-1122",
+ "ERICSSON-O-RU-11222": "O-DU-1122",
+ "ERICSSON-O-RU-11223": "O-DU-1122",
+ "ERICSSON-O-RU-11223": "O-DU-1122",
+ "ERICSSON-O-RU-11224": "O-DU-1123",
+ "ERICSSON-O-RU-11225": "O-DU-1123",
+ "ERICSSON-O-RU-11226": "O-DU-1123",
+ "ERICSSON-O-RU-11227": "O-DU-1124",
+ "ERICSSON-O-RU-11228": "O-DU-1125",
+ "ERICSSON-O-RU-11229": "O-DU-1125",
}
\ No newline at end of file
# Server info
HOST_IP = "::"
HOST_PORT = 9990
-APP_URL = "/rests/data/network-topology:network-topology/topology=topology-netconf/node=<string:o_du_id>/yang-ext:mount/o-ran-sc-du-hello-world:network-function/du-to-ru-connection=<string:o_ru_id>"
+APP_URL = "/rests/data/network-topology:network-topology/topology=topology-netconf/node=<string:o_du_id>/yang-ext:mount/o-ran-sc-du-hello-world:network-function/distributed-unit-functions=<string:o_du_id2>/radio-resource-management-policy-ratio=rrm-pol-1"
USERNAME = "admin"
PASSWORD = "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U"
class AlarmClearThread (threading.Thread):
- def __init__(self, sleep_time, o_ru_id):
+ def __init__(self, sleep_time, o_du_id):
threading.Thread.__init__(self)
self.sleep_time = sleep_time
- self.o_ru_id = o_ru_id
+ self.o_du_id = o_du_id
def run(self):
- print(f'Sleeping: {self.sleep_time} before clearing O-DU: {self.o_ru_id}')
+ print(f'Sleeping: {self.sleep_time} before clearing O-DU: {self.o_du_id}')
time.sleep(self.sleep_time)
msg_as_json = json.loads(json.dumps(linkFailureMessage))
- msg_as_json["event"]["commonEventHeader"]["sourceName"] = self.o_ru_id
- print("Sedning alarm clear for O-RU: " + self.o_ru_id)
+ msg_as_json["event"]["commonEventHeader"]["sourceName"] = self.o_du_id
+ print("Sedning alarm clear for O-DU: " + self.o_du_id)
requests.post(mr_host + ":" + mr_port + MR_PATH, json=msg_as_json);
@app.route(APP_URL,
methods=['PUT'])
@auth.login_required
-def sendrequest(o_du_id, o_ru_id):
- print("Got request with O-DU ID: " + o_du_id + " and O-RU ID: " + o_ru_id)
+def sendrequest(o_du_id, o_du_id2):
+ print("Got request with O-DU ID: " + o_du_id)
random_time = int(10 * random.random())
- alarm_clear_thread = AlarmClearThread(random_time, o_ru_id)
+ alarm_clear_thread = AlarmClearThread(random_time, o_du_id)
alarm_clear_thread.start()
return Response(status=200)