Updated installation and components (refs/changes/02/11102/5)
author BjornMagnussonXA <bjorn.magnusson@est.tech>
Mon, 15 May 2023 09:23:35 +0000 (11:23 +0200)
committer BjornMagnussonXA <bjorn.magnusson@est.tech>
Tue, 23 May 2023 10:44:08 +0000 (12:44 +0200)
Issue-ID: NONRTRIC-854
Signed-off-by: BjornMagnussonXA <bjorn.magnusson@est.tech>
Change-Id: I6d23663dedb09eb0684e158112a39db41476ec76

73 files changed:
https-server/Dockerfile
https-server/README.md
https-server/build.sh
https-server/certs/.gitignore [deleted file]
https-server/certs/server.crt [new file with mode: 0644]
https-server/certs/server.key [new file with mode: 0644]
https-server/gen-cert.sh
install/README.md [new file with mode: 0644]
install/TODO.txt [deleted file]
install/helm/global-values.yaml [moved from install/helm/nrt-pm/charts/dfc/values.yaml with 92% similarity]
install/helm/nrt-base-0/charts/kafka-client/Chart.yaml
install/helm/nrt-base-0/charts/kafka-client/templates/app-pod.yaml
install/helm/nrt-base-1/charts/minio/templates/app-statefulset.yaml
install/helm/nrt-base-1/charts/minio/values.yaml
install/helm/nrt-base-1/charts/ves-mr/Chart.yaml [new file with mode: 0644]
install/helm/nrt-base-1/charts/ves-mr/config/MsgRtrApi.properties [new file with mode: 0644]
install/helm/nrt-base-1/charts/ves-mr/config/collector.properties [new file with mode: 0644]
install/helm/nrt-base-1/charts/ves-mr/config/logback.xml [new file with mode: 0644]
install/helm/nrt-base-1/charts/ves-mr/config/ves-dmaap-config.json [new file with mode: 0644]
install/helm/nrt-base-1/charts/ves-mr/templates/app-configmap1.yaml [new file with mode: 0644]
install/helm/nrt-base-1/charts/ves-mr/templates/app-configmap2.yaml [moved from install/helm/nrt-pm-log/values.yaml with 85% similarity]
install/helm/nrt-base-1/charts/ves-mr/templates/app-configmap3.yaml [new file with mode: 0644]
install/helm/nrt-base-1/charts/ves-mr/templates/app-configmap4.yaml [new file with mode: 0644]
install/helm/nrt-base-1/charts/ves-mr/templates/app-deployment-message-router.yaml [new file with mode: 0644]
install/helm/nrt-base-1/charts/ves-mr/templates/app-deployment-ves-collector.yaml [new file with mode: 0644]
install/helm/nrt-base-1/charts/ves-mr/templates/app-deployment-zk-tunnel.yaml [new file with mode: 0644]
install/helm/nrt-base-1/charts/ves-mr/templates/app-networkpolicy-zk-tunnel.yaml [new file with mode: 0644]
install/helm/nrt-base-1/charts/ves-mr/templates/app-service-message-router.yaml [new file with mode: 0644]
install/helm/nrt-base-1/charts/ves-mr/templates/app-service-ves-collector.yaml [new file with mode: 0644]
install/helm/nrt-base-1/charts/ves-mr/templates/app-service-zk-tunnel.yaml [new file with mode: 0644]
install/helm/nrt-pm-log/.gitignore
install/helm/nrt-pm-log/TODO.txt [deleted file]
install/helm/nrt-pm-log/config/jobDefinition.json
install/helm/nrt-pm-log/templates/app-statefulset.yaml
install/helm/nrt-pm-rapp/templates/app-pod.yaml
install/helm/nrt-pm/charts/dfc/.gitignore [new file with mode: 0644]
install/helm/nrt-pm/charts/dfc/templates/app-statefulset.yaml
install/helm/nrt-pm/charts/ics/templates/app-deployment.yaml
install/helm/nrt-pm/charts/kafka-producer-pm-json2influx/templates/app-statefulset.yaml
install/helm/nrt-pm/charts/kafka-producer-pm-json2kafka/templates/app-statefulset.yaml
install/helm/nrt-pm/charts/kafka-producer-pm-xml2json/templates/app-statefulset.yaml
install/helm/nrt-pm/charts/pm-producer-json2kafka/.gitignore [new file with mode: 0644]
install/helm/nrt-pm/charts/pm-producer-json2kafka/templates/app-statefulset.yaml
install/helm/nrt-pm/charts/pm-producer-json2kafka/values.yaml
install/helm/ran/templates/app-deployment.yaml
install/install-nrt.sh
install/install-pm-influx-job.sh
install/install-pm-log.sh
install/install-pm-rapp.sh
install/opa-rules/README.md
install/scripts/README.md [new file with mode: 0644]
install/scripts/create_ics_job.sh [changed mode: 0644->0755]
install/scripts/create_topic.sh
install/scripts/get_influxdb2_token.sh
install/scripts/kafka-client-send-genfiles-file-ready.sh
install/scripts/populate_keycloak.sh
install/scripts/push-genfiles-to-file-ready-topic.sh
install/scripts/push-genfiles-to-ves-collector.sh [new file with mode: 0755]
install/uninstall-nrt.sh
pm-file-converter/.gitignore
pm-file-converter/Dockerfile
pm-file-converter/README.md
pm-file-converter/build.sh
pm-file-converter/gen-cert.sh
pm-file-converter/main.go
pm-file-converter/server.crt [new file with mode: 0644]
pm-file-converter/server.key [new file with mode: 0644]
pm-rapp/Dockerfile
pm-rapp/README.md
pm-rapp/TODO.txt [deleted file]
pm-rapp/build.sh
pm-rapp/container.yaml
pm-rapp/main.go

index 941acb2..7792de2 100644 (file)
 #  ============LICENSE_END=================================================
 #
 
-FROM golang:1.19-bullseye AS build
+FROM golang:1.20.3-buster AS build
+
 WORKDIR /app
-COPY go.mod .
-COPY go.sum .
-RUN go mod download
+
 COPY main.go .
+RUN go mod init main
+RUN go mod tidy
+
 RUN go build -o /pm-https-server
 
 #Replaced distroless image with ubuntu for debug purposes
index 757eeec..e5e6e93 100644 (file)
@@ -5,7 +5,7 @@
 
 This server can be used to simulate a RAN node for file download over https.
 Files can be requested in three ways:
-- static file (always the same files returned)
+- static file (always the same file returned)
 - semi-static files (the requested file must exist in the container)
 - generated files (file contents are generated using a template where the start/stop time as well as the node name are based on the requested file. Counter values are also generated)
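
For illustration, a download in any of the modes is a plain https GET of a file url. A sketch (host, port and file name are placeholders; the file-name convention follows the generated-file comments later in this commit):

```bash
# Fetch a pm file over https (server address and file name are illustrative)
curl -k "https://<server-host>:<server-port>/files/A20230515.0700+0100-0715+0100_node1.xml.gz" -O
```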
 
@@ -20,7 +20,7 @@ Build for remote kubernetes - an externally accessible image repo (e.g. docker h
 
 
 ### Configuration
-The following env vars (all optional) may be set to control the behavior of the server
+The following env vars (all optional) may be set to control the behaviour of the server
 
 - ALWAYS_RETURN - Name of a file under "/files" in the container that is always returned regardless of the requested file on the url `/files/<file-id>`. This can be used when the file contents are not important.
 
@@ -30,7 +30,7 @@ The following env vars (all optional) may be set to control the behavior of the
 
 If generated files shall be used, load the file pm-template.xml.gz to the /template-files dir in the container.
 
-Configure the following for desired behaviou
+Configure the following for desired behaviour
 - static file: ALWAYS_RETURN
 - semi-static files: none
 - generated files: GENERATED_FILE_START_TIME and GENERATED_FILE_TIMEZONE
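
As a sketch, a local run configured for generated files might look like this (image name, port mapping and values are illustrative; in the helm charts these are set as container env vars):

```bash
# Run the server in "generated files" mode (values are examples)
docker run -d -p 8443:443 \
    -e GENERATED_FILE_START_TIME="20230515.0700" \
    -e GENERATED_FILE_TIMEZONE="+0100" \
    pm-https-server:latest
```
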
index c7aabac..8647a0b 100755 (executable)
 #
 
 # Build image from Dockerfile with/without custom image tag
-# Optionally push to external docker hub repo
+# Optionally push to external image repo
 
 print_usage() {
     echo "Usage: build.sh no-push|<docker-hub-repo-name> [<image-tag>]"
     exit 1
 }
 
-if [ $# -ne 1 ] && [ $# -ne 2 ]; then
+if [ $# -lt 1 ] || [ $# -gt 2 ]; then
     print_usage
 fi
 
@@ -39,14 +39,39 @@ else
     echo "Attempt to push built image to: "$REPO
 fi
 
-if [ "$2" != "" ]; then
-    IMAGE_TAG=$2
-fi
- echo "Setting image tag to: "$IMAGE_TAG
+shift
+while [ $# -ne 0 ]; do
+    if [ $1 == "--tag" ]; then
+        shift
+        if [ -z "$1" ]; then
+            print_usage
+        fi
+        IMAGE_TAG=$1
+        echo "Setting image tag to: "$IMAGE_TAG
+        shift
+    else
+        echo "Unknown parameter: $1"
+        print_usage
+    fi
+done
+
+./gen-cert.sh
+
+echo ""
+echo "Certs generated"
 
 IMAGE=$IMAGE_NAME:$IMAGE_TAG
-echo "Building image $IMAGE"
+
+export DOCKER_DEFAULT_PLATFORM=linux/amd64
+CURRENT_PLATFORM=$(docker system info --format '{{.OSType}}/{{.Architecture}}')
+if [ $CURRENT_PLATFORM != $DOCKER_DEFAULT_PLATFORM ]; then
+    echo "Image may not work on the current platform: $CURRENT_PLATFORM, only platform $DOCKER_DEFAULT_PLATFORM supported"
+fi
+
+echo "Building image: $IMAGE with architecture: $DOCKER_DEFAULT_PLATFORM"
+
 docker build -t $IMAGE_NAME:$IMAGE_TAG .
+
 if [ $? -ne 0 ]; then
     echo "BUILD FAILED"
     exit 1
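
With the argument parsing above, invocations would look like this (repo and tag values are illustrative):

```bash
./build.sh no-push                  # local build only, default tag
./build.sh myrepo --tag 1.0.0       # build and push to external repo "myrepo" with tag 1.0.0
```
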
diff --git a/https-server/certs/.gitignore b/https-server/certs/.gitignore
deleted file mode 100644 (file)
index be870b4..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-*.crt
-*.key
diff --git a/https-server/certs/server.crt b/https-server/certs/server.crt
new file mode 100644 (file)
index 0000000..f80319e
--- /dev/null
@@ -0,0 +1,16 @@
+-----BEGIN CERTIFICATE-----
+MIICmDCCAYACCQCzTWEB5G++JDANBgkqhkiG9w0BAQsFADANMQswCQYDVQQGEwJT
+RTAgFw0yMzA1MTcxMjQ2MjVaGA8yMDUwMTAwMTEyNDYyNVowDTELMAkGA1UEBhMC
+U0UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDrFmC9idjCLOYkV0Nb
+R61ZCEBNuLF+UAsZ7GrS+WaME81CuEDZLeHsqwCOiPKf6XF23XHoXqZ+lFY2+Bv0
+uf+FcR3Y7p3BcQn4Ier65ybvHRc/53RspOLdi4TXttEjhGEJsApFsnOSYEkDv/1x
+TU9etwUvjWNdopKQ/fszV6WB1JgLvdSC4US+Do/+V3vUJWW8aJhTfUUtUfyVreJr
++/Rs+lfyMcoBv6d7SsyImO4Lq1Gv4giyKu4D3R9Vvz6CNp2mTKrhgiVbnSKMl1cm
+bkewFPuAvbjxxNGXUWipGug3pJgTxWS8/IyfyQ6TXl5fWvDRt5wrXNbc3nrJVoSs
+1QPFAgMBAAEwDQYJKoZIhvcNAQELBQADggEBACfMN8UZSGzHNsFbiuDoy0hSZzDk
+2ZpJV+BgtH/119lgIRzEEOC749rx68T55B8vKH8go1Pio3sVeDp2bZ7phG9Bcxn9
+hTIN9VJQ9vVnPrnaPzDuQdzY+4FyTMcMXtgHfC7Nu4bYor+rXbqdmv+RrucG9jpg
+uZaLdgtyYK+vpEqLauRYc3wWzyDtV6Td/8r4htxf+zslWvrQ1AXEXf5uuozWTTZJ
+g23vQ243NIQ9MF430QS40uvBBssACeI3NG8aD/OfhWO7TIKr69y+EfHu7i/hcFO1
+LkRj+CMt70TQMULHGeG7CP8VZJNHJz0kCZWj6R+Z9MOA/dVaIiyAS5LI/uY=
+-----END CERTIFICATE-----
diff --git a/https-server/certs/server.key b/https-server/certs/server.key
new file mode 100644 (file)
index 0000000..927d144
--- /dev/null
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDrFmC9idjCLOYk
+V0NbR61ZCEBNuLF+UAsZ7GrS+WaME81CuEDZLeHsqwCOiPKf6XF23XHoXqZ+lFY2
++Bv0uf+FcR3Y7p3BcQn4Ier65ybvHRc/53RspOLdi4TXttEjhGEJsApFsnOSYEkD
+v/1xTU9etwUvjWNdopKQ/fszV6WB1JgLvdSC4US+Do/+V3vUJWW8aJhTfUUtUfyV
+reJr+/Rs+lfyMcoBv6d7SsyImO4Lq1Gv4giyKu4D3R9Vvz6CNp2mTKrhgiVbnSKM
+l1cmbkewFPuAvbjxxNGXUWipGug3pJgTxWS8/IyfyQ6TXl5fWvDRt5wrXNbc3nrJ
+VoSs1QPFAgMBAAECggEAFlnnUr4DbNrF+tiNH+WdtqRRMNKJlZ/YnltbALoTpOfR
+ETHhgISbQVw0zlh48PlJ/2oohVZScCB8XfeS+N6iS7aohtKRDy5HK19WLwrBKeTT
+LBE+gYHfy/6S38uS8NSKQViKcXv4/wbGimO7ngUisbem95FyzBlD5CMxufzwUHqO
+WlrDpau38ehrNhXR+4gaU5fC0X7/njOYGjWXNmXAlswAs4TUO8gPwZdufzZyadoS
+AVJ9ZGsqn4VlxdIwyY0CiICtdKDmKX7YG3hYKay9LisSm95+Go0rzvPpa4SMWvV9
+tWzzWtdh+MsNbeI0OknXILQk3ifaFzhh/KZRBwOq4QKBgQD9By9qTe0aEyVi0oVH
+6Jy265c/aglQfkU9aIV5WTzF+JsLXYvoOBKT8Bk9GdxEaM9jL+h6BqZqcVqujqxx
+Ntcxks2tBh4glcbnPScOzur+fIYLXcjirZJNx3yft9DfKZjS5lNbKTswHuosGCPK
+LPnuhnz7I5I8aDkFhADeuTOV2QKBgQDt2T99n0Q3QURBu+eQbcUAVzPvJhoOrqvb
+Kc0vRGt9+O3NXEH5Q2YEufDECS1qOtp1XZECCkcMmnAlS7Juz03/rFK+smobmYwM
+deqmA76o2dGLujoQ7w7hJtHFOizUR/DfjrUY1AXbh9qk0MhbudVDGn28/zdYTlXz
+RuIhevoNzQKBgQCzXvOa/ZEWyfnX23uGZX0rI0n+N3JQ8KKvDLiKNNujUEDBRtiW
+j6GD5FJQAVQn3tEd9Gluj+ZLUP5C/nt6arEUwIgzn0GeQe9WIADfO4pVS/tOdXai
+Uv+DSear5wgYG4nuAD+ZQVpnG5NQHPDKMyYelJJnCmlxj0TVByYCvfG2yQKBgQCz
+UQNl8Sobwk/0gvbM04UfgZ784Kvqf9O/Ep/Hz/x+Z6rZFYIDq8WBMLINCaI8oYxL
+ybPmZtsz7Ec6RvyKQC5c4I0tihMnJbcJOekjKlWWtUke39KhK6n7IyopWHetv2Mh
+GFT+F/MmlCDJ+0HirZLT2WgMkhkmsUBpffpEJZPZzQKBgQDQKT183favwULOkQaP
+AGItpQVt3aYyuZYi5veIDlzk3tatAIzya/G6Bqfnpy9CO6yPKdp1pkJxM1xb374T
+z6Mz4A4NPOD2Ofd0WAmiIkP0H1+zfjjtmdBO/9PpASLByNxt2MUdjWvaGk2w7U42
+Eb1duJr2Yud1NfUvKI7dZudF1A==
+-----END PRIVATE KEY-----
index 4678361..f44d798 100755 (executable)
@@ -17,7 +17,7 @@
 #  ============LICENSE_END=================================================
 #
 
-#Generate basic cert and key for web server
+#Generate basic cert and key for https server
 
 cat <<__EOF__ |  openssl req -new -newkey rsa:2048 -sha256 -nodes -x509 -keyout certs/server.key -out certs/server.crt -days 9999
 SE
diff --git a/install/README.md b/install/README.md
new file mode 100644 (file)
index 0000000..3d616a8
--- /dev/null
@@ -0,0 +1,112 @@
+
+
+## Prerequisites
+
+The ranpm setup works on Linux/MacOS or on Windows via WSL, using a local or remote kubernetes cluster.
+
+- local kubectl
+- kubernetes cluster
+- local docker for building images
+
+It is recommended to run the ranpm on a kubernetes cluster rather than local docker-desktop etc, as the setup requires a fair amount of compute resources.
+
+## Requirements on kubernetes
+
+The demo setup can be run on a local or remote kubernetes cluster.
+Kubectl must be configured to point to the applicable kubernetes instance.
+Nodeports exposed by the kubernetes instance must be reachable from the local machine - in practice, the kubernetes control plane IP must be accessible from the local machine.
+
+- Latest version of istio installed
+
+## Other requirements
+- helm3
+- bash
+- cmd 'envsubst' must be installed (check by cmd: 'type envsubst')
+- cmd 'jq' must be installed (check by cmd: 'type jq'); a combined check is sketched below
+
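+A combined check for the listed tools might look like this (tool names taken from the lists above):
+
+```bash
+# Report any missing prerequisite commands
+for t in kubectl helm jq envsubst docker; do
+    type "$t" >/dev/null 2>&1 || echo "missing: $t"
+done
+```
+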
+## Before installation
+The following images need to be built manually. If a remote or multi-node cluster is used, an image repo needs to be available to push the built images to.
+If an external repo is used, use the same repo for all built images and configure the repo name in `helm/global-values.yaml` (the value of the extimagerepo parameter shall have a trailing `/`)
+
+Build the following images (build instructions in each dir; an example sequence is sketched below)
+- ranpm/https-server
+- ranpm/pm-file-converter
+- pm-rapp
+
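+A possible build sequence, assuming each dir's build.sh takes the repo name and an optional --tag in the same way as the https-server script (external repo name is illustrative):
+
+```bash
+# Build and push all three images to an external repo
+for d in https-server pm-file-converter pm-rapp; do
+    (cd "$d" && ./build.sh myrepo --tag latest)
+done
+```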
+
+## Installation
+
+The installation is made by a few scripts.
+The main part of the ranpm is installed by a single script. Additional parts can then be added on top. All installations in kubernetes are made by helm charts.
+
+The following scripts are provided for installing (install-nrt.sh must be run first; a typical run order is sketched below):
+
+- install-nrt.sh : Installs the main parts of the ranpm setup
+- install-pm-log.sh : Installs the PM log service that stores received PM data in influx db
+- install-pm-influx-job.sh : Sets up an alternative job to produce data stored in influx db.
+- install-pm-rapp.sh : Installs a rapp that subscribes to and prints out received data
+
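+A typical run order, per the note above:
+
+```bash
+./install-nrt.sh            # must be run first
+./install-pm-log.sh
+./install-pm-influx-job.sh
+./install-pm-rapp.sh
+```
+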
+## Uninstallation
+
+There is a corresponding uninstall script for each install script. However, it is enough to run just `uninstall-nrt.sh` and `uninstall-pm-rapp.sh`.
+
+## Exposed ports to APIs
+All APIs are exposed on individual port numbers (nodeports) on the address of the kubernetes control plane.
+
+### Keycloak API
+The Keycloak API is accessed via a proxy (the proxy is needed to make keycloak issue tokens with the internal address of keycloak).
+- nodeport: 31784
+
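+A token request through the proxy might look like this (realm, client id and secret are placeholders; the endpoint path assumes a recent keycloak version):
+
+```bash
+# Request a client-credentials token via the proxy nodeport
+curl -X POST "http://<control-plane-ip>:31784/realms/<realm>/protocol/openid-connect/token" \
+     -d grant_type=client_credentials \
+     -d client_id=<client-id> -d client_secret=<client-secret>
+```
+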
+### OPA rules bundle server
+Server for posting updated OPA rules.
+- nodeport: 32201
+
+### Information Coordinator Service
+Direct access to the ICS API.
+- nodeports (http and https): 31823, 31824
+
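+A quick check against ICS might look like this (control-plane address is a placeholder; the endpoint is from the ICS data-producer API):
+
+```bash
+# List registered info types via the ICS http nodeport
+curl "http://<control-plane-ip>:31823/data-producer/v1/info-types"
+```
+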
+### Ves-Collector
+Direct access to the Ves-Collector
+- nodeports (http and https): 31760, 31761
+
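+A hypothetical event post to the collector (the push-genfiles-to-ves-collector.sh script in this repo presumably wraps something similar):
+
+```bash
+# Post a VES event to the collector via the http nodeport (event file is illustrative)
+curl -X POST "http://<control-plane-ip>:31760/eventListener/v7" \
+     -H "Content-Type: application/json" \
+     -d @ves-event.json
+```
+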
+## Exposed ports to admin tools
+As part of the ranpm installation, a number of admin tools are installed.
+The tools are accessed via a browser on individual port numbers (nodeports) on the address of the kubernetes control plane.
+
+### Keycloak admin console
+Admin tool for keycloak.
+- nodeport : 31788
+- user: admin
+- password: admin
+
+### Redpanda console
+With this tool the topics, consumers etc. can be viewed.
+- nodeport: 31767
+
+### Minio web
+Browser for minio filestore.
+- nodeport: 31768
+- user: admin
+- password: adminadmin
+
+### Influx db
+Browser for influx db.
+- nodeport: 31812
+- user: admin
+- password: mySuP3rS3cr3tT0keN
+
+
+## License
+
+Copyright (C) 2023 Nordix Foundation. All rights reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/install/TODO.txt b/install/TODO.txt
deleted file mode 100644 (file)
index e13b220..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-app versions in Chart.yaml
-
-Add parameters to config in values.yaml
-
-Handle password in secured secrets
-
-staging/release images (configurable?) and image pull policy
-
-use other minio user than admin for producers etc
-
-###############################
-
-mc alias set minio http://minio.nonrtric:9000   admin adminadmin
-
-mc admin user add minio testa testatesta
-
similarity index 92%
rename from install/helm/nrt-pm/charts/dfc/values.yaml
rename to install/helm/global-values.yaml
index 76e92db..fb58a79 100644 (file)
@@ -15,5 +15,6 @@
 #  ============LICENSE_END=================================================
 #
 
-dfc:
-  clientsecret: Akzki8aSLHL0GVNIx0k1wDrzbB56CVh1
\ No newline at end of file
+global:
+  extimagerepo: bjornmagnussonest/
+  numhttpsservers: 10
index 92b20aa..d69a836 100644 (file)
@@ -16,7 +16,7 @@
 #
 
 apiVersion: v2
-name: client
+name: kafka-client
 description: Kafka client helm chart
 
 # A chart can be either an 'application' or a 'library' chart.
index d44896a..8ef8b8f 100644 (file)
 apiVersion: v1
 kind: Pod
 metadata:
-  name: client
+  name: kafka-client
   namespace: nonrtric
   labels:
-    app: client
+    app: kafka-client
 spec:
   restartPolicy: Always
   containers:
-  - name: client
+  - name: kafka-client
     image: confluentinc/cp-kafka:7.2.2
     command: ['sh', '-c', 'while [ true ];do sleep 60;done']
     imagePullPolicy: IfNotPresent
index 5154bad..cba60c2 100644 (file)
@@ -44,7 +44,9 @@ spec:
           name: data-vol
       containers:
       - name: minio
-        image: minio/minio:RELEASE.2023-02-27T18-10-45Z
+        # Note, in later releases only SSO seems to be possible,
+        # so an earlier release is kept to be able to login with user/pwd
+        image: minio/minio:RELEASE.2022-10-21T22-37-48Z
         imagePullPolicy: IfNotPresent
         ports:
         - name: tcpmain
index 9ce6e2a..59413ce 100644 (file)
@@ -17,4 +17,4 @@
 
 minio:
   opa:
-    decisionlogs: false
\ No newline at end of file
+    decisionlogs: true
\ No newline at end of file
diff --git a/install/helm/nrt-base-1/charts/ves-mr/Chart.yaml b/install/helm/nrt-base-1/charts/ves-mr/Chart.yaml
new file mode 100644 (file)
index 0000000..f180baa
--- /dev/null
@@ -0,0 +1,41 @@
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2023 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+apiVersion: v2
+name: ves-mr
+description: Ves-collector and message-router
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "0.1.0"
diff --git a/install/helm/nrt-base-1/charts/ves-mr/config/MsgRtrApi.properties b/install/helm/nrt-base-1/charts/ves-mr/config/MsgRtrApi.properties
new file mode 100644 (file)
index 0000000..b72f499
--- /dev/null
@@ -0,0 +1,135 @@
+{{/*
+# LICENSE_START=======================================================
+#  org.onap.dmaap
+#  ================================================================================
+#  Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+#  Modifications Copyright © 2021-2022 Nordix Foundation
+#  ================================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=========================================================
+#
+#  ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+###############################################################################
+###############################################################################
+*/}}
+##
+## Kafka Connection
+##
+##        Items below are passed through to Kafka's producer and consumer
+##        configurations (after removing "kafka.")
+##        if you want to change request.required.acks it can take this one value
+#kafka.request.required.acks=-1
+kafka.metadata.broker.list=kafka-1-kafka-bootstrap.nonrtric:9092
+config.zk.servers=zoo-entrance.nonrtric:2181
+consumer.timeout.ms=100
+zookeeper.connection.timeout.ms=6000
+zookeeper.session.timeout.ms=20000
+zookeeper.sync.time.ms=2000
+auto.commit.interval.ms=1000
+fetch.message.max.bytes =1000000
+auto.commit.enable=false
+
+#(backoff*retries > zksessiontimeout)
+kafka.rebalance.backoff.ms=10000
+kafka.rebalance.max.retries=6
+
+
+###############################################################################
+##
+##        Secured Config
+##
+##        Some data stored in the config system is sensitive -- API keys and secrets,
+##        for example. to protect it, we use an encryption layer for this section
+##        of the config.
+##
+## The key is a base64 encode AES key. This must be created/configured for
+## each installation.
+#cambria.secureConfig.key=
+##
+## The initialization vector is a 16 byte value specific to the secured store.
+## This must be created/configured for each installation.
+#cambria.secureConfig.iv=
+
+## Southfield Sandbox
+cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
+cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
+authentication.adminSecret=fe3cCompound
+#cambria.secureConfig.key[pc569h]=YT3XPyxEmKCTLI2NK+Sjbw==
+#cambria.secureConfig.iv[pc569h]=rMm2jhR3yVnU+u2V9Ugu3Q==
+
+
+###############################################################################
+##
+## Consumer Caching
+##
+##        Kafka expects live connections from the consumer to the broker, which
+##        obviously doesn't work over connectionless HTTP requests. The Cambria
+##        server proxies HTTP requests into Kafka consumer sessions that are kept
+##        around for later re-use. Not doing so is costly for setup per request,
+##        which would substantially impact a high volume consumer's performance.
+##
+##        This complicates Cambria server failover, because we often need server
+##        A to close its connection before server B brings up the replacement.
+##
+
+## The consumer cache is normally enabled.
+#cambria.consumer.cache.enabled=true
+
+## Cached consumers are cleaned up after a period of disuse. The server inspects
+## consumers every sweepFreqSeconds and will clean up any connections that are
+## dormant for touchFreqMs.
+#cambria.consumer.cache.sweepFreqSeconds=15
+cambria.consumer.cache.touchFreqMs=120000
+##stickforallconsumerrequests=false
+## The cache is managed through ZK. The default value for the ZK connection
+## string is the same as config.zk.servers.
+#cambria.consumer.cache.zkConnect=${config.zk.servers}
+
+##
+## Shared cache information is associated with this node's name. The default
+## name is the hostname plus the HTTP service port this host runs on. (The
+## hostname is determined via InetAddress.getLocalHost ().getCanonicalHostName(),
+## which is not always adequate.) You can set this value explicitly here.
+##
+#cambria.api.node.identifier=<use-something-unique-to-this-instance>
+
+#cambria.rateLimit.maxEmptyPollsPerMinute=30
+#cambria.rateLimitActual.delay.ms=10
+
+###############################################################################
+##
+## Metrics Reporting
+##
+##        This server can report its metrics periodically on a topic.
+##
+#metrics.send.cambria.enabled=true
+#metrics.send.cambria.topic=cambria.apinode.metrics                                  #msgrtr.apinode.metrics.dmaap
+#metrics.send.cambria.sendEverySeconds=60
+
+cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
+consumer.timeout=17
+default.partitions=3
+default.replicas=3
+##############################################################################
+#100mb
+maxcontentlength=10000
+
+##############################################################################
+##AAF Properties
+forceAAF=false
+useCustomAcls=false
+
+kafka.max.poll.interval.ms=300000
+kafka.heartbeat.interval.ms=60000
+kafka.session.timeout.ms=240000
+kafka.max.poll.records=1000
diff --git a/install/helm/nrt-base-1/charts/ves-mr/config/collector.properties b/install/helm/nrt-base-1/charts/ves-mr/config/collector.properties
new file mode 100644 (file)
index 0000000..3cd0a1d
--- /dev/null
@@ -0,0 +1,77 @@
+###############################################################################
+##
+## Collector Server config
+##
+##      - Default values are shown as commented settings.
+##
+###############################################################################
+##
+## HTTP(S) service
+##
+##      Normally:
+##
+##              - 8080 is http service
+##              - https is disabled by default
+##
+##              - At this time, the server always binds to 0.0.0.0
+##
+##
+collector.service.port=8080
+
+## Authentication is only supported via secure port
+## When enabled - require valid keystore defined
+collector.service.secure.port=8443
+
+# auth.method flags:
+#
+# noAuth - default option - no security (http)
+# certBasicAuth - auth by certificate and basic auth username / password (https)
+#auth.method=certBasicAuth
+auth.method=noAuth
+
+## Combination of userid,hashPassword encoded pwd list to be supported
+## userid and pwd comma separated; pipe delimitation between each pair
+## Password is generated by crypt-password library using BCrypt algorithm stored in dcaegen2/sdk package
+## or https://nexus.onap.org/#nexus-search;quick~crypt-password
+header.authlist=sample1,$2a$10$0buh.2WeYwN868YMwnNNEuNEAMNYVU9.FSMJGyIKV3dGET/7oGOi6
+
+## The keystore must be setup per installation when secure port is configured
+collector.keystore.file.location=etc/keystore
+collector.keystore.passwordfile=etc/passwordfile
+
+collector.cert.subject.matcher=etc/certSubjectMatcher.properties
+
+## The truststore must be setup per installation when mutual tls support is configured
+collector.truststore.file.location=etc/truststore
+collector.truststore.passwordfile=etc/trustpasswordfile
+
+## Schema Validation checkflag
+## default no validation checkflag (-1)
+## If enabled (1) - schemafile location must be specified
+collector.schema.checkflag=1
+collector.schema.file={\"v1\":\"./etc/CommonEventFormat_27.2.json\",\"v2\":\"./etc/CommonEventFormat_27.2.json\",\"v3\":\"./etc/CommonEventFormat_27.2.json\",\"v4\":\"./etc/CommonEventFormat_27.2.json\",\"v5\":\"./etc/CommonEventFormat_28.4.1.json\",\"v7\":\"./etc/CommonEventFormat_30.2.1_ONAP.json\"}
+
+## Schema StndDefinedFields Validation checkflag
+## default no validation checkflag (-1)
+## If enabled (1) - schema files locations must be specified, mapping file path must be specified, schema reference path
+## in event json must be specified, path to stndDefined data field in event json must be specified
+collector.externalSchema.checkflag=1
+collector.externalSchema.schemasLocation=./etc/externalRepo/
+collector.externalSchema.mappingFileLocation=./etc/externalRepo/schema-map.json
+event.externalSchema.schemaRefPath=$.event.stndDefinedFields.schemaReference
+event.externalSchema.stndDefinedDataPath=$.event.stndDefinedFields.data
+
+## List all streamid per domain to be supported. The streamid should match to channel name on dmaapfile
+collector.dmaap.streamid=fault=ves-fault|syslog=ves-syslog|heartbeat=ves-heartbeat|measurementsForVfScaling=ves-measurement|mobileFlow=ves-mobileflow|other=ves-other|stateChange=ves-statechange|thresholdCrossingAlert=ves-thresholdCrossingAlert|voiceQuality=ves-voicequality|sipSignaling=ves-sipsignaling|notification=ves-measurement|pnfRegistration=ves-pnfRegistration|3GPP-FaultSupervision=ves-3gpp-fault-supervision|3GPP-Heartbeat=ves-3gpp-heartbeat|3GPP-Provisioning=ves-3gpp-provisioning|3GPP-PerformanceAssurance=ves-3gpp-performance-assurance|o-ran-sc-du-hello-world-pm-streaming-oas3=ves-o-ran-sc-du-hello-world-pm-streaming-oas3
+collector.dmaapfile=etc/ves-dmaap-config.json
+
+## Path to the file containing description of api versions
+collector.description.api.version.location=etc/api_version_description.json
+
+## Event transformation Flag - when set expects configurable transformation
+## defined under ./etc/eventTransform.json
+## Enabled by default; to disable set to 0
+event.transform.flag=1
+
+# Describes at what frequency (measured in minutes) should application try to fetch config from CBS
+collector.dynamic.config.update.frequency=5
diff --git a/install/helm/nrt-base-1/charts/ves-mr/config/logback.xml b/install/helm/nrt-base-1/charts/ves-mr/config/logback.xml
new file mode 100644 (file)
index 0000000..949a893
--- /dev/null
@@ -0,0 +1,204 @@
+<!--
+     ============LICENSE_START=======================================================
+     Copyright © 2019 AT&T Intellectual Property. All rights reserved.
+     Modifications Copyright © 2021-2022 Nordix Foundation
+     ================================================================================
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+           http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+     ============LICENSE_END=========================================================
+ -->
+
+<configuration scan="true" scanPeriod="3 seconds" debug="true">
+  <contextName>${module.ajsc.namespace.name}</contextName>
+  <jmxConfigurator />
+  <property name="logDirectory" value="${AJSC_HOME}/log" />
+  <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+    <encoder>
+      <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} - %msg%n
+      </pattern>
+    </encoder>
+  </appender>
+
+  <appender name="INFO" class="ch.qos.logback.core.ConsoleAppender">
+    <filter class="ch.qos.logback.classic.filter.LevelFilter">
+      <level>INFO</level>
+      <onMatch>ACCEPT</onMatch>
+      <onMismatch>DENY</onMismatch>
+    </filter>
+  </appender>
+
+  <appender name="DEBUG" class="ch.qos.logback.core.ConsoleAppender">
+
+    <encoder>
+      <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+    </encoder>
+  </appender>
+
+  <appender name="ERROR" class="ch.qos.logback.core.ConsoleAppender">
+    <filter class="ch.qos.logback.classic.filter.LevelFilter">
+      <level>ERROR</level>
+      <onMatch>ACCEPT</onMatch>
+      <onMismatch>DENY</onMismatch>
+    </filter>
+    <encoder>
+      <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+    </encoder>
+  </appender>
+
+
+  <!-- Msgrtr related loggers -->
+  <logger name="org.onap.dmaap.dmf.mr.service" level="TRACE" />
+  <logger name="org.onap.dmaap.dmf.mr.service.impl" level="TRACE" />
+
+  <logger name="org.onap.dmaap.dmf.mr.resources" level="TRACE" />
+  <logger name="org.onap.dmaap.dmf.mr.resources.streamReaders" level="TRACE" />
+
+  <logger name="org.onap.dmaap.dmf.mr.backends" level="TRACE" />
+  <logger name="org.onap.dmaap.dmf.mr.backends.kafka" level="TRACE" />
+  <logger name="org.onap.dmaap.dmf.mr.backends.memory" level="TRACE" />
+
+  <logger name="org.onap.dmaap.dmf.mr.beans" level="TRACE" />
+
+  <logger name="org.onap.dmaap.dmf.mr.constants" level="TRACE" />
+
+  <logger name="org.onap.dmaap.dmf.mr.exception" level="TRACE" />
+
+  <logger name="org.onap.dmaap.dmf.mr.listener" level="TRACE" />
+
+  <logger name="org.onap.dmaap.dmf.mr.metabroker" level="TRACE" />
+
+  <logger name="org.onap.dmaap.dmf.mr.metrics.publisher" level="TRACE" />
+  <logger name="org.onap.dmaap.dmf.mr.metrics.publisher.impl" level="TRACE" />
+
+
+
+  <logger name="org.onap.dmaap.dmf.mr.security" level="TRACE" />
+  <logger name="org.onap.dmaap.dmf.mr.security.impl" level="TRACE" />
+
+  <logger name="org.onap.dmaap.dmf.mr.transaction" level="TRACE" />
+  <logger name="com.att.dmf.mr.transaction.impl" level="TRACE" />
+
+  <logger name="org.onap.dmaap.dmf.mr.metabroker" level="TRACE" />
+  <logger name="org.onap.dmaap.dmf.mr.metabroker" level="TRACE" />
+
+  <logger name="org.onap.dmaap.dmf.mr.utils" level="TRACE" />
+  <logger name="org.onap.dmaap.mr.filter" level="TRACE" />
+
+  <!--<logger name="com.att.nsa.cambria.*" level="TRACE" />-->
+
+  <!-- Msgrtr loggers in ajsc -->
+  <logger name="org.onap.dmaap.service" level="TRACE" />
+  <logger name="org.onap.dmaap" level="TRACE" />
+
+
+  <!-- Spring related loggers -->
+  <logger name="org.springframework" level="TRACE" additivity="false"/>
+  <logger name="org.springframework.beans" level="TRACE" additivity="false"/>
+  <logger name="org.springframework.web" level="TRACE" additivity="false" />
+  <logger name="com.blog.spring.jms" level="TRACE" additivity="false" />
+
+  <!-- AJSC Services (bootstrap services) -->
+  <logger name="ajsc" level="TRACE" additivity="false"/>
+  <logger name="ajsc.RouteMgmtService" level="TRACE" additivity="false"/>
+  <logger name="ajsc.ComputeService" level="TRACE" additivity="false" />
+  <logger name="ajsc.VandelayService" level="TRACE" additivity="false"/>
+  <logger name="ajsc.FilePersistenceService" level="TRACE" additivity="false"/>
+  <logger name="ajsc.UserDefinedJarService" level="TRACE" additivity="false" />
+  <logger name="ajsc.UserDefinedBeansDefService" level="TRACE" additivity="false" />
+  <logger name="ajsc.LoggingConfigurationService" level="TRACE" additivity="false" />
+
+  <!-- AJSC related loggers (DME2 Registration, csi logging, restlet, servlet
+    logging) -->
+  <logger name="ajsc.utils" level="TRACE" additivity="false"/>
+  <logger name="ajsc.utils.DME2Helper" level="TRACE" additivity="false" />
+  <logger name="ajsc.filters" level="TRACE" additivity="false" />
+  <logger name="ajsc.beans.interceptors" level="TRACE" additivity="false" />
+  <logger name="ajsc.restlet" level="TRACE" additivity="false" />
+  <logger name="ajsc.servlet" level="TRACE" additivity="false" />
+  <logger name="com.att" level="TRACE" additivity="false" />
+  <logger name="com.att.ajsc.csi.logging" level="TRACE" additivity="false" />
+  <logger name="com.att.ajsc.filemonitor" level="TRACE" additivity="false"/>
+
+  <logger name="com.att.nsa.dmaap.util" level="TRACE" additivity="false"/>
+  <logger name="com.att.cadi.filter" level="TRACE" additivity="false" />
+
+
+  <!-- Other Loggers that may help troubleshoot -->
+  <logger name="net.sf" level="TRACE" additivity="false" />
+  <logger name="org.apache.commons.httpclient" level="TRACE" additivity="false"/>
+  <logger name="org.apache.commons" level="TRACE" additivity="false" />
+  <logger name="org.apache.coyote" level="TRACE" additivity="false"/>
+  <logger name="org.apache.jasper" level="TRACE" additivity="false"/>
+
+  <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging.
+    May aid in troubleshooting) -->
+  <logger name="org.apache.camel" level="TRACE" additivity="false" />
+  <logger name="org.apache.cxf" level="TRACE" additivity="false" />
+  <logger name="org.apache.camel.processor.interceptor" level="TRACE" additivity="false"/>
+  <logger name="org.apache.cxf.jaxrs.interceptor" level="TRACE" additivity="false" />
+  <logger name="org.apache.cxf.service" level="TRACE" additivity="false" />
+  <logger name="org.restlet" level="TRACE" additivity="false" />
+  <logger name="org.apache.camel.component.restlet" level="TRACE" additivity="false" />
+  <logger name="org.apache.kafka" level="TRACE" additivity="false" />
+  <logger name="org.apache.zookeeper" level="TRACE" additivity="false" />
+  <logger name="org.I0Itec.zkclient" level="TRACE" additivity="false" />
+
+  <!-- logback internals logging -->
+  <logger name="ch.qos.logback.classic" level="TRACE" additivity="false"/>
+  <logger name="ch.qos.logback.core" level="TRACE" additivity="false" />
+
+  <!-- logback jms appenders & loggers definition starts here -->
+  <!-- logback jms appenders & loggers definition starts here -->
+  <appender name="auditLogs" class="ch.qos.logback.core.ConsoleAppender">
+    <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+    </filter>
+    <encoder>
+      <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+    </encoder>
+  </appender>
+  <appender name="perfLogs" class="ch.qos.logback.core.ConsoleAppender">
+    <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+    </filter>
+    <encoder>
+      <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+    </encoder>
+  </appender>
+  <appender name="ASYNC-audit" class="ch.qos.logback.classic.AsyncAppender">
+    <queueSize>1000</queueSize>
+    <discardingThreshold>0</discardingThreshold>
+    <appender-ref ref="Audit-Record-Queue" />
+  </appender>
+
+  <logger name="AuditRecord" level="TRACE" additivity="FALSE">
+    <appender-ref ref="STDOUT" />
+  </logger>
+  <logger name="AuditRecord_DirectCall" level="TRACE" additivity="FALSE">
+    <appender-ref ref="STDOUT" />
+  </logger>
+  <appender name="ASYNC-perf" class="ch.qos.logback.classic.AsyncAppender">
+    <queueSize>1000</queueSize>
+    <discardingThreshold>0</discardingThreshold>
+    <appender-ref ref="Performance-Tracker-Queue" />
+  </appender>
+  <logger name="PerfTrackerRecord" level="TRACE" additivity="FALSE">
+    <appender-ref ref="ASYNC-perf" />
+    <appender-ref ref="perfLogs" />
+  </logger>
+  <!-- logback jms appenders & loggers definition ends here -->
+
+  <root level="TRACE">
+    <appender-ref ref="DEBUG" />
+    <appender-ref ref="ERROR" />
+    <appender-ref ref="INFO" />
+    <appender-ref ref="STDOUT" />
+  </root>
+
+</configuration>
diff --git a/install/helm/nrt-base-1/charts/ves-mr/config/ves-dmaap-config.json b/install/helm/nrt-base-1/charts/ves-mr/config/ves-dmaap-config.json
new file mode 100644 (file)
index 0000000..85e45c0
--- /dev/null
@@ -0,0 +1,10 @@
+{
+
+  "ves-measurement": {
+    "type": "message_router",
+    "dmaap_info": {
+      "location": "mtl5",
+      "topic_url": "http://message-router.nonrtric:3904/events/file-ready/",
+    }
+  }
+}
\ No newline at end of file
diff --git a/install/helm/nrt-base-1/charts/ves-mr/templates/app-configmap1.yaml b/install/helm/nrt-base-1/charts/ves-mr/templates/app-configmap1.yaml
new file mode 100644 (file)
index 0000000..725c1f1
--- /dev/null
@@ -0,0 +1,25 @@
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2023 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: msgrtrapi
+  namespace: nonrtric
+
+data:
+{{ (.Files.Glob "config/MsgRtrApi.properties").AsConfig | nindent 2 }}
similarity index 85%
rename from install/helm/nrt-pm-log/values.yaml
rename to install/helm/nrt-base-1/charts/ves-mr/templates/app-configmap2.yaml
 #  ============LICENSE_END=================================================
 #
 
-nrtpmlog:
-  clientsecret: fwbKl26FccceE4godFW8Hkdi5Wi4Lhke
\ No newline at end of file
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: logback
+  namespace: nonrtric
+
+data:
+{{ (.Files.Glob "config/logback.xml").AsConfig | nindent 2 }}
diff --git a/install/helm/nrt-base-1/charts/ves-mr/templates/app-configmap3.yaml b/install/helm/nrt-base-1/charts/ves-mr/templates/app-configmap3.yaml
new file mode 100644 (file)
index 0000000..ddca7fd
--- /dev/null
@@ -0,0 +1,25 @@
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2023 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: ves-collector-collector.properties
+  namespace: nonrtric
+
+data:
+{{ (.Files.Glob "config/collector.properties").AsConfig | nindent 2 }}
diff --git a/install/helm/nrt-base-1/charts/ves-mr/templates/app-configmap4.yaml b/install/helm/nrt-base-1/charts/ves-mr/templates/app-configmap4.yaml
new file mode 100644 (file)
index 0000000..92386a8
--- /dev/null
@@ -0,0 +1,25 @@
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2023 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: ves-collector-ves-dmaap-config.json
+  namespace: nonrtric
+
+data:
+{{ (.Files.Glob "config/ves-dmaap-config.json").AsConfig | nindent 2 }}
diff --git a/install/helm/nrt-base-1/charts/ves-mr/templates/app-deployment-message-router.yaml b/install/helm/nrt-base-1/charts/ves-mr/templates/app-deployment-message-router.yaml
new file mode 100644 (file)
index 0000000..662614c
--- /dev/null
@@ -0,0 +1,56 @@
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: message-router
+  namespace: nonrtric
+  labels:
+    app: message-router
+
+spec:
+  selector:
+    matchLabels:
+      app: message-router
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: message-router
+        app: message-router
+      name: message-router
+    spec:
+      containers:
+        - name: message-router
+          image: nexus3.onap.org:10002/onap/dmaap/dmaap-mr:1.4.4
+          imagePullPolicy: Always
+          ports:
+          - containerPort: 3904
+            name: api
+
+          env:
+          # - name: JAASLOGIN
+          #   valueFrom:
+          #     secretKeyRef:
+          #       name: strimzi-kafka-admin
+          #       key: sasl.jaas.config
+          # - name: SASLMECH
+          #   value: scram-sha-512
+          - name: enableCadi
+            value: "false"
+          - name: useZkTopicStore
+            value: "false"
+          volumeMounts:
+            - mountPath: /appl/dmaapMR1/bundleconfig/etc/appprops/MsgRtrApi.properties
+              subPath: MsgRtrApi.properties
+              name: msgrtrapi
+            - mountPath: /appl/dmaapMR1/bundleconfig/etc/logback.xml
+              subPath: logback.xml
+              name: logback
+      volumes:
+        - name: msgrtrapi
+          configMap:
+            name: msgrtrapi
+        - name: logback
+          configMap:
+            name: logback
diff --git a/install/helm/nrt-base-1/charts/ves-mr/templates/app-deployment-ves-collector.yaml b/install/helm/nrt-base-1/charts/ves-mr/templates/app-deployment-ves-collector.yaml
new file mode 100644 (file)
index 0000000..f04c4b4
--- /dev/null
@@ -0,0 +1,43 @@
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: ves-collector
+  namespace: nonrtric
+  labels:
+    run: ves-collector
+spec:
+  selector:
+    matchLabels:
+      run: ves-collector
+  template:
+    metadata:
+      labels:
+        run: ves-collector
+    spec:
+      volumes:
+        - name: conf-vol1
+          configMap:
+            name: ves-collector-collector.properties
+        - name: conf-vol2
+          configMap:
+            name: ves-collector-ves-dmaap-config.json
+      containers:
+      - name: ves-collector
+        image: nexus3.onap.org:10002/onap/org.onap.dcaegen2.collectors.ves.vescollector:1.12.3
+        imagePullPolicy: IfNotPresent
+        env:
+        - name: DMAAPHOST
+          value: message-router.nonrtric
+        ports:
+        - name: http
+          containerPort: 8080
+        - name: https
+          containerPort: 8443
+        volumeMounts:
+        - name: conf-vol1
+          mountPath: /opt/app/VESCollector/etc/collector.properties
+          subPath: collector.properties
+        - name: conf-vol2
+          mountPath: /opt/app/VESCollector/etc/ves-dmaap-config.json
+          subPath: ves-dmaap-config.json
diff --git a/install/helm/nrt-base-1/charts/ves-mr/templates/app-deployment-zk-tunnel.yaml b/install/helm/nrt-base-1/charts/ves-mr/templates/app-deployment-zk-tunnel.yaml
new file mode 100644 (file)
index 0000000..7d3c9e4
--- /dev/null
@@ -0,0 +1,70 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: zoo-entrance
+  namespace: nonrtric
+  labels:
+    app: zoo-entrance
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: zoo-entrance
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+        app: zoo-entrance
+    spec:
+      containers:
+        - name: zoo-entrance
+          image: 'ghcr.io/scholzj/zoo-entrance:latest'
+          command:
+            - /opt/stunnel/stunnel_run.sh
+          ports:
+            - containerPort: 2181
+              name: zoo
+              protocol: TCP
+          env:
+            - name: LOG_LEVEL
+              value: notice
+            - name: STRIMZI_ZOOKEEPER_CONNECT
+              value: 'kafka-1-zookeeper-client:2181'
+          imagePullPolicy: Always
+          livenessProbe:
+            exec:
+              command:
+                - /opt/stunnel/stunnel_healthcheck.sh
+                - '2181'
+            failureThreshold: 3
+            initialDelaySeconds: 15
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 5
+          readinessProbe:
+            exec:
+              command:
+                - /opt/stunnel/stunnel_healthcheck.sh
+                - '2181'
+            failureThreshold: 3
+            initialDelaySeconds: 15
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 5
+          volumeMounts:
+            - mountPath: /etc/cluster-operator-certs/
+              name: cluster-operator-certs
+            - mountPath: /etc/cluster-ca-certs/
+              name: cluster-ca-certs
+      restartPolicy: Always
+      terminationGracePeriodSeconds: 30
+      volumes:
+        - name: cluster-operator-certs
+          secret:
+            defaultMode: 288
+            secretName: kafka-1-cluster-operator-certs
+        - name: cluster-ca-certs
+          secret:
+            defaultMode: 288
+            secretName: kafka-1-cluster-ca-cert
diff --git a/install/helm/nrt-base-1/charts/ves-mr/templates/app-networkpolicy-zk-tunnel.yaml b/install/helm/nrt-base-1/charts/ves-mr/templates/app-networkpolicy-zk-tunnel.yaml
new file mode 100644 (file)
index 0000000..d5ecf51
--- /dev/null
@@ -0,0 +1,21 @@
+
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  labels:
+    app: zoo-entrance
+  name: zoo-entrance
+spec:
+  ingress:
+  - from:
+    - podSelector:
+        matchLabels:
+          app: zoo-entrance
+    ports:
+    - port: 2181
+      protocol: TCP
+  podSelector:
+    matchLabels:
+      strimzi.io/name: kafka-1-zookeeper
+  policyTypes:
+  - Ingress
\ No newline at end of file
diff --git a/install/helm/nrt-base-1/charts/ves-mr/templates/app-service-message-router.yaml b/install/helm/nrt-base-1/charts/ves-mr/templates/app-service-message-router.yaml
new file mode 100644 (file)
index 0000000..e07ea1b
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: message-router
+  namespace: nonrtric
+  labels:
+    app: message-router
+spec:
+  ports:
+    - port: 3904
+      targetPort: 3904
+      protocol: TCP
+      name: http
+  type: ClusterIP
+  selector:
+   app: message-router
diff --git a/install/helm/nrt-base-1/charts/ves-mr/templates/app-service-ves-collector.yaml b/install/helm/nrt-base-1/charts/ves-mr/templates/app-service-ves-collector.yaml
new file mode 100644 (file)
index 0000000..856532f
--- /dev/null
@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: ves-collector
+  namespace: nonrtric
+  labels:
+    run: ves-collector
+spec:
+  type: NodePort
+  ports:
+  - port: 8080
+    targetPort: 8080
+    name: http
+    nodePort: 31760
+  - port: 8443
+    name: https
+    nodePort: 31761
+  selector:
+    run: ves-collector
diff --git a/install/helm/nrt-base-1/charts/ves-mr/templates/app-service-zk-tunnel.yaml b/install/helm/nrt-base-1/charts/ves-mr/templates/app-service-zk-tunnel.yaml
new file mode 100644 (file)
index 0000000..36cbe4c
--- /dev/null
@@ -0,0 +1,17 @@
+
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: zoo-entrance
+  name: zoo-entrance
+  namespace: nonrtric
+spec:
+  ports:
+    - name: zoo
+      port: 2181
+      protocol: TCP
+      targetPort: 2181
+  selector:
+    app: zoo-entrance
+  type: ClusterIP
diff --git a/install/helm/nrt-pm-log/TODO.txt b/install/helm/nrt-pm-log/TODO.txt
deleted file mode 100644 (file)
index f1f4ce1..0000000
+++ /dev/null
@@ -1 +0,0 @@
-fix unique kafka clientids
\ No newline at end of file
index d6cf981..829da65 100644 (file)
@@ -1,14 +1,23 @@
 {
-    "info_type_id": "PmData",
-    "job_owner": "console",
-    "job_result_uri": "",
-    "job_definition": {
-       "filter": {
-
-       },
-       "deliveryInfo": {
-          "topic": "pmreports",
-          "bootStrapServers": "kafka-1-kafka-bootstrap.nonrtric:9097"
-       }
-    }
- }
\ No newline at end of file
+   "info_type_id": "PmData",
+   "job_owner": "console",
+   "job_definition": {
+      "filter": {
+         "sourceNames": [],
+         "measObjInstIds": [],
+         "measTypeSpecs": [
+            {
+               "measuredObjClass": "NRCellDU",
+               "measTypes": [
+                  "pmCounterNumber101"
+               ]
+            }
+         ],
+         "measuredEntityDns": []
+      },
+      "deliveryInfo": {
+         "topic": "pmreports",
+         "bootStrapServers": "kafka-1-kafka-bootstrap.nonrtric:9097"
+      }
+   }
+}
\ No newline at end of file
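
For reference, a job definition like this is registered with ICS over REST; a hypothetical manual call (job id and control-plane address are illustrative, nodeport per install/README.md; the create_ics_job.sh script presumably wraps this):

```bash
# Register the PmData job with ICS
curl -X PUT "http://<control-plane-ip>:31823/data-consumer/v1/info-jobs/pmlog-1" \
     -H "Content-Type: application/json" \
     -d @install/helm/nrt-pm-log/config/jobDefinition.json
```
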
index f69b8f4..cdfa8db 100644 (file)
@@ -45,10 +45,9 @@ spec:
           emptyDir: {}
       containers:
       - name: pmlog
-        image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-plt-pmlog:1.0.0
-        #image: o-ran-sc/nonrtric-plt-pmlog:1.0.0-SNAPSHOT
+#       image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-plt-pmlog:1.0.0
+        image: nexus3.o-ran-sc.org:10003/o-ran-sc/nonrtric-plt-pmlog:1.0.0-SNAPSHOT
         imagePullPolicy: Always
-        #imagePullPolicy: Never
         ports:
         - name: http
           containerPort: 8084
@@ -64,7 +63,7 @@ spec:
         - mountPath: /token-cache
           name: token-cache-volume
         env:
-        - name: APP_INFLUX_ACCESS-TOKEN
+        - name: APP_INFLUX_ACCESSTOKEN
           valueFrom:
             secretKeyRef:
               name: influxdb-api-token
index f16a119..5df4d71 100644 (file)
@@ -25,8 +25,12 @@ metadata:
 spec:
   containers:
   - name: pm-rapp
-    image: pm-rapp:latest
+    image: {{ .Values.global.extimagerepo }}pm-rapp:latest
+    {{- if .Values.global.extimagerepo }}
+    imagePullPolicy: Always
+    {{- else }}
     imagePullPolicy: Never
+    {{- end }}
     ports:
     - name: http
       containerPort: 80
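
The same extimagerepo / pull-policy pattern recurs in the producer and https-server charts below. A quick way to sanity-check the rendered image reference (assuming the chart renders standalone with the global value set):

```bash
# Inspect the image reference produced by the template (repo value is illustrative)
helm template install/helm/nrt-pm-rapp --set global.extimagerepo=myrepo/ | grep "image:"
```
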
diff --git a/install/helm/nrt-pm/charts/dfc/.gitignore b/install/helm/nrt-pm/charts/dfc/.gitignore
new file mode 100644 (file)
index 0000000..7f47975
--- /dev/null
@@ -0,0 +1 @@
+values.yaml
index d7c1d4f..ed095ac 100644 (file)
@@ -46,10 +46,9 @@ spec:
       - name: dfc
         securityContext:
           runAsUser: 0     # Need to run as root - needed when writing to hostpath
-        image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-plt-ranpm-datafilecollector:1.0.0
-        #image: o-ran-sc/nonrtric-plt-ranpm-datafilecollector:1.0.0-SNAPSHOT
+#       image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-plt-ranpm-datafilecollector:1.0.0
+        image: nexus3.o-ran-sc.org:10003/o-ran-sc/nonrtric-plt-ranpm-datafilecollector:1.0.0-SNAPSHOT
         imagePullPolicy: Always
-        #imagePullPolicy: Never
         ports:
         - name: http
           containerPort: 8100
index 74029ec..b737e67 100644 (file)
@@ -34,7 +34,8 @@ spec:
     spec:
       containers:
       - name: informationservice
-        image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-plt-informationcoordinatorservice:1.5.0
+#       image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-plt-informationcoordinatorservice:1.5.0
+        image: nexus3.o-ran-sc.org:10003/o-ran-sc/nonrtric-plt-informationcoordinatorservice:1.5.0-SNAPSHOT
         imagePullPolicy: Always
         ports:
         - name: http
index b8055a8..b28e025 100644 (file)
@@ -38,8 +38,12 @@ spec:
     spec:
       containers:
       - name: kafka-producer-pm-json2influx
-        image: kafka-pm-producer:latest
+        image: {{ .Values.global.extimagerepo }}pm-file-converter:latest
+        {{- if .Values.global.extimagerepo }}
+        imagePullPolicy: Always
+        {{- else }}
         imagePullPolicy: Never
+        {{- end }}
         ports:
         - name: http
           containerPort: 80
index f998904..ef7e60a 100644 (file)
@@ -38,8 +38,12 @@ spec:
     spec:
       containers:
       - name: kafka-producer-pm-json2kafka
-        image: kafka-pm-producer:latest
+        image: {{ .Values.global.extimagerepo }}pm-file-converter:latest
+        {{- if .Values.global.extimagerepo }}
+        imagePullPolicy: Always
+        {{- else }}
         imagePullPolicy: Never
+        {{- end }}
         ports:
         - name: http
           containerPort: 80
index 528582f..06f289e 100644 (file)
@@ -38,8 +38,12 @@ spec:
     spec:
       containers:
       - name: kafka-producer-pm-xml2json
-        image: kafka-pm-producer:latest
+        image: {{ .Values.global.extimagerepo }}pm-file-converter:latest
+        {{- if .Values.global.extimagerepo }}
+        imagePullPolicy: Always
+        {{- else }}
         imagePullPolicy: Never
+        {{- end }}
         ports:
         - name: http
           containerPort: 80
diff --git a/install/helm/nrt-pm/charts/pm-producer-json2kafka/.gitignore b/install/helm/nrt-pm/charts/pm-producer-json2kafka/.gitignore
new file mode 100644 (file)
index 0000000..7f47975
--- /dev/null
@@ -0,0 +1 @@
+values.yaml
index 565d763..8f3866b 100644 (file)
@@ -36,10 +36,9 @@ spec:
     spec:
       containers:
       - name: pm-producer-json2kafka
-        image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-plt-pmproducer:1.0.0
-        #image: o-ran-sc/nonrtric-plt-pmproducer:1.0.0-SNAPSHOT
+#       image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-plt-pmproducer:1.0.0
+        image: nexus3.o-ran-sc.org:10003/o-ran-sc/nonrtric-plt-pmproducer:1.0.0-SNAPSHOT
         imagePullPolicy: Always
-        #imagePullPolicy: Never
         ports:
         - name: http
           containerPort: 8084
index 7db28cc..ee05a69 100644 (file)
@@ -16,4 +16,4 @@
 #
 
 pmproducerjson2kafka:
-  clientsecret: Wy7TuisvAJZ972xG9pRznKfI1gksVx8z
\ No newline at end of file
+  clientsecret: NIIrUK0LisJ0iUVbzOMXtwnnhzjyHtmh
\ No newline at end of file
index ced031e..01b94f4 100644 (file)
@@ -23,7 +23,7 @@ metadata:
   labels:
     app: pm-https-server
 spec:
-  replicas: 1    # Max 10 = number of generated certs unique ...
+  replicas: {{ .Values.global.numhttpsservers }}    # Max 10 = number of generated unique certs  ...
   serviceName: pm-https-server
   selector:
     matchLabels:
@@ -60,8 +60,12 @@ spec:
           runAsUser: 0
       containers:
       - name: pm-https-server
-        image: pm-https-server:latest
+        image: {{ .Values.global.extimagerepo }}pm-https-server:latest
+        {{- if .Values.global.extimagerepo }}
+        imagePullPolicy: Always
+        {{- else }}
         imagePullPolicy: Never
+        {{- end }}
         ports:
         - name: http
           containerPort: 80
@@ -72,12 +76,12 @@ spec:
         # If env is missing, the file in the call to "/files/<filename> url must exist in the server
         - name: ALWAYS_RETURN
           value: /ne-files/pm.xml.gz
-        # Env must be specified if genetated files use. The value shall spefify the first timestamp of a series of pm files
+        # Env must be specified if generated files are used. The value shall specify the first timestamp of a series of pm files
         # If a file with a timestamp less than the below will return 404
-        # Timestamp shall be gvien with date.time where minutes has values 00,15,45 and the given timezone
+        # Timestamp shall be given as date.time, where the minutes have the values 00,15,45, followed by the given timezone
         # Example: 20230220.1300 - denotes a first file name of 20230220.1300+0100-1315+0100_<node-name>.xml.gz
         - name: GENERATED_FILE_START_TIME
-          value: "20230220.1300"
+          value: "20230515.0700"
         # Timezone to use for generated files. If not given, timezone 0000 will be used
         # Shall include +/- sign for the timezone value
         - name: GENERATED_FILE_TIMEZONE
index 4822e9a..6cb2caf 100755 (executable)
 SAMELINE="\033[0K\r"
 
 # Variables
-export KHOST=$(kube_get_controlplane_host)
+export KUBERNETESHOST=$(kube_get_controlplane_host)
 if [ $? -ne 0 ]; then
-    echo $KHOST
+    echo $KUBERNETESHOST
     echo "Exiting"
     exit 1
 fi
 
 echo "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
-echo "Kubernetes control plane host: $KHOST"
+echo "Kubernetes control plane host: $KUBERNETESHOST"
 echo "Host obtained from current kubectl context"
 echo "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
 
+echo "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
+echo "Checking requirements"
+echo " Checking if istio is installed"
+kubectl $KUBECONF get authorizationpolicies -A &> /dev/null
+if [ $? -ne 0 ]; then
+    echo "  Istio api: kubectl get authorizationpolicies is not installed"
+    exit 1
+else
+    echo "  OK"
+fi
+echo " Checking if jq is installed"
+tmp=$(type jq)
+if [ $? -ne 0 ]; then
+       echo "  Command utility jq (cmd-line json processor) is not installed"
+       exit 1
+else
+    echo "  OK"
+fi
+echo " Checking if envsubst is installed"
+tmp=$(type envsubst)
+if [ $? -ne 0 ]; then
+       echo "  Command utility envsubst (env var substitution in files) is not installed"
+       exit 1
+else
+    echo "  OK"
+fi
+
 echo "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
 echo "Restarting istiod, workaround to refresh jwks cache"
 kubectl rollout restart deployments/istiod -n istio-system
@@ -69,7 +96,6 @@ helm install --wait -n nonrtric nrt-base-0 helm/nrt-base-0
 
 # Create realm in keycloak
 
-##export KC_PORT=$(kube_get_nodeport keycloak nonrtric http)
 . scripts/populate_keycloak.sh
 
 create_realms nonrtric-realm
@@ -84,63 +110,11 @@ check_error $?
 generate_client_secrets nonrtric-realm $cid
 check_error $?
 
-# retcode=0
-# while [ $retcode -eq 0 ]; do
-#     #NRT_REALM_JWKS=$(kubectl exec -n nonrtric client -- curl -f http://keycloak.nonrtric:8080/realms/nonrtric-realm/protocol/openid-connect/certs)
-#     NRT_REALM_JWKS=$(curl -fs localhost:31788/realms/nonrtric-realm/protocol/openid-connect/certs)
-#     if [ $? -eq 0 ]; then
-#         retcode=1
-#         #echo $NRT_REALM_JWKS
-#         echo "JWKS for nonrtric-realm obtained"
-#     else
-#         sleep 3
-#         echo "Wating for keycloak to publish JWKS for nonrtric-realm"
-#     fi
-# done
-
-# export NRT_REALM_JWKS
-
 echo ""
 
-# ##################################################################################
-# echo "##### Installing chart httpecho"
-# ##################################################################################
-
-#helm install --wait -n nonrtric httpecho helm/httpecho
-
-
-
-# TSEC=$SECONDS
-# numok=0
-# while [ $numok -lt 10 ]; do
-#     echo ""
-#     echo "Time: $(($SECONDS-$TSEC))"
-#     cid="console-setup"
-#     __get_admin_token
-#     TOKEN=$(get_client_token nonrtric-realm $cid)
-#     decode_token "$TOKEN"
-#     curl -fv localhost:31789/ -H "Authorization: Bearer $TOKEN"
-#     if [ $? -eq 0 ]; then
-#         let numok=numok+1
-#     fi
-#     sleep 5
-# done
-
-# TSEC=$SECONDS
-# while [ true ]; do
-#     echo ""
-#     echo "Time: $(($SECONDS-$TSEC))"
-#     cid="console-setup"
-#     __get_admin_token
-#     TOKEN=$(get_client_token nonrtric-realm $cid)
-#     decode_token "$TOKEN"
-#     curl -v localhost:31789/ -H "Authorization: Bearer $TOKEN"
-#     sleep 5
-# done
 cid="console-setup"
 __get_admin_token
 TOKEN=$(get_client_token nonrtric-realm $cid)
-decode_token "$TOKEN"
 
 ##################################################################################
 echo "##### Installing charts: strimzi and nrt-base-1"
@@ -153,11 +127,9 @@ helm install --wait strimzi-kafka-crds -n nonrtric strimzi/strimzi-kafka-operato
 
 cp opa-rules/bundle.tar.gz helm/nrt-base-1/charts/opa-rule-db/data
 
-#envsubst < helm/nrt-base-1/charts/httpecho/values-template.yaml > helm/nrt-base-1/charts/httpecho/values.yaml
-
 helm install -n nonrtric nrt-base-1 helm/nrt-base-1
 
-
+echo "Waiting for influx db - there may be error messages while trying..."
 retcode=1
 while [ $retcode -eq 1 ]; do
     retcode=0
@@ -167,7 +139,7 @@ while [ $retcode -eq 1 ]; do
         sleep 1
     elif [ "$CONFIG" == "{}" ]; then
         echo "Configuring db"
-        kubectl exec -n nonrtric influxdb2-0 -- influx setup -u bm -p mySuP3rS3cr3tT0keN -o est -b pm-bucket -f
+        kubectl exec -n nonrtric influxdb2-0 -- influx setup -u admin -p mySuP3rS3cr3tT0keN -o est -b pm-bucket -f
         if [ $? -ne 0 ]; then
             retcode=1
             sleep 1
@@ -178,12 +150,10 @@ while [ $retcode -eq 1 ]; do
 done
 
 # Save influx user api-token to secret
-INFLUXDB2_TOKEN=$(get_influxdb2_token influxdb2-0 nonrtric)
-INFLUXDB2_TOKEN=$(echo -n $INFLUXDB2_TOKEN | base64)
-PATCHDATA='[{"op": "add", "path": "/data/token", "value": "'$INFLUXDB2_TOKEN'" }]'
+INFLUXDB2_TOKEN=$(get_influxdb2_token influxdb2-0 nonrtric | base64)
+PATCHDATA='[{"op": "add", "path": "/data/token", "value": "'$INFLUXDB2_TOKEN'"}]'
 kubectl patch secret influxdb-api-token -n nonrtric --type json -p "$PATCHDATA"
 
-
 echo "Wait for kafka"
 _ts=$SECONDS
 until $(kubectl exec -n nonrtric kafka-client -- kafka-topics --list --bootstrap-server kafka-1-kafka-bootstrap.nonrtric:9092 1> /dev/null 2> /dev/null); do
@@ -207,7 +177,7 @@ echo "##### Installing: chart ran"
 ./helm/ran/certs/gen-certs.sh 10
 check_error $?
 
-helm install --wait -n ran ran helm/ran
+helm install --wait -f helm/global-values.yaml -n ran ran helm/ran
 
 echo ""
 
@@ -282,10 +252,7 @@ export APP_CLIENT_SECRET=$(< .sec_nonrtric-realm_$cid)
 
 envsubst < helm/nrt-pm/charts/dfc/values-template.yaml > helm/nrt-pm/charts/dfc/values.yaml
 
-
-#envsubst < helm/nrt-pm/charts/ics/values-template.yaml > helm/nrt-pm/charts/ics/values.yaml
-
-helm install --wait -n nonrtric nrt-pm helm/nrt-pm
+helm install --wait -f helm/global-values.yaml -n nonrtric nrt-pm helm/nrt-pm
 
 echo ""
 
index 9f4abb3..1de513e 100755 (executable)
@@ -29,14 +29,14 @@ check_error() {
     fi
 }
 
-export KHOST=$(kube_get_controlplane_host)
+export KUBERNETESHOST=$(kube_get_controlplane_host)
 if [ $? -ne 0 ]; then
-    echo $KHOST
+    echo $KUBERNETESHOST
     echo "Exiting"
     exit 1
 fi
 
-echo "Kubernetes control plane host: $KHOST"
+echo "Kubernetes control plane host: $KUBERNETESHOST"
 
 . scripts/kube_get_nodeport.sh
 . scripts/get_influxdb2_token.sh
@@ -53,7 +53,6 @@ bucket=pm-bucket
 echo "Creating bucket $bucket in influxdb2"
 create_influxdb2_bucket influxdb2-0 nonrtric $bucket
 
-export KC_PORT=$(kube_get_nodeport keycloak nonrtric http)
 . scripts/populate_keycloak.sh
 
 cid="console-setup"
index 1dd3859..0b3c7ab 100755 (executable)
@@ -25,9 +25,9 @@
 echo "Installing pmlog"
 
 # Variables
-export KHOST=$(kube_get_controlplane_host)
+export KUBERNETESHOST=$(kube_get_controlplane_host)
 if [ $? -ne 0 ]; then
-    echo $KHOST
+    echo $KUBERNETESHOST
     echo "Exiting"
     exit 1
 fi
@@ -42,12 +42,6 @@ check_error() {
     fi
 }
 
-# echo " Retriving influxdb2 access token..."
-# export INFLUXDB2_TOKEN=$(get_influxdb2_token influxdb2-0 nonrtric)
-
-# envsubst < nrt-pm-log/values-template.yaml > nrt-pm-log/values.yaml
-
-export KC_PORT=$(kube_get_nodeport keycloak nonrtric http)
 . scripts/populate_keycloak.sh
 
 cid="nrt-pm-log"
index 37fb826..6e00633 100755 (executable)
@@ -19,7 +19,7 @@
 
 
 
-echo "Installtion pmrapp"
+echo "Installing pmrapp"
 
 . scripts/kube_get_controlplane_host.sh
 . scripts/kube_get_nodeport.sh
@@ -38,16 +38,14 @@ check_error() {
 echo "Creating client in keycloak"
 
 # Find host and port to keycloak
-export KHOST=$(kube_get_controlplane_host)
+export KUBERNETESHOST=$(kube_get_controlplane_host)
 if [ $? -ne 0 ]; then
-    echo $KHOST
+    echo $KUBERNETESHOST
     echo "Exiting"
     exit 1
 fi
 
-create_topic kafka-1-kafka-bootstrap.nonrtric:9092 pm-rapp 10
-
-export KC_PORT=$(kube_get_nodeport keycloak nonrtric http)
+create_topic kafka-1-kafka-bootstrap.nonrtric:9092 rapp-topic 10
 
 . scripts/populate_keycloak.sh
 
@@ -62,7 +60,7 @@ export PMRAPP_CLIENT_SECRET=$(< .sec_nonrtric-realm_$cid)
 envsubst < helm/nrt-pm-rapp/values-template.yaml > helm/nrt-pm-rapp/values.yaml
 
 echo " helm install..."
-helm install --wait -n nonrtric nrt-pm-rapp helm/nrt-pm-rapp
+helm install --wait -f helm/global-values.yaml -n nonrtric nrt-pm-rapp helm/nrt-pm-rapp
 
 echo "done"
 
index 093cc76..c520c71 100644 (file)
@@ -1,11 +1,15 @@
 
 
 
-# Build buundle
+# Build bundle
 
 tar cvf bundle.tar rules data.json
 gzip bundle.tar
 
+# Installation
+
+The bundle is installed as part of the install-nrt.sh script.
+
 
 ## License
 
diff --git a/install/scripts/README.md b/install/scripts/README.md
new file mode 100644 (file)
index 0000000..b1e5572
--- /dev/null
@@ -0,0 +1,70 @@
+
+## General
+
+This folder contains scripts to push pm events to the file-ready kafka topic, either directly to the topic or via the ves-collector.
+For simple testing, pushing directly to the topic is much faster, but for integration with e.g. ran simulators, pushing via the ves-collector might be a better option.
+
+
+## Script - push-genfiles-to-file-ready-topic.sh
+
+This script pushes file-ready events for generated pm files directly to the file-ready kafka topic.
+
+The script pushes events from one or more RAN nodes. For each RAN node, one or more events are pushed in sequence, where each event in the sequence represents a 15 min measurement.
+Although each pm file represents a 15 min measurement period, all events are pushed in sequence without any delay between the files.
+The RAN nodes are specified as a single base name, e.g. "NODE-A", and the script will name the nodes "NODE-A-0", "NODE-A-1" etc.
+Each event contains a url to a pm file. These urls point to web servers (simulating RAN nodes). The number of web servers is fixed, so the urls are generated such that the load is spread out over the web servers.
+
+### Parameters
+
+`push-genfiles-to-file-ready-topic.sh <node-count> <num-of-events> <node-name-base> <file-extension> sftp|ftpes|https <num-servers> [hist]`
+
+- node-count : The number of simulated RAN nodes
+- num-of-events : The number of 15 min measurements (events) per node
+- node-name-base : Base name of the RAN nodes; index 0,1,2 etc will be added to the name
+- file-extension : The pm file extension - should match the actual pm file to be downloaded from the web servers (simulated RAN nodes)
+- sftp|ftpes|https : Protocol for downloading pm files - only https is currently supported
+- num-servers : The number of web servers for pm file download. Should match the number of web servers actually started by the install script. The script generates pm file urls pointing to one of the web servers to spread the load. Note that this number can be different from the node-count parameter.
+- hist : By default, each event only contains the reference to a single pm file. If this parameter is given, each event will contain the latest pm file and the 95 previous files to represent a full 24h set of pm files.
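+
+An illustrative invocation for a small test setup (the counts are examples, not required values):
+
+`push-genfiles-to-file-ready-topic.sh 2 4 NODE-A xml.gz https 2 hist`
+
+This pushes 4 events for each of the two nodes NODE-A-0 and NODE-A-1; with the hist option, each event references the latest pm file plus the 95 previous files.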
+
+
+## Script - push-genfiles-to-ves-collector.sh
+
+This script pushes generated pm file events to the file-ready kafka topic via the ves-collector.
+The parameters are exactly the same as for the `push-genfiles-to-file-ready-topic.sh` script.
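+
+An illustrative invocation (same small test setup as above):
+
+`push-genfiles-to-ves-collector.sh 2 4 NODE-A xml.gz https 2`
+
+Note that the generator script posts events to the ves-collector in batches of 20 per POST, compared to 1000 events per batch for the direct kafka push - one reason the direct path is faster.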
+
+## Typical usage
+
+The example below
+
+`<script> 20 100 GNODEB xml.gz https 10`
+
+will push 100 file-ready events for each of the 20 nodes. The timestamp of the first file for each node is as configured in the script; each following event has a timestamp 15 minutes later.
+The nodes will be named `GNODEB-0 -- GNODEB-19`.
+The file urls will use the xml.gz extension (the corresponding files are expected to exist on, or be generated by, the server pointed out by the url).
+Files will be downloaded by the datafile collector using https.
+The ranpm setup runs 10 https servers (ran simulators) from which the files are downloaded. Note that the num-servers parameter should be equal to or less than the number of https servers.
+
+The following file names will be present in the sequence of events - a total of 2000 events (20 nodes * 100 events each). The last file per node starts 99 x 15 min after the first, i.e. at 20230516.0745:
+
+`A20230515.0700+0100-0715+0100_GNODEB-0.xml.gz` \
+`A20230515.0700+0100-0715+0100_GNODEB-1.xml.gz` \
+`...`\
+`A20230516.0745+0100-0800+0100_GNODEB-18.xml.gz` \
+`A20230516.0745+0100-0800+0100_GNODEB-19.xml.gz`
+
+
+
+## License
+
+Copyright (C) 2023 Nordix Foundation. All rights reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
old mode 100644 (file)
new mode 100755 (executable)
index 7634899..7e0e95c
@@ -20,6 +20,9 @@
 # args: <job-id> <job-index-suffix> [<access-token>]
 # job file shall exist in file ".job.json"
 create_ics_job() {
+
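+    # Look up the nodePort of the informationservice http port - the job is created via <control-plane-host>:<nodePort>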
+    ICS_PROXY_PORT=$(kubectl get svc -n nonrtric informationservice --output jsonpath='{.spec.ports[?(@.name=="http")].nodePort}')
+    echo "NodePort to ics: "$ICS_PROXY_PORT
     JOB=$(<.job.json)
     echo $JOB
     retcode=1
@@ -30,7 +33,7 @@ create_ics_job() {
         else
             __bearer="Authorization: Bearer $TOKEN"
         fi
-        STAT=$(curl -s -X PUT -w '%{http_code}' -H accept:application/json -H Content-Type:application/json http://$KHOST:$(kube_get_nodeport informationservice nonrtric http)/data-consumer/v1/info-jobs/job-$1"-"$2 --data-binary @.job.json -H "$__bearer" )
+        STAT=$(curl -s -X PUT -w '%{http_code}' -H accept:application/json -H Content-Type:application/json http://$KUBERNETESHOST:$ICS_PROXY_PORT/data-consumer/v1/info-jobs/job-$1"-"$2 --data-binary @.job.json -H "$__bearer" )
         retcode=$?
         echo "curl return code: $retcode"
         if [ $retcode -eq 0 ]; then
index debc00f..77655ed 100644 (file)
@@ -37,7 +37,7 @@ create_topic() {
 
     echo "Creating topic: $topic with $partitions partition(s) in $kafka"
 
-    kubectl exec -it client -n nonrtric -- bash -c 'kafka-topics --create --topic '$topic'  --partitions '$partitions' --bootstrap-server '$kafka
+    kubectl exec -it kafka-client -n nonrtric -- bash -c 'kafka-topics --create --topic '$topic'  --partitions '$partitions' --bootstrap-server '$kafka
 
     return $?
 }
index cb1fc65..4917295 100755 (executable)
@@ -32,6 +32,6 @@ get_influxdb2_token() {
                        sleep 1
                fi
        done
-       echo $__influxdb2_access_token
+       echo -n $__influxdb2_access_token
        return 0
 }
\ No newline at end of file
index fe63ef5..61c5297 100755 (executable)
@@ -22,24 +22,25 @@ echo "script-home: "$SD
 cd $SD
 CWD=$PWD
 
-NODE_COUNT=$1
-EVT_COUNT=$2
-NODE_NAME_BASE=$3
-FILE_EXT=$4
-TYPE=$5
-SRV_COUNT=$6
-HIST=$7
+VARIANT=$1
+NODE_COUNT=$2
+EVT_COUNT=$3
+NODE_NAME_BASE=$4
+FILE_EXT=$5
+TYPE=$6
+SRV_COUNT=$7
+HIST=$8
 
 FTPES_PORT=2021
 SFTP_PORT=2022
 HTTPS_PORT=443
 
 print_usage() {
-    echo "Usage: kafka-client-send-genfiles-file-ready.sh <node-count> <num-of-events> <node-name-base> <file-extension> sftp|ftpes|https <num-servers> [hist]"
+    echo "Usage: kafka-client-send-genfiles-file-ready.sh ves|file-ready <node-count> <num-of-events> <node-name-base> <file-extension> sftp|ftpes|https <num-servers> [hist]"
     exit 1
 }
 echo $@
-if [ $# -lt 6 ] && [ $# -gt 7 ]; then
+if [ $# -lt 7 ] || [ $# -gt 8 ]; then
     print_usage
 fi
 
@@ -65,16 +66,25 @@ if [ ! -z "$HIST" ]; then
     fi
     HIST_LEN=96
 fi
+PUSHMSG=""
+if [ $VARIANT == "ves" ]; then
+    PUSHMSG="to ves-collector"
+elif [ $VARIANT == "file-ready" ]; then
+    PUSHMSG="to file-ready topic"
+fi
 
-# Unix time of 20230220.1300
+# Unix time of 20230515.0700
 # If the value is changed, make sure to set the same time to the env var GENERATED_FILE_START_TIME in kube-plt.yaml for the https-server
-BEGINTIME=1676898000
+BEGINTIME=1684134000
 # Time zone
 # If the value is changed, make sure to set the same value to the env var GENERATED_FILE_TIMEZONE in kube-plt.yaml for the https-server
 TIMEZONE="+0100"
 CURTIME=$BEGINTIME
 
 BATCHSIZE=1000
+if [ $VARIANT == "ves" ]; then
+    BATCHSIZE=20
+fi
 
 CNTR=0
 TCNTR=0
@@ -84,6 +94,9 @@ for (( i=0; i<$EVT_COUNT; i++)); do
     if [ $CNTR -eq 0 ]; then
         rm .out.json
         touch .out.json
+        if [ $VARIANT == "ves" ]; then
+            echo '{"eventList": [' > .out.json
+        fi
     fi
 
     if [ "$HIST" == "" ]; then
@@ -102,10 +115,8 @@ for (( i=0; i<$EVT_COUNT; i++)); do
         if [ "$HIST" == "" ]; then
             NO="$NODE_NAME_BASE-$j"
 
-            #FN="A20000626.2315+0200-2330+0200_$NO-$i.$FILE_EXT"
             FN="A$ST$TIMEZONE-$ET${TIMEZONE}_$NO.$FILE_EXT"
             let SRV_ID=$j%$SRV_COUNT
-            #let SRV_ID=SRV_ID+1
             echo "NODE "$NO
             echo "FILENAME "$FN
 
@@ -123,13 +134,20 @@ for (( i=0; i<$EVT_COUNT; i++)); do
                 echo "HTTP SERVER "$SRV
                 URL="https://$SRV:$HTTPS_PORT/generatedfiles/$FN"
             fi
-            EVT='{"event":{"commonEventHeader":{"sequence":0,"eventName":"Noti_RnNode-Ericsson_FileReady","sourceName":"'$NO'","lastEpochMicrosec":'$CURTIMEMS',"startEpochMicrosec":'$STTIMEMS',"timeZoneOffset":"UTC'$TIMEZONE'","changeIdentifier":"PM_MEAS_FILES"},"notificationFields":{"notificationFieldsVersion":"notificationFieldsVersion","changeType":"FileReady","changeIdentifier":"PM_MEAS_FILES","arrayOfNamedHashMap":[{"name":"'$FN'","hashMap":{"fileFormatType":"org.3GPP.32.435#measCollec","location":"'$URL'","fileFormatVersion":"V10","compression":"gzip"}}]}}}'
+
+            if [ $VARIANT == "ves" ] && [ $CNTR -gt 0 ]; then
+                echo "," >> .out.json
+            fi
+            if [ $VARIANT == "ves" ]; then
+                EVT='{"commonEventHeader":{"domain":"notification","sequence":0,"eventName":"Noti_RnNode-Ericsson_FileReady","eventId":"FileReady_'$TCNTR'","priority":"Normal","version":"4.0.1","vesEventListenerVersion":"7.0.1","sourceName":"'$NO'","reportingEntityName":"'$NO'","lastEpochMicrosec":'$CURTIMEMS',"startEpochMicrosec":'$STTIMEMS',"timeZoneOffset":"UTC'$TIMEZONE'"},"notificationFields":{"notificationFieldsVersion":"2.0","changeType":"FileReady","changeIdentifier":"PM_MEAS_FILES","arrayOfNamedHashMap":[{"name":"'$FN'","hashMap":{"fileFormatType":"org.3GPP.32.435#measCollec","location":"'$URL'","fileFormatVersion":"V10","compression":"gzip"}}]}}'
+            else
+                EVT='{"event":{"commonEventHeader":{"sequence":0,"eventName":"Noti_RnNode-Ericsson_FileReady","sourceName":"'$NO'","lastEpochMicrosec":'$CURTIMEMS',"startEpochMicrosec":'$STTIMEMS',"timeZoneOffset":"UTC'$TIMEZONE'","changeIdentifier":"PM_MEAS_FILES"},"notificationFields":{"notificationFieldsVersion":"notificationFieldsVersion","changeType":"FileReady","changeIdentifier":"PM_MEAS_FILES","arrayOfNamedHashMap":[{"name":"'$FN'","hashMap":{"fileFormatType":"org.3GPP.32.435#measCollec","location":"'$URL'","fileFormatVersion":"V10","compression":"gzip"}}]}}}'
+            fi
             echo $EVT >> .out.json
         else
             NO="$NODE_NAME_BASE-$j"
 
             let SRV_ID=$j%$SRV_COUNT
-            #let SRV_ID=SRV_ID+1
             echo "NODE "$NO
 
             EVT_FRAG=""
@@ -145,48 +163,55 @@ for (( i=0; i<$EVT_COUNT; i++)); do
                 if [ $FID -lt 0 ]; then
                     FN="NONEXISTING_$NO.$FILE_EXT"
                 else
-                    #FN="A20000626.2315+0200-2330+0200_$NO-$FID.$FILE_EXT"
                     FN="A$ST$TIMEZONE-$ET${TIMEZONE}_$NO.$FILE_EXT"
                 fi
                 echo "FILENAME "$FN
-                if [ $TYPE == "sftp" ]; then
-                    SRV="ftp-sftp-$SRV_ID"
-                    #echo "FTP SERVER "$SRV
-                    URL="sftp://onap:pano@$SRV:$SFTP_PORT/$FN"
-                elif [ $TYPE == "ftpes" ]; then
-                    SRV="ftp-ftpes-$SRV_ID"
-                    #echo "FTP SERVER "$SRV
-                    URL="ftpes://onap:pano@$SRV:$FTPES_PORT/$FN"
-                elif [ $TYPE == "https" ]; then
-                    SRV="pm-https-server-$SRV_ID.pm-https-server.ran"
-                    #echo "HTTP SERVER "$SRV
-                    URL="https://$SRV:$HTTPS_PORT/files/$FN"
-                fi
+                SRV="pm-https-server-$SRV_ID.pm-https-server.ran"
+                URL="https://$SRV:$HTTPS_PORT/files/$FN"
                 if [ "$EVT_FRAG" != "" ]; then
                     EVT_FRAG=$EVT_FRAG","
                 fi
                 EVT_FRAG=$EVT_FRAG'{"name":"'$FN'","hashMap":{"fileFormatType":"org.3GPP.32.435#measCollec","location":"'$URL'","fileFormatVersion":"V10","compression":"gzip"}}'
             done
 
-            EVT='{"event":{"commonEventHeader":{"sequence":0,"eventName":"Noti_RnNode-Ericsson_FileReady","sourceName":"'$NO'","lastEpochMicrosec":'$CURTIMEMS',"startEpochMicrosec":'$STTIMEMS',"timeZoneOffset":"UTC'$TIMEZONE'","changeIdentifier":"PM_MEAS_FILES"},"notificationFields":{"notificationFieldsVersion":"notificationFieldsVersion","changeType":"FileReady","changeIdentifier":"PM_MEAS_FILES","arrayOfNamedHashMap":['$EVT_FRAG']}}}'
+            if [ $VARIANT == "ves" ] && [ $CNTR -gt 0 ]; then
+                echo "," >> .out.json
+            fi
+            if [ $VARIANT == "ves" ]; then
+                EVT='{"commonEventHeader":{"domain":"notification","sequence":0,"eventName":"Noti_RnNode-Ericsson_FileReady","eventId":"FileReady_'$TCNTR'","priority":"Normal","version":"4.0.1","vesEventListenerVersion":"7.0.1","sourceName":"'$NO'","reportingEntityName":"'$NO'","lastEpochMicrosec":'$CURTIMEMS',"startEpochMicrosec":'$STTIMEMS',"timeZoneOffset":"UTC'$TIMEZONE'"},"notificationFields":{"notificationFieldsVersion":"2.0","changeType":"FileReady","changeIdentifier":"PM_MEAS_FILES","arrayOfNamedHashMap":[{"name":"'$FN'","hashMap":{"fileFormatType":"org.3GPP.32.435#measCollec","location":"'$URL'","fileFormatVersion":"V10","compression":"gzip"}}]}}'
+            else
+                EVT='{"event":{"commonEventHeader":{"sequence":0,"eventName":"Noti_RnNode-Ericsson_FileReady","sourceName":"'$NO'","lastEpochMicrosec":'$CURTIMEMS',"startEpochMicrosec":'$STTIMEMS',"timeZoneOffset":"UTC'$TIMEZONE'","changeIdentifier":"PM_MEAS_FILES"},"notificationFields":{"notificationFieldsVersion":"notificationFieldsVersion","changeType":"FileReady","changeIdentifier":"PM_MEAS_FILES","arrayOfNamedHashMap":[{"name":"'$FN'","hashMap":{"fileFormatType":"org.3GPP.32.435#measCollec","location":"'$URL'","fileFormatVersion":"V10","compression":"gzip"}}]}}}'
+            fi
             echo $EVT >> .out.json
-
         fi
 
         let CNTR=CNTR+1
         let TCNTR=TCNTR+1
         if [ $CNTR -ge $BATCHSIZE ]; then
-            echo "Pushing batch of $CNTR events"
-            cat .out.json | kafka-console-producer --topic file-ready --broker-list kafka-1-kafka-bootstrap.nonrtric:9092
+            echo "Pushing batch of $CNTR events $PUSHMSG"
+            if [ $VARIANT == "ves" ]; then
+                echo ']}' >> .out.json
+                curl -s -X POST http://ves-collector.nonrtric:8080/eventListener/v7/eventBatch --header 'Content-Type: application/json' --data-binary @.out.json
+            else
+                cat .out.json | kafka-console-producer --topic file-ready --broker-list kafka-1-kafka-bootstrap.nonrtric:9092
+            fi
             rm .out.json
             touch .out.json
+            if [ $VARIANT == "ves" ]; then
+                echo '{"eventList": [' > .out.json
+            fi
             CNTR=0
         fi
     done
 done
 if [ $CNTR -ne 0 ]; then
-    echo "Pushing batch of $CNTR events"
-    cat .out.json | kafka-console-producer --topic file-ready --broker-list kafka-1-kafka-bootstrap.nonrtric:9092
+    echo "Pushing batch of $CNTR events $PUSHMSG"
+    if [ $VARIANT == "ves" ]; then
+        echo ']}' >> .out.json
+        curl -s -X POST http://ves-collector.nonrtric:8080/eventListener/v7/eventBatch --header 'Content-Type: application/json' --data-binary @.out.json
+    else
+        cat .out.json | kafka-console-producer --topic file-ready --broker-list kafka-1-kafka-bootstrap.nonrtric:9092
+    fi
 fi
 
 echo "Pushed $TCNTR events"
index 230ac2f..abc43f2 100755 (executable)
 
 # Script intended to be sourced by other script to add functions to the keycloak rest API
 
-echo "Cluster ip: $KHOST"
+echo "Cluster ip: $KUBERNETESHOST"
 
-echo "Keycloak nodeport: $KC_PORT"
-
-#KC_URL="http://$KHOST:$KC_PORT"
 KC_URL=http://keycloak.nonrtric:8080
 echo "Keycloak url: "$KC_URL
 
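+# Look up the nodePort of the keycloak-proxy service - used as an http proxy to reach keycloak from outside the cluster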
+KC_PROXY_PORT=$(kubectl get svc -n nonrtric keycloak-proxy --output jsonpath='{.spec.ports[?(@.name=="http")].nodePort}')
+echo "Nodeport to keycloak proxy: "$KC_PROXY_PORT
+
 __get_admin_token() {
     echo "Get admin token"
     ADMIN_TOKEN=""
     while [ "${#ADMIN_TOKEN}" -lt 20 ]; do
-        ADMIN_TOKEN=$(curl --proxy localhost:31784 -s -X POST --max-time 2     "$KC_URL/realms/master/protocol/openid-connect/token"     -H "Content-Type: application/x-www-form-urlencoded"     -d "username=admin" -d "password=admin" -d 'grant_type=password' -d "client_id=admin-cli"  |  jq -r '.access_token')
+        ADMIN_TOKEN=$(curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s -X POST --max-time 2     "$KC_URL/realms/master/protocol/openid-connect/token"     -H "Content-Type: application/x-www-form-urlencoded"     -d "username=admin" -d "password=admin" -d 'grant_type=password' -d "client_id=admin-cli"  |  jq -r '.access_token')
         if [ "${#ADMIN_TOKEN}" -lt 20 ]; then
             echo "Could not get admin token, retrying..."
             echo "Retrieved token: $ADMIN_TOKEN"
@@ -57,7 +57,6 @@ indent2() { sed 's/^/  /'; }
 decode_token() {
     echo "Decoding access_token"
     echo $1 | jq -R 'split(".") | .[0,1] | @base64d | fromjson'
-    #echo $1 | jq -r .access_token | jq -R 'split(".") | .[1] | @base64d | fromjson'
 }
 
 decode_jwt() {
@@ -67,7 +66,8 @@ decode_jwt() {
 
 list_realms() {
     echo "Listing all realms"
-    curl --proxy localhost:31784 -s \
+    __check_admin_token
+    curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
         -X GET \
         -H "Authorization: Bearer ${ADMIN_TOKEN}" \
         "$KC_URL/admin/realms" | jq -r '.[].id' | indent2
@@ -76,7 +76,8 @@ delete_realms() {
     echo "$@"
     for realm in "$@"; do
         echo "Attempt to delete realm: $realm"
-        curl --proxy localhost:31784 -s \
+        __check_admin_token
+        curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
         -X DELETE \
         -H "Authorization: Bearer ${ADMIN_TOKEN}" \
         "$KC_URL/admin/realms/$realm" | indent1
@@ -92,7 +93,7 @@ create_realms() {
     echo "Creating realms: $@"
     while [ $# -gt 0 ]; do
         echo " Attempt to create realm: $1"
-
+        __check_admin_token
 cat > .jsonfile1 <<- "EOF"
 {
 "realm":"$__realm_name",
@@ -101,7 +102,7 @@ cat > .jsonfile1 <<- "EOF"
 EOF
         export __realm_name=$1
         envsubst < .jsonfile1 > .jsonfile2
-        curl --proxy localhost:31784 -s \
+        curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
         -X POST \
         -H "Authorization: Bearer ${ADMIN_TOKEN}" \
         -H "Content-Type: application/json" \
@@ -120,7 +121,6 @@ create_clients() {
     __realm=$1
     shift
     echo "Attempt to create clients $@ for realm: $__realm"
-    __check_admin_token
 
 cat > .jsonfile1 <<- "EOF"
 {
@@ -133,9 +133,10 @@ cat > .jsonfile1 <<- "EOF"
 EOF
     while [ $# -gt 0 ]; do
         echo " Creating client: $1"
+        __check_admin_token
         export __client_name=$1
         envsubst < .jsonfile1 > .jsonfile2
-        curl --proxy localhost:31784 -s \
+        curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
         -X POST \
         -H "Authorization: Bearer ${ADMIN_TOKEN}" \
         -H "Content-Type: application/json" \
@@ -151,7 +152,7 @@ EOF
 }
 
 __get_client_id() {
-    __client_data=$(curl --proxy localhost:31784 -s \
+    __client_data=$(curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
         -X GET \
         -H "Authorization: Bearer ${ADMIN_TOKEN}" \
         "$KC_URL/admin/realms/$1/clients?clientId=$2")
@@ -167,8 +168,8 @@ generate_client_secrets() {
     __realm=$1
     shift
     echo "Attempt to generate secret for clients $@ in realm $__realm"
-    __check_admin_token
     while [ $# -gt 0 ]; do
+        __check_admin_token
         __client_id=$(__get_client_id $__realm $1)
         if [ $? -ne 0 ]; then
             echo "Command failed"
@@ -176,7 +177,7 @@ generate_client_secrets() {
         fi
         echo " Client id for client $1 in realm $__realm: "$__client_id | indent1
         echo "  Creating secret"
-        __client_secret=$(curl --proxy localhost:31784 -s \
+        __client_secret=$(curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
                 -X POST \
                 -H "Authorization: Bearer ${ADMIN_TOKEN}" \
                 "$KC_URL/admin/realms/$__realm/clients/$__client_id/client-secret")
@@ -184,7 +185,7 @@ generate_client_secrets() {
             echo "Command failed"
             exit 1
         fi
-        __client_secret=$(curl --proxy localhost:31784 -s \
+        __client_secret=$(curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
                 -X GET \
                 -H "Authorization: Bearer ${ADMIN_TOKEN}" \
                 "$KC_URL/admin/realms/$__realm/clients/$__client_id/client-secret")
@@ -202,6 +203,7 @@ generate_client_secrets() {
 
 create_client_roles() {
     # <realm-name> <client-name> [<role-name>]+
+    __check_admin_token
     __client_id=$(__get_client_id $1 $2)
     if [ $? -ne 0 ]; then
         echo "Command failed"
@@ -218,7 +220,7 @@ cat > .jsonfile1 <<- "EOF"
 EOF
         export __role=$1
         envsubst < .jsonfile1 > .jsonfile2
-        curl --proxy localhost:31784 -s \
+        curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
         -X POST \
         -H "Authorization: Bearer ${ADMIN_TOKEN}" \
         -H "Content-Type: application/json" \
@@ -234,7 +236,7 @@ EOF
 
 __get_service_account_id() {
     # <realm-name> <client-id>
-    __service_account_data=$(curl --proxy localhost:31784 -s \
+    __service_account_data=$(curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
         -X GET \
         -H "Authorization: Bearer ${ADMIN_TOKEN}" \
         "$KC_URL/admin/realms/$1/clients/$2/service-account-user")
@@ -246,13 +248,13 @@ __get_service_account_id() {
     return 0
 }
 
-#     curl --proxy localhost:31784 -s \
+#     curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
 #     -X GET \
 #     -H "Authorization: Bearer ${ADMIN_TOKEN}" \
 #     "$KC_URL/admin/realms/$__realm/users/$__service_account_id/role-mappings/clients/$__client_id/available"
 __get_client_available_role_id() {
     # <realm-name> <service-account-id> <client-id> <client-role-name>
-    __client_role_data=$(curl --proxy localhost:31784 -s \
+    __client_role_data=$(curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
         -X GET \
         -H "Authorization: Bearer ${ADMIN_TOKEN}" \
         "$KC_URL/admin/realms/$1/users/$2/role-mappings/clients/$3/available")
@@ -267,7 +269,7 @@ __get_client_available_role_id() {
 
 __get_client_mapped_role_id() {
     # <realm-name> <service-account-id> <client-id> <client-role-name>
-    __client_role_data=$(curl --proxy localhost:31784 -s \
+    __client_role_data=$(curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
         -X GET \
         -H "Authorization: Bearer ${ADMIN_TOKEN}" \
         "$KC_URL/admin/realms/$1/users/$2/role-mappings/clients/$3")
@@ -283,6 +285,7 @@ __get_client_mapped_role_id() {
 add_client_roles_mapping()  {
     # <realm-name> <client-name> [<role-name>]+
     echo "Attempt to add roles ${@:3} to client $2 in realm $1"
+    __check_admin_token
     __realm=$1
     __client=$2
     __client_id=$(__get_client_id $__realm $__client)
@@ -322,7 +325,7 @@ add_client_roles_mapping()  {
     echo "]" >> .jsonfile2
     echo "  Adding roles $__all_roles to client $__client in realm $__realm"
 
-    curl --proxy localhost:31784 -s \
+    curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
     -X POST \
     -H "Authorization: Bearer ${ADMIN_TOKEN}" \
     -H "Content-Type: application/json" \
@@ -340,6 +343,7 @@ add_client_roles_mapping()  {
 remove_client_roles_mapping()  {
     # <realm-name> <client-name> [<role-name>]+
     echo "Attempt to removed roles ${@:3} from client $2 in realm $1"
+    __check_admin_token
     __realm=$1
     __client=$2
     __client_id=$(__get_client_id $__realm $__client)
@@ -379,7 +383,7 @@ remove_client_roles_mapping()  {
     echo "]" >> .jsonfile2
     echo "  Removing roles $__all_roles from client $__client in realm $__realm"
 
-    curl --proxy localhost:31784 -s \
+    curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
     -X DELETE \
     -H "Authorization: Bearer ${ADMIN_TOKEN}" \
     -H "Content-Type: application/json" \
@@ -394,12 +398,13 @@ remove_client_roles_mapping()  {
 
 add_client_hardcoded-claim-mapper() {
 # <realm-name> <client-name> <mapper-name> <claim-name> <claim-value>
+    __check_admin_token
     __realm=$1
     __client=$2
     export __mapper_name=$3
     export __claim_name=$4
     export __claim_value=$5
-set -x
+
     __client_id=$(__get_client_id $__realm $__client)
     if [ $? -ne 0 ]; then
         echo " Fatal error when getting client id, response: "$?
@@ -422,7 +427,7 @@ set -x
 }
 EOF
     envsubst < .jsonfile1 > .jsonfile2
-    curl --proxy localhost:31784 -s \
+    curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
     -X POST \
     -H "Authorization: Bearer ${ADMIN_TOKEN}" \
     -H "Content-Type: application/json" \
@@ -440,6 +445,7 @@ EOF
 # Get a client token
 # args: <realm-name> <client-name>
 get_client_token() {
+    __check_admin_token
     __realm=$1
     __client=$2
     __client_id=$(__get_client_id $__realm $__client)
@@ -449,7 +455,7 @@ get_client_token() {
     fi
     #echo " Client id for client $__client in realm $__realm: "$__client_id | indent1
 
-    __client_secret=$(curl --proxy localhost:31784 -s -f \
+    __client_secret=$(curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s -f \
             -X GET \
             -H "Authorization: Bearer ${ADMIN_TOKEN}" \
             "$KC_URL/admin/realms/$__realm/clients/$__client_id/client-secret")
@@ -460,7 +466,7 @@ get_client_token() {
 
     __client_secret=$(echo $__client_secret | jq -r .value)
 
-       __TMP_TOKEN=$(curl --proxy localhost:31784 -f -s -X POST $KC_URL/realms/$__realm/protocol/openid-connect/token   \
+       __TMP_TOKEN=$(curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -f -s -X POST $KC_URL/realms/$__realm/protocol/openid-connect/token   \
                   -H Content-Type:application/x-www-form-urlencoded \
                   -d client_id="$__client" -d client_secret="$__client_secret" -d grant_type=client_credentials)
        if [ $? -ne 0 ]; then
index 574ae70..1d5b7cc 100755 (executable)
@@ -67,9 +67,9 @@ else
 fi
 
 chmod +x kafka-client-send-genfiles-file-ready.sh
-kubectl cp kafka-client-send-genfiles-file-ready.sh nonrtric/client:/home/appuser
+kubectl cp kafka-client-send-genfiles-file-ready.sh nonrtric/kafka-client:/home/appuser
 
-kubectl exec client -n nonrtric -- bash -c './kafka-client-send-genfiles-file-ready.sh '$NODE_COUNT' '$EVT_COUNT' '$NODE_NAME_BASE' '$FILE_EXT' '$TYPE' '$SRV_COUNT' '$HIST
+kubectl exec kafka-client -n nonrtric -- bash -c './kafka-client-send-genfiles-file-ready.sh file-ready '$NODE_COUNT' '$EVT_COUNT' '$NODE_NAME_BASE' '$FILE_EXT' '$TYPE' '$SRV_COUNT' '$HIST
 
 echo done
 
diff --git a/install/scripts/push-genfiles-to-ves-collector.sh b/install/scripts/push-genfiles-to-ves-collector.sh
new file mode 100755 (executable)
index 0000000..7b5fb43
--- /dev/null
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2023 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+SD=$(dirname -- "$0")
+echo "${0##*/} script-home: "$SD
+cd $SD
+CWD=$PWD
+
+
+NODE_COUNT=$1
+EVT_COUNT=$2
+NODE_NAME_BASE=$3
+FILE_EXT=$4
+TYPE=$5
+SRV_COUNT=$6
+HIST=$7
+
+print_usage() {
+    echo "Usage: push-genfiles-to-ves-collector.sh <node-count> <num-of-events> <node-name-base> <file-extension> sftp|ftpes|https <num-servers> [hist]"
+    exit 1
+}
+if [ $# -lt 6 ] || [ $# -gt 7 ]; then
+    print_usage
+fi
+
+if [ $TYPE == "sftp" ]; then
+    echo "sftp servers not yet supported"
+elif [ $TYPE == "ftpes" ]; then
+    echo "ftpes servers not yet supported"
+elif [ $TYPE == "https" ]; then
+    :
+else
+    print_usage
+fi
+
+if [ $FILE_EXT != "xml.gz" ]; then
+    echo "only xml.gz format supported"
+    print_usage
+fi
+
+if [ ! -z "$HIST" ]; then
+    if [ $HIST != "hist" ]; then
+        print_usage
+    fi
+fi
+
+if [ "$KUBECONFIG" == "" ]; then
+    echo "Env var KUBECONFIG not set, using current settings for kubectl"
+else
+    echo "Env var KUBECONFIG set to $KUBECONFIG"
+fi
+
+chmod +x kafka-client-send-genfiles-file-ready.sh
+kubectl cp kafka-client-send-genfiles-file-ready.sh nonrtric/kafka-client:/home/appuser
+
+kubectl exec kafka-client -n nonrtric -- bash -c './kafka-client-send-genfiles-file-ready.sh ves '$NODE_COUNT' '$EVT_COUNT' '$NODE_NAME_BASE' '$FILE_EXT' '$TYPE' '$SRV_COUNT' '$HIST
+
+echo done
+
index 7b324ed..ed24340 100755 (executable)
@@ -44,7 +44,6 @@ echo "Uninstall $INST"
 
 helm repo remove strimzi
 helm uninstall   -n nonrtric strimzi-kafka-crds
-#kubectl delete -f 'https://strimzi.io/install/latest?namespace=nonrtric' -n nonrtric
 check_error $? "$INST"
 
 helm uninstall namespaces
index 38f1b92..80867f2 100644 (file)
 #  ============LICENSE_END=================================================
 #
 
-FROM golang:1.19-bullseye AS build
+FROM golang:1.20.3-buster AS build
+
 WORKDIR /app
-COPY go.mod .
-COPY go.sum .
-RUN go mod download
+
 COPY main.go .
-RUN go build -o /kafka-pm-producer
+RUN go mod init main
+RUN go mod tidy
+
+RUN go build -o /pm-file-converter
 
 #Replaced distroless image with ubuntu for debug purpose
 #FROM gcr.io/distroless/base-debian11
 FROM ubuntu
 WORKDIR /
 ## Copy from "build" stage
-COPY --from=build /kafka-pm-producer .
+COPY --from=build /pm-file-converter .
 COPY server.key /server.key
 COPY server.crt /server.crt
 
@@ -36,4 +38,4 @@ COPY application_configuration.json /application_configuration.json
 
 ##Uncomment this when using distroless image
 #USER nonroot:nonroot
-ENTRYPOINT ["/kafka-pm-producer"]
+ENTRYPOINT ["/pm-file-converter"]
index 2b6d542..93c19d8 100644 (file)
@@ -1,9 +1,7 @@
 
+## PM Data Producer
 
-## Producer
-
-Producer supporting data types for pm xml to json conversion and pm json filtering
-
+### Manual build, tag and push to image repo
 
 Build for docker or local kubernetes\
 `./build.sh no-push`
@@ -11,6 +9,38 @@ Build for docker or local kubernetes\
 Build for remote kubernetes - an externally accessible image repo (e.g. docker hub) is needed  \
 `./build.sh <external-image-repo>`
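+
+For example, to build and push a specific image tag using the --tag option (an illustrative invocation; the repo name is a placeholder):
+
+`./build.sh <external-image-repo> --tag 1.0.0-test`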
 
+### Function
+
+A producer supporting data types for pm xml to json conversion and pm json filtering, with output to kafka or influx db.
+
+### Configuration
+
+The app expects the following environment variables:
+
+- CREDS_GRANT_TYPE : Grant type (keycloak)
+- CREDS_CLIENT_SECRET : Client secret (keycloak)
+- CREDS_CLIENT_ID : Client id (keycloak)
+- AUTH_SERVICE_URL : Url to keycloak for fetching tokens
+- KAFKA_SERVER : Host and port of the kafka bootstrap server
+- ICS : Host and port of the Information Coordination Service
+- SELF : Host and port of this app
+
+The following env vars are optional:
+
+- FILES_VOLUME : Path to persistent file storage
+- FILESTORE_USER : Minio filestore user
+- FILESTORE_PWD : Minio filestore password
+- FILESTORE_SERVER : Host and port of the minio filestore
+- KP : Id of the app
+
+The app can be configured to read files from a mounted file system or from a filestore server (minio).
+
+Mounted files:
+Configure `FILES_VOLUME` and leave the vars starting with FILESTORE empty.
+
+Filestore:
+Configure the env vars starting with FILESTORE and leave `FILES_VOLUME` empty.
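+
+A minimal sketch of a filestore-mode configuration - the values below are illustrative assumptions for a test setup; client id, ports and credentials are placeholders, not documented defaults:
+
+`CREDS_GRANT_TYPE=client_credentials` \
+`CREDS_CLIENT_ID=pm-producer-json2kafka` \
+`CREDS_CLIENT_SECRET=<client secret from keycloak>` \
+`AUTH_SERVICE_URL=http://keycloak.nonrtric:8080/realms/nonrtric-realm/protocol/openid-connect/token` \
+`KAFKA_SERVER=kafka-1-kafka-bootstrap.nonrtric:9092` \
+`ICS=informationservice.nonrtric:<http port>` \
+`SELF=<this app's host>:<port>` \
+`FILESTORE_USER=<minio user>` \
+`FILESTORE_PWD=<minio password>` \
+`FILESTORE_SERVER=<minio host>:<port>`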
+
+
 
 ## License
 
index 69bcc9a..4803020 100755 (executable)
 #
 
 #Build image from Dockerfile with/without custom image tag
-#Optionally push to external docker hub repo
+#Optionally push to external image repo
 
 print_usage() {
-    echo "Usage: build.sh no-push|bm|<docker-hub-repo-name> [<image-tag>]"
+    echo "Usage: build.sh no-push|<docker-hub-repo-name> [--tag <image-tag>]"
     exit 1
 }
 
-if [ $# -ne 1 ] && [ $# -ne 2 ]; then
+if [ $# -lt 1 ] || [ $# -gt 3 ]; then
     print_usage
 fi
 
-IMAGE_NAME="kafka-pm-producer"
+IMAGE_NAME="pm-file-converter"
 IMAGE_TAG="latest"
 REPO=""
 if [ $1 == "no-push" ]; then
@@ -38,15 +38,38 @@ else
     REPO=$1
     echo "Attempt to push built image to: "$REPO
 fi
+shift
+while [ $# -ne 0 ]; do
+    if [ $1 == "--tag" ]; then
+        shift
+        if [ -z "$1" ]; then
+            print_usage
+        fi
+        IMAGE_TAG=$1
+        echo "Setting image tag to: "$IMAGE_TAG
+        shift
+    else
+        echo "Unknown parameter: $1"
+        print_usage
+    fi
+done
 
-if [ "$2" != "" ]; then
-    IMAGE_TAG=$2
-fi
- echo "Setting image tag to: "$IMAGE_TAG
+./gen-cert.sh
+echo ""
+echo "Certs generated"
 
 IMAGE=$IMAGE_NAME:$IMAGE_TAG
-echo "Building image $IMAGE"
+
+export DOCKER_DEFAULT_PLATFORM=linux/amd64
+CURRENT_PLATFORM=$(docker system info --format '{{.OSType}}/{{.Architecture}}')
+if [ $CURRENT_PLATFORM != $DOCKER_DEFAULT_PLATFORM ]; then
+    echo "Image may not work on the current platform: $CURRENT_PLATFORM, only platform $DOCKER_DEFAULT_PLATFORM supported"
+fi
+
+echo "Building image: $IMAGE with architecture: $DOCKER_DEFAULT_PLATFORM"
+
 docker build -t $IMAGE_NAME:$IMAGE_TAG .
+
 if [ $? -ne 0 ]; then
     echo "BUILD FAILED"
     exit 1
index 748f860..8fa677e 100755 (executable)
 
 openssl genrsa -out server.key 2048
 openssl ecparam -genkey -name secp384r1 -out server.key
-openssl req -new -x509 -sha256 -key server.key -out server.crt -days 3650
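+# Pipe answers to the interactive openssl req prompts below: country code SE, remaining fields left blank (".")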
+cat <<__EOF__ |  openssl req -new -x509 -sha256 -key server.key -out server.crt -days 3650
+SE
+.
+.
+.
+.
+.
+.
+__EOF__
\ No newline at end of file
index 9c3d8a4..d37b0d2 100644 (file)
@@ -444,13 +444,12 @@ func main() {
 
        cer, err := tls.LoadX509KeyPair(server_crt, server_key)
        if err != nil {
-               log.Error("Cannot load key and cert - %v\n", err)
+               log.Error("Cannot load key and cert - ", err)
                return
        }
        config := &tls.Config{Certificates: []tls.Certificate{cer}}
        https_server := &http.Server{Addr: ":" + strconv.Itoa(https_port), TLSConfig: config, Handler: nil}
 
-       //TODO: Make http on/off configurable
        // Run http
        go func() {
                log.Info("Starting http service...")
@@ -458,11 +457,10 @@ func main() {
                if err == http.ErrServerClosed { // graceful shutdown
                        log.Info("http server shutdown...")
                } else if err != nil {
-                       log.Error("http server error: %v\n", err)
+                       log.Error("http server error: ", err)
                }
        }()
 
-       //TODO: Make https on/off configurable
        //  Run https
        go func() {
                log.Info("Starting https service...")
@@ -470,7 +468,7 @@ func main() {
                if err == http.ErrServerClosed { // graceful shutdown
                        log.Info("https server shutdown...")
                } else if err != nil {
-                       log.Error("https server error: %v\n", err)
+                       log.Error("https server error: ", err)
                }
        }()
        check_tcp(strconv.Itoa(http_port))
@@ -596,7 +594,6 @@ func register_producer() bool {
                        log.Error("Registering producer: ", producer_instance_name, " - failed")
                        return false
                } else {
-                       //TODO: http/https should be configurable
                        ok := send_http_request(json, http.MethodPut, "http://"+ics_server+"/data-producer/v1/info-types/"+data.ProdDataTypes[i].ID, true, creds_grant_type != "")
                        if !ok {
                                log.Error("Cannot register type: ", data.ProdDataTypes[i].ID)
@@ -611,9 +608,7 @@ func register_producer() bool {
        log.Debug("Registering types: ", new_type_names)
        m := make(map[string]interface{})
        m["supported_info_types"] = new_type_names
-       //TODO: http/https should be configurable
        m["info_job_callback_url"] = "http://" + self + "/callbacks/job/" + producer_instance_name
-       //TODO: http/https should be configurable
        m["info_producer_supervision_callback_url"] = "http://" + self + "/callbacks/supervision/" + producer_instance_name
 
        json, err := json.Marshal(m)
@@ -673,7 +668,6 @@ func remove_type_job(dp DataType) {
 
        if dp.ext_job_created == true {
                dp.ext_job_id = dp.InputJobType + "_" + generate_uuid_from_type(dp.InputJobType)
-               //TODO: http/https should be configurable
                ok := send_http_request(*dp.ext_job, http.MethodDelete, "http://"+ics_server+"/data-consumer/v1/info-jobs/"+dp.ext_job_id, true, creds_grant_type != "")
                if !ok {
                        log.Error("Cannot delete job: ", dp.ext_job_id)
@@ -709,8 +703,6 @@ func start_type_job(dp DataType) {
                go start_job_json_file_data(dp.ID, job_record.job_control, job_record.data_in_channel, data_out_channel, false)
        case "json-file-data-from-filestore-to-influx":
                go start_job_json_file_data_influx(dp.ID, job_record.job_control, job_record.data_in_channel, true)
-       // case "json-data-to-influx":
-       //      go start_job_json_data_influx(dp.ID, job_record.job_control, job_record.data_in_channel)
 
        default:
        }
@@ -741,13 +733,11 @@ func create_ext_job(dp DataType) {
                }
 
                dp.ext_job_id = dp.InputJobType + "_" + generate_uuid_from_type(dp.InputJobType)
-               //TODO: http/https should be configurable
                ok := false
                for !ok {
                        ok = send_http_request(json, http.MethodPut, "http://"+ics_server+"/data-consumer/v1/info-jobs/"+dp.ext_job_id, true, creds_grant_type != "")
                        if !ok {
                                log.Error("Cannot register job: ", dp.InputJobType)
-                               //TODO: Restart after long time?
                        }
                }
                log.Debug("Registered job ok: ", dp.InputJobType)
@@ -833,132 +823,6 @@ func send_http_request(json []byte, method string, url string, retry bool, useAu
        return false
 }
 
-// // Send a http request with json (json may be nil)
-// func send_http_request(json []byte, method string, url string, retry bool, useAuth bool) bool {
-
-//     // set the HTTP method, url, and request body
-//     var req *http.Request
-//     var err error
-//     if json == nil {
-//             req, err = http.NewRequest(method, url, http.NoBody)
-//     } else {
-//             req, err = http.NewRequest(method, url, bytes.NewBuffer(json))
-//             req.Header.Set("Content-Type", "application/json; charset=utf-8")
-//     }
-//     if err != nil {
-//             log.Error("Cannot create http request, method: ", method, " url: ", url)
-//             return false
-//     }
-
-//     if useAuth {
-//             token, err := fetch_token()
-//             if err != nil {
-//                     log.Error("Cannot fetch token for http request: ", err)
-//                     return false
-//             }
-//             req.Header.Set("Authorization", "Bearer "+token.TokenValue)
-//     }
-
-//     log.Debug("HTTP request: ", req)
-
-//     retries := 1
-//     if retry {
-//             retries = 5
-//     }
-//     sleep_time := 1
-//     for i := retries; i > 0; i-- {
-//             log.Debug("Sending http request")
-//             resp, err2 := httpclient.Do(req)
-//             if err2 != nil {
-//                     log.Error("Http request error: ", err2)
-//                     log.Error("Cannot send http request method: ", method, " url: ", url, " - retries left: ", i-1)
-
-//                     time.Sleep(time.Duration(sleep_time) * time.Second)
-//                     sleep_time = 2 * sleep_time
-//             } else {
-//                     if resp.StatusCode == 200 || resp.StatusCode == 201 || resp.StatusCode == 204 {
-//                             log.Debug("Accepted http status: ", resp.StatusCode)
-//                             resp.Body.Close()
-//                             return true
-//                     }
-//                     log.Debug("HTTP resp: ", resp)
-//                     resp.Body.Close()
-//             }
-//     }
-//     return false
-// }
-
-// // Send a http request with json (json may be nil)
-// func send_http_request(json []byte, method string, url string, retry bool, useAuth bool) bool {
-//     // initialize http client
-//     client := &http.Client{}
-
-//     // set the HTTP method, url, and request body
-//     var req *http.Request
-//     var err error
-//     if json == nil {
-//             req, err = http.NewRequest(method, url, http.NoBody)
-//     } else {
-//             req, err = http.NewRequest(method, url, bytes.NewBuffer(json))
-//             req.Header.Set("Content-Type", "application/json; charset=utf-8")
-//     }
-//     if err != nil {
-//             log.Error("Cannot create http request method: ", method, " url: ", url)
-//             return false
-//     }
-
-//     useAuth = false
-//     if useAuth {
-//             token, err := fetch_token()
-//             if err != nil {
-//                     log.Error("Cannot fetch token for http request: ", err)
-//                     return false
-//             }
-//             req.Header.Add("Authorization", "Bearer "+token.TokenValue)
-//     }
-//     log.Debug("HTTP request: ", req)
-
-//     b, berr := io.ReadAll(req.Body)
-//     if berr == nil {
-//             log.Debug("HTTP request body length: ", len(b))
-//     } else {
-//             log.Debug("HTTP request - cannot check body length: ", berr)
-//     }
-//     if json == nil {
-//             log.Debug("HTTP request null json")
-//     } else {
-//             log.Debug("HTTP request json: ", string(json))
-//     }
-//     requestDump, cerr := httputil.DumpRequestOut(req, true)
-//     if cerr != nil {
-//             fmt.Println(cerr)
-//     }
-//     fmt.Println(string(requestDump))
-
-//     retries := 1
-//     if retry {
-//             retries = 5
-//     }
-//     sleep_time := 1
-//     for i := retries; i > 0; i-- {
-//             resp, err2 := client.Do(req)
-//             if err2 != nil {
-//                     log.Error("Http request error: ", err2)
-//                     log.Error("Cannot send http request method: ", method, " url: ", url, " - retries left: ", i)
-
-//                     time.Sleep(time.Duration(sleep_time) * time.Second)
-//                     sleep_time = 2 * sleep_time
-//             } else {
-//                     if resp.StatusCode == 200 || resp.StatusCode == 201 || resp.StatusCode == 204 {
-//                             log.Debug("Accepted http status: ", resp.StatusCode)
-//                             defer resp.Body.Close()
-//                             return true
-//                     }
-//             }
-//     }
-//     return false
-// }
-
 func fetch_token() (*kafka.OAuthBearerToken, error) {
        log.Debug("Get token inline")
        conf := &clientcredentials.Config{
@@ -979,8 +843,6 @@ func fetch_token() (*kafka.OAuthBearerToken, error) {
        log.Debug("=====================================================")
        log.Debug("Expiration: ", token.Expiry)
        t := token.Expiry
-       // t := token.Expiry.Add(-time.Minute)
-       // log.Debug("Modified expiration: ", t)
        oauthBearerToken := kafka.OAuthBearerToken{
                TokenValue: token.AccessToken,
                Expiration: t,
@@ -1170,8 +1032,6 @@ func create_job(w http.ResponseWriter, req *http.Request) {
                return
        }
 
-       //TODO: Verify that job contains enough parameters...
-
        if !job_found {
                job_record = InfoJobRecord{}
                job_record.job_info = t
@@ -1190,7 +1050,6 @@ func create_job(w http.ResponseWriter, req *http.Request) {
 
                jc.command = "ADD-FILTER"
 
-               //TODO: Refactor
                if t.InfoTypeIdentity == "json-file-data-from-filestore" || t.InfoTypeIdentity == "json-file-data" || t.InfoTypeIdentity == "json-file-data-from-filestore-to-influx" {
                        fm := FilterMaps{}
                        fm.sourceNameMap = make(map[string]bool)
@@ -1292,7 +1151,7 @@ func supervise_producer(w http.ResponseWriter, req *http.Request) {
        w.WriteHeader(http.StatusOK)
 }
 
-// producer statictics, all jobs
+// producer statistics, all jobs
 func statistics(w http.ResponseWriter, req *http.Request) {
        if req.Method != http.MethodGet {
                w.WriteHeader(http.StatusMethodNotAllowed)
@@ -1407,7 +1266,6 @@ func start_topic_reader(topic string, type_id string, control_ch chan ReaderCont
                case reader_ctrl := <-control_ch:
                        if reader_ctrl.command == "EXIT" {
                                log.Info("Topic reader on topic: ", topic, " for type: ", type_id, " - stopped")
-                               //TODO: Stop consumer if present?
                                data_ch <- nil //Signal to job handler
                                running = false
                                return
@@ -1457,7 +1315,6 @@ func start_topic_reader(topic string, type_id string, control_ch chan ReaderCont
                                                }
                                        }
                                default:
-                                       //TODO: Handle these?
                                        log.Debug("Dumping topic reader event on topic: ", topic, " for type: ", type_id, " evt: ", evt.String())
                                }
 
@@ -1475,7 +1332,6 @@ func start_topic_reader(topic string, type_id string, control_ch chan ReaderCont
 
        go func() {
                for {
-                       //maxDur := 1 * time.Second
                        for {
                                select {
                                case reader_ctrl := <-control_ch:
@@ -1498,9 +1354,8 @@ func start_topic_reader(topic string, type_id string, control_ch chan ReaderCont
                                                var kmsg KafkaPayload
                                                kmsg.msg = e
 
-                                               c.Commit() //TODO: Ok?
+                                               c.Commit()
 
-                                               //TODO: Check for exception
                                                data_ch <- &kmsg
                                                stats.in_msg_cnt++
                                                log.Debug("Reader msg: ", &kmsg)
@@ -1524,24 +1379,6 @@ func start_topic_reader(topic string, type_id string, control_ch chan ReaderCont
                                        default:
                                                fmt.Printf("Ignored %v\n", e)
                                        }
-
-                                       // orig code
-                                       // msg, err := c.ReadMessage(maxDur)
-                                       // if err == nil {
-                                       //      var kmsg KafkaPayload
-                                       //      kmsg.msg = msg
-
-                                       //      c.Commit() //TODO: Ok?
-
-                                       //      //TODO: Check for exception
-                                       //      data_ch <- &kmsg
-                                       //      stats.in_msg_cnt++
-                                       //      log.Debug("Reader msg: ", &kmsg)
-                                       //      log.Debug("Reader - data_ch ", data_ch)
-                                       // } else {
-                                       //      log.Debug("Topic Reader for type: ", type_id, "  Nothing to consume on topic: ", topic, ", reason: ", err)
-                                       // }
-
                                }
                        }
                }
@@ -1569,7 +1406,6 @@ func start_topic_writer(control_ch chan WriterControl, data_ch chan *KafkaPayloa
                                time.Sleep(1 * time.Second)
                        } else {
                                log.Debug("Kafka producer started")
-                               //defer kafka_producer.Close()
                        }
                }
        }
@@ -1579,7 +1415,6 @@ func start_topic_writer(control_ch chan WriterControl, data_ch chan *KafkaPayloa
                for {
                        select {
                        case evt := <-kafka_producer.Events():
-                               //TODO: Handle this? Probably yes, look if the msg was delivered and if not, resend?
                                switch evt.(type) {
                                case *kafka.Message:
                                        m := evt.(*kafka.Message)
@@ -1630,7 +1465,6 @@ func start_topic_writer(control_ch chan WriterControl, data_ch chan *KafkaPayloa
                        case kmsg := <-data_ch:
                                if kmsg == nil {
                                        event_chan <- 0
-                                       // TODO: Close producer?
                                        log.Info("Topic writer stopped by channel signal - start_topic_writer")
                                        defer kafka_producer.Close()
                                        return
@@ -1654,7 +1488,6 @@ func start_topic_writer(control_ch chan WriterControl, data_ch chan *KafkaPayloa
                                        }
                                }
                                if !msg_ok {
-                                       //TODO: Retry sending msg?
                                        log.Error("Topic writer failed to send message on topic: ", kmsg.topic, " - Msg discarded. Error details: ", err)
                                }
                        case <-time.After(1000 * time.Millisecond):
@@ -1676,7 +1509,6 @@ func create_kafka_consumer(type_id string, gid string, cid string) *kafka.Consum
                        "client.id":          cid,
                        "auto.offset.reset":  "latest",
                        "enable.auto.commit": false,
-                       //"auto.commit.interval.ms": 5000,
                }
        } else {
                log.Info("Creating kafka SASL plain text consumer for type: ", type_id)
@@ -1692,15 +1524,11 @@ func create_kafka_consumer(type_id string, gid string, cid string) *kafka.Consum
        }
        c, err := kafka.NewConsumer(&cm)
 
-       //TODO: How to handle autocommit or commit message by message
-       //TODO: Make arg to kafka configurable
-
        if err != nil {
                log.Error("Cannot create kafka consumer for type: ", type_id, ", error details: ", err)
                return nil
        }
 
-       //c.Commit()
        log.Info("Created kafka consumer for type: ", type_id, " OK")
        return c
 }
@@ -1815,7 +1643,6 @@ func start_job_xml_file_data(type_id string, control_ch chan JobControl, data_in
                                        }
                                }
                        }
-                       //TODO: Sort processed file conversions in order (FIFO)
                        jobLimiterChan <- struct{}{}
                        go run_xml_job(type_id, msg, "gz", data_out_channel, topic_list, jobLimiterChan, fvolume, fsbucket, mc, mc_id)
 
@@ -1825,7 +1652,6 @@ func start_job_xml_file_data(type_id string, control_ch chan JobControl, data_in
                        }
                }
        }
-       //}()
 }
 
 func run_xml_job(type_id string, msg *KafkaPayload, outputCompression string, data_out_channel chan *KafkaPayload, topic_list map[string]string, jobLimiterChan chan struct{}, fvolume string, fsbucket string, mc *minio.Client, mc_id string) {
@@ -1854,7 +1680,6 @@ func run_xml_job(type_id string, msg *KafkaPayload, outputCompression string, da
 
        var reader io.Reader
 
-       //TODO -> config
        INPUTBUCKET := "ropfiles"
 
        filename := ""
@@ -1868,7 +1693,6 @@ func run_xml_job(type_id string, msg *KafkaPayload, outputCompression string, da
                }
                defer fi.Close()
                reader = fi
-               //} else if evt_data.ObjectStoreBucket != "" {
        } else {
                filename = evt_data.Name
                if mc != nil {
@@ -2038,7 +1862,6 @@ func xml_to_json_conv(f_byteValue *[]byte, xfeh *XmlFileEventHeader) ([]byte, er
        start = time.Now()
        var pmfile PMJsonFile
 
-       //TODO: Fill in more values
        pmfile.Event.Perf3GppFields.Perf3GppFieldsVersion = "1.0"
        pmfile.Event.Perf3GppFields.MeasDataCollection.GranularityPeriod = 900
        pmfile.Event.Perf3GppFields.MeasDataCollection.MeasuredEntityUserName = ""
@@ -2104,7 +1927,6 @@ func start_job_json_file_data(type_id string, control_ch chan JobControl, data_i
 
        filters := make(map[string]Filter)
        filterParams_list := make(map[string]FilterMaps)
-       // ch_list := make(map[string]chan *KafkaPayload)
        topic_list := make(map[string]string)
        var mc *minio.Client
        const mc_id = "mc_" + "start_job_json_file_data"
@@ -2115,9 +1937,7 @@ func start_job_json_file_data(type_id string, control_ch chan JobControl, data_i
                        log.Debug("Type job ", type_id, " new cmd received ", job_ctl.command)
                        switch job_ctl.command {
                        case "EXIT":
-                               //ignore cmd - handled by channel signal
                        case "ADD-FILTER":
-                               //TODO: Refactor...
                                filters[job_ctl.filter.JobId] = job_ctl.filter
                                log.Debug("Type job ", type_id, " updated, topic: ", job_ctl.filter.OutputTopic, " jobid: ", job_ctl.filter.JobId)
 
@@ -2135,7 +1955,6 @@ func start_job_json_file_data(type_id string, control_ch chan JobControl, data_i
                                tmp_topic_list[job_ctl.filter.JobId] = job_ctl.filter.OutputTopic
                                topic_list = tmp_topic_list
                        case "REMOVE-FILTER":
-                               //TODO: Refactor...
                                log.Debug("Type job ", type_id, " removing job: ", job_ctl.filter.JobId)
 
                                tmp_filterParams_list := make(map[string]FilterMaps)
@@ -2200,8 +2019,6 @@ func run_json_job(type_id string, msg *KafkaPayload, outputCompression string, f
 
        var reader io.Reader
 
-       //TODO -> config
-       //INPUTBUCKET := "json-file-ready"
        INPUTBUCKET := "pm-files-json"
        filename := ""
        if objectstore == false {
@@ -2260,16 +2077,6 @@ func run_json_job(type_id string, msg *KafkaPayload, outputCompression string, f
                log.Debug("Read file/obj: ", filename, "for type job: ", type_id, " time: ", time.Since(start).String())
        }
 
-       // The code was moved to inside the below for loop - a deep copy of PM PMJsonFile is need to keep it outside
-       // var pmfile PMJsonFile
-       // start := time.Now()
-       // err = jsoniter.Unmarshal(*data, &pmfile)
-       // log.Debug("Json unmarshal obj: ", filename, " time: ", time.Since(start).String())
-
-       // if err != nil {
-       //      log.Error("Msg could not be unmarshalled - discarding message, obj: ", filename, " - error details:", err)
-       //      return
-       // }
        for k, v := range filterList {
 
                var pmfile PMJsonFile
@@ -2290,27 +2097,14 @@ func run_json_job(type_id string, msg *KafkaPayload, outputCompression string, f
                log.Debug("measObjClassMap:", v.measObjClassMap)
                log.Debug("measObjInstIdsMap:", v.measObjInstIdsMap)
                log.Debug("measTypesMap:", v.measTypesMap)
-               //BMX if len(v.sourceNameMap) > 0 || len(v.measObjInstIdsMap) > 0 || len(v.measTypesMap) > 0 || len(v.measObjClassMap) > 0 {
+
                b := json_pm_filter_to_byte(evt_data.Filename, &pmfile, v.sourceNameMap, v.measObjClassMap, v.measObjInstIdsMap, v.measTypesMap)
                if b == nil {
                        log.Info("File/obj ", filename, "for job: ", k, " - empty after filering, discarding message ")
                        return
                }
                kmsg.msg.Value = *b
-               //BMX}
-
-               // if outputCompression == "json.gz" {
-               //      start := time.Now()
-               //      var buf bytes.Buffer
-               //      err := gzipWrite(&buf, &kmsg.msg.Value)
-               //      if err != nil {
-               //              log.Error("Cannot compress file/obj ", filename, "for job: ", job_id, " - discarding message, error details", err)
-               //              return
-
-               //      }
-               //      kmsg.msg.Value = buf.Bytes()
-               //      log.Debug("Compress file/obj ", filename, "for job: ", job_id, " time:", time.Since(start).String())
-               // }
+
                kmsg.topic = topic_list[k]
                kmsg.jobid = k
 
@@ -2441,122 +2235,6 @@ func json_pm_filter_to_obj(resource string, data *PMJsonFile, sourceNameMap map[
        return data
 }
 
-// func json_pm_filter(resource string, data *PMJsonFile, sourceNameMap map[string]bool, measObjClassMap map[string]bool, measObjInstIdsMap map[string]bool, measTypesMap map[string]bool) *[]byte {
-
-//     filter_req := true
-//     start := time.Now()
-//     if len(sourceNameMap) != 0 {
-//             if !sourceNameMap[data.Event.CommonEventHeader.SourceName] {
-//                     filter_req = false
-//                     return nil
-//             }
-//     }
-//     if filter_req {
-//             modified := false
-//             var temp_mil []MeasInfoList
-//             for _, zz := range data.Event.Perf3GppFields.MeasDataCollection.SMeasInfoList {
-
-//                     check_cntr := false
-//                     var cnt_flags []bool
-//                     if len(measTypesMap) > 0 {
-//                             c_cntr := 0
-//                             var temp_mtl []string
-//                             for _, v := range zz.MeasTypes.SMeasTypesList {
-//                                     if measTypesMap[v] {
-//                                             cnt_flags = append(cnt_flags, true)
-//                                             c_cntr++
-//                                             temp_mtl = append(temp_mtl, v)
-//                                     } else {
-//                                             cnt_flags = append(cnt_flags, false)
-//                                     }
-//                             }
-//                             if c_cntr > 0 {
-//                                     check_cntr = true
-//                                     zz.MeasTypes.SMeasTypesList = temp_mtl
-//                             } else {
-//                                     modified = true
-//                                     continue
-//                             }
-//                     }
-//                     keep := false
-//                     var temp_mvl []MeasValues
-//                     for _, yy := range zz.MeasValuesList {
-//                             keep_class := false
-//                             keep_inst := false
-//                             keep_cntr := false
-
-//                             dna := strings.Split(yy.MeasObjInstID, ",")
-//                             instName := dna[len(dna)-1]
-//                             cls := strings.Split(dna[len(dna)-1], "=")[0]
-
-//                             if len(measObjClassMap) > 0 {
-//                                     if measObjClassMap[cls] {
-//                                             keep_class = true
-//                                     }
-//                             } else {
-//                                     keep_class = true
-//                             }
-
-//                             if len(measObjInstIdsMap) > 0 {
-//                                     if measObjInstIdsMap[instName] {
-//                                             keep_inst = true
-//                                     }
-//                             } else {
-//                                     keep_inst = true
-//                             }
-
-//                             if check_cntr {
-//                                     var temp_mrl []MeasResults
-//                                     cnt_p := 1
-//                                     for _, v := range yy.MeasResultsList {
-//                                             if cnt_flags[v.P-1] {
-//                                                     v.P = cnt_p
-//                                                     cnt_p++
-//                                                     temp_mrl = append(temp_mrl, v)
-//                                             }
-//                                     }
-//                                     yy.MeasResultsList = temp_mrl
-//                                     keep_cntr = true
-//                             } else {
-//                                     keep_cntr = true
-//                             }
-//                             if keep_class && keep_cntr && keep_inst {
-//                                     keep = true
-//                                     temp_mvl = append(temp_mvl, yy)
-//                             }
-//                     }
-//                     if keep {
-//                             zz.MeasValuesList = temp_mvl
-//                             temp_mil = append(temp_mil, zz)
-//                             modified = true
-//                     }
-
-//             }
-//             //Only if modified
-//             if modified {
-//                     if len(temp_mil) == 0 {
-//                             log.Debug("Msg filtered, nothing found, discarding, obj: ", resource)
-//                             return nil
-//                     }
-//                     data.Event.Perf3GppFields.MeasDataCollection.SMeasInfoList = temp_mil
-//             }
-//     }
-//     log.Debug("Filter: ", time.Since(start).String())
-
-//     start = time.Now()
-//     j, err := jsoniter.Marshal(&data)
-
-//     log.Debug("Json marshal obj: ", resource, " time: ", time.Since(start).String())
-
-//     if err != nil {
-//             log.Error("Msg could not be marshalled discarding message, obj: ", resource, ", error details: ", err)
-//             return nil
-//     }
-
-//     log.Debug("Filtered json obj: ", resource, " len: ", len(j))
-//     return &j
-// }
-
 func start_job_json_file_data_influx(type_id string, control_ch chan JobControl, data_in_ch chan *KafkaPayload, objectstore bool) {
 
        log.Info("Type job", type_id, " started")
@@ -2575,7 +2253,7 @@ func start_job_json_file_data_influx(type_id string, control_ch chan JobControl,
                        case "EXIT":
                                //ignore cmd - handled by channel signal
                        case "ADD-FILTER":
-                               //TODO: Refactor...
+
                                filters[job_ctl.filter.JobId] = job_ctl.filter
                                log.Debug("Type job ", type_id, " updated, topic: ", job_ctl.filter.OutputTopic, " jobid: ", job_ctl.filter.JobId)
                                log.Debug(job_ctl.filter)
@@ -2594,7 +2272,7 @@ func start_job_json_file_data_influx(type_id string, control_ch chan JobControl,
                                influx_job_params = tmp_influx_job_params
 
                        case "REMOVE-FILTER":
-                               //TODO: Refactor...
+
                                log.Debug("Type job ", type_id, " removing job: ", job_ctl.filter.JobId)
 
                                tmp_filterParams_list := make(map[string]FilterMaps)
@@ -2629,7 +2307,7 @@ func start_job_json_file_data_influx(type_id string, control_ch chan JobControl,
                                        }
                                }
                        }
-                       //TODO: Sort processed file conversions in order (FIFO)
+
                        jobLimiterChan <- struct{}{}
                        go run_json_file_data_job_influx(type_id, msg, filterParams_list, influx_job_params, jobLimiterChan, mc, mc_id, objectstore)
 
@@ -2661,8 +2339,6 @@ func run_json_file_data_job_influx(type_id string, msg *KafkaPayload, filterList
 
        var reader io.Reader
 
-       //TODO -> config
-       //INPUTBUCKET := "json-file-ready"
        INPUTBUCKET := "pm-files-json"
        filename := ""
        if objectstore == false {
@@ -2720,16 +2396,6 @@ func run_json_file_data_job_influx(type_id string, msg *KafkaPayload, filterList
 
                log.Debug("Read file/obj: ", filename, "for type job: ", type_id, " time: ", time.Since(start).String())
        }
-       // The code was moved to inside the below for loop - a deep copy of PM PMJsonFile is need to keep it outside
-       // var pmfile PMJsonFile
-       // start := time.Now()
-       // err = jsoniter.Unmarshal(*data, &pmfile)
-       // log.Debug("Json unmarshal obj: ", filename, " time: ", time.Since(start).String())
-
-       // if err != nil {
-       //      log.Error("Msg could not be unmarshalled - discarding message, obj: ", filename, " - error details:", err)
-       //      return
-       // }
        for k, v := range filterList {
 
                var pmfile PMJsonFile
@@ -2755,16 +2421,10 @@ func run_json_file_data_job_influx(type_id string, msg *KafkaPayload, filterList
                client := influxdb2.NewClient(fluxParms.DbUrl, fluxParms.DbToken)
                writeAPI := client.WriteAPIBlocking(fluxParms.DbOrg, fluxParms.DbBucket)
 
-               // fmt.Println(pmfile.Event.CommonEventHeader.StartEpochMicrosec)
-               // tUnix := pmfile.Event.CommonEventHeader.StartEpochMicrosec / int64(time.Millisecond)
-               // tUnixNanoRemainder := (pmfile.Event.CommonEventHeader.StartEpochMicrosec % int64(time.Millisecond)) * int64(time.Microsecond)
-               // timeT := time.Unix(tUnix, tUnixNanoRemainder)
-               // fmt.Println(timeT)
-               // fmt.Println("======================")
                for _, zz := range pmfile.Event.Perf3GppFields.MeasDataCollection.SMeasInfoList {
                        ctr_names := make(map[string]string)
                        for cni, cn := range zz.MeasTypes.SMeasTypesList {
-                               ctr_names[string(cni+1)] = cn
+                               ctr_names[strconv.Itoa(cni+1)] = cn
                        }
                        for _, xx := range zz.MeasValuesList {
                                log.Debug("Measurement: ", xx.MeasObjInstID)
@@ -2774,7 +2434,7 @@ func run_json_file_data_job_influx(type_id string, msg *KafkaPayload, filterList
                                p.AddField("granularityperiod", pmfile.Event.Perf3GppFields.MeasDataCollection.GranularityPeriod)
                                p.AddField("timezone", pmfile.Event.CommonEventHeader.TimeZoneOffset)
                                for _, yy := range xx.MeasResultsList {
-                                       pi := string(yy.P)
+                                       pi := strconv.Itoa(yy.P)
                                        pv := yy.SValue
                                        pn := ctr_names[pi]
                                        log.Debug("Counter: ", pn, " Value: ", pv)
diff --git a/pm-file-converter/server.crt b/pm-file-converter/server.crt
new file mode 100644 (file)
index 0000000..db6ad86
--- /dev/null
@@ -0,0 +1,9 @@
+-----BEGIN CERTIFICATE-----
+MIIBRzCBzQIJAJkBcpKz3wnCMAoGCCqGSM49BAMCMA0xCzAJBgNVBAYTAlNFMB4X
+DTIzMDUyMjA3Mjg1MloXDTMzMDUxOTA3Mjg1MlowDTELMAkGA1UEBhMCU0UwdjAQ
+BgcqhkjOPQIBBgUrgQQAIgNiAAQ8KdpOCp9l+L+JcW2dpy95Ir72FUGOR8Uvcyrg
+OTAcRN9fNCLXIP3PHVj6UxpemtE72R7zU8qADQV1DYZayEKDaIikf9njPfjenqud
+LUSICMjigLL1XznnGe0nCDaA24EwCgYIKoZIzj0EAwIDaQAwZgIxAI6jg1kSSjf7
+3gbfK5UEWun+HVsxiQyBpBm7DhARvtkJsLUiVV+RMF8mukAOrXjHYgIxAM4RuOkz
+56JuyrG6CouKJlHy2Q0R+Olg2GY8bIV9GzePG4NWYfT//z1vjfEmmiNMcA==
+-----END CERTIFICATE-----
diff --git a/pm-file-converter/server.key b/pm-file-converter/server.key
new file mode 100644 (file)
index 0000000..13e18e1
--- /dev/null
@@ -0,0 +1,9 @@
+-----BEGIN EC PARAMETERS-----
+BgUrgQQAIg==
+-----END EC PARAMETERS-----
+-----BEGIN EC PRIVATE KEY-----
+MIGkAgEBBDBDwFMblWUdInRL04EnD+J2y6h06559cdvMz7SjuzaHErcwcrC+slm1
+jSKpiV8hgY6gBwYFK4EEACKhZANiAAQ8KdpOCp9l+L+JcW2dpy95Ir72FUGOR8Uv
+cyrgOTAcRN9fNCLXIP3PHVj6UxpemtE72R7zU8qADQV1DYZayEKDaIikf9njPfje
+nqudLUSICMjigLL1XznnGe0nCDaA24E=
+-----END EC PRIVATE KEY-----
index 1d138c5..1869c36 100644 (file)
 #  ============LICENSE_END=================================================
 #
 
-FROM golang:1.19-bullseye AS build
+FROM golang:1.20.3-buster AS build
+
 WORKDIR /app
-COPY go.mod .
-COPY go.sum .
-RUN go mod download
+
 COPY main.go .
+RUN go mod init main
+RUN go mod tidy
+
 RUN go build -o /pm-rapp
 
-#Replaced distroless image with ubuntu for debug purposes (seem to be cert problems with distroless image...?)
 FROM gcr.io/distroless/base-debian11
-#FROM ubuntu
+
 WORKDIR /
 ## Copy from "build" stage
 COPY --from=build /pm-rapp .
 
-##Uncomment this when using distroless image
-USER nonroot:nonroot
 ENTRYPOINT ["/pm-rapp"]
index 99a8260..5c951af 100644 (file)
@@ -1,7 +1,6 @@
 
 
-## Basic rAPP for demo purpose - starts a subscription and prints out received data on the topic to stdout
-
+## Basic rAPP for demo purpose
 
 ### Manual build, tag and push to image repo
 
@@ -11,6 +10,62 @@ Build for docker or local kubernetes\
 Build for remote kubernetes - an externally accessible image repo (e.g. docker hub) is needed  \
 `./build.sh <external-image-repo> [<image-tag>]`
 
+## Function
+
+The rApp starts a job subscription and optionally prints the received data to standard out. The purpose of this app is to simulate a real app subscribing to data.
+
+The rapp can be configured to use plain text, plain text SSL or plain text SASL towards kafka.
+
 
 ### Configuration
 
+The container expects the following environment variables:
+
+- APPID : Should be a unique name (for example the name of the POD).
+
+- APPNS : Should be the name of the namespace.
+
+- KAFKA_SERVER : Host and port of the kafka bootstrap server.
+
+- TOPIC : The kafka topic where data is delivered by the job.
+
+- ICS : Host and port to the information coordinator server.
+
+The remaining env vars are optional.
+
+- JWT_FILE : File path to a mounted file where a valid token is stored. If used, the app expects the file to be regularly updated by a sidecar container. Only for SASL plain text towards kafka.
+
+- SSLPATH : Path to the mounted cert and key for secure kafka communication. Only for the plain text SSL interface towards kafka.
+
+- GZIP : If set (any value) the payload from kafka is expected to be in gzip format.
+
+- LOG_PAYLOAD : If set (any value) the received payload is printed to standard out.
+
+The following are optional and used only if the app fetches the token itself instead of relying on a configured sidecar. Only for SASL plain text towards kafka.
+
+- CREDS_GRANT_TYPE : Grant type (keycloak)
+- CREDS_CLIENT_SECRET : Client secret (keycloak)
+- CREDS_CLIENT_ID : Client id (keycloak)
+- AUTH_SERVICE_URL : URL to keycloak for requesting a token.
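+
+A minimal run sketch with the mandatory variables set (the image name and all values below are examples only, not defaults from this repo):
+
+```sh
+docker run \
+  -e APPID=pm-rapp-1 \
+  -e APPNS=nonrtric \
+  -e KAFKA_SERVER=kafka-1-kafka-bootstrap.nonrtric:9092 \
+  -e TOPIC=rapp-topic \
+  -e ICS=informationservice.nonrtric:8083 \
+  -e LOG_PAYLOAD=1 \
+  pm-rapp:latest
+```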
+
+
+
+The subscription json is expected on the path "/config/jobDefinition.json".
+The rapp sets the topic and bootstrap server from the above env vars before subscribing to the data.
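+
+A hypothetical sketch of such a file (the field names follow the ICS info-job API; the type id, owner and delivery values are examples only):
+
+```json
+{
+  "info_type_id": "json-file-data-from-filestore",
+  "job_owner": "pm-rapp",
+  "job_definition": {
+    "filter": {},
+    "deliveryInfo": {
+      "topic": "rapp-topic",
+      "bootStrapServers": "kafka-1-kafka-bootstrap.nonrtric:9092"
+    }
+  }
+}
+```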
+
+
+
+## License
+
+Copyright (C) 2023 Nordix Foundation. All rights reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/pm-rapp/TODO.txt b/pm-rapp/TODO.txt
deleted file mode 100644 (file)
index a4a0b45..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-
-pm-rapp
-=======
-Dockerfile
-- Remove ref to ubuntu image (if distroless image work ok)
-
-
-
-install/helm
-============
index 581b5c2..9bc6810 100755 (executable)
@@ -18,7 +18,7 @@
 #
 
 # Build image from Dockerfile with/without custom image tag
-# Optionally push to external docker hub repo
+# Optionally push to external image repo
 
 print_usage() {
     echo "Usage: build.sh no-push|<docker-hub-repo-name> [<image-tag>]"
@@ -39,12 +39,30 @@ else
     echo "Attempt to push built image to: "$REPO
 fi
 
-if [ "$2" != "" ]; then
-    IMAGE_TAG=$2
-fi
- echo "Setting image tag to: "$IMAGE_TAG
+shift
+while [ $# -ne 0 ]; do
+    if [ "$1" == "--tag" ]; then
+        shift
+        if [ -z "$1" ]; then
+            print_usage
+        fi
+        IMAGE_TAG=$1
+        echo "Setting image tag to: "$IMAGE_TAG
+        shift
+    else
+        echo "Unknown parameter: $1"
+        print_usage
+    fi
+done
 
 IMAGE=$IMAGE_NAME:$IMAGE_TAG
+
+export DOCKER_DEFAULT_PLATFORM=linux/amd64
+CURRENT_PLATFORM=$(docker system info --format '{{.OSType}}/{{.Architecture}}')
+if [ $CURRENT_PLATFORM != $DOCKER_DEFAULT_PLATFORM ]; then
+    echo "Image may not work on the current platform: $CURRENT_PLATFORM, only platform $DOCKER_DEFAULT_PLATFORM supported"
+fi
+
 echo "Building image $IMAGE"
 docker build -t $IMAGE_NAME:$IMAGE_TAG .
 if [ $? -ne 0 ]; then
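
With the reworked argument parsing above, the image tag is now passed as a flag rather than a positional argument; a sketch invocation (the tag value is an example): `./build.sh no-push --tag 1.1.0`.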
index 658fb57..1e6fae8 100644 (file)
@@ -17,6 +17,6 @@
 
 # The Jenkins job requires a tag to build the Docker image.
 # By default this file is in the docker build directory,
-# but the location can configured in the JJB template.
+# but the location can be configured in the JJB template.
 ---
 tag: 1.0.0
\ No newline at end of file
index 4113a42..c718add 100644 (file)
@@ -77,7 +77,7 @@ var gzipped_data = os.Getenv("GZIP")
 
 var log_payload = os.Getenv("LOG_PAYLOAD")
 
-// This are optional - if rapp is fethcing the token instead of the side car
+// These are optional - if rapp is fetching the token instead of the sidecar
 var creds_grant_type = os.Getenv("CREDS_GRANT_TYPE")
 var creds_client_secret = os.Getenv("CREDS_CLIENT_SECRET")
 var creds_client_id = os.Getenv("CREDS_CLIENT_ID")
@@ -399,7 +399,7 @@ func send_http_request(jsonData []byte, method string, url string, contentType s
        }
        if jwt_file != "" || creds_service_url != "" {
                if accessToken != "" {
-                       req.Header.Add("authorization", accessToken)
+                       req.Header.Set("Authorization", "Bearer "+accessToken)
                } else {
                        log.Error("Cannot create http request for url: ", url, " - token missing")
                        return false, nil
@@ -455,7 +455,7 @@ func send_http_request(jsonData []byte, method string, url string, contentType s
                                        }
                                }
                        } else {
-                               log.Error("Bad response, method: ", method, " url: ", url, " resp: ", resp.StatusCode)
+                               log.Error("Bad response, method: ", method, " url: ", url, " status: ", resp.StatusCode, " resp: ", resp)
                        }
                }
        }