# ============LICENSE_END=================================================
#
-FROM golang:1.19-bullseye AS build
+FROM golang:1.20.3-buster AS build
+
WORKDIR /app
-COPY go.mod .
-COPY go.sum .
-RUN go mod download
+
COPY main.go .
+RUN go mod init main
+RUN go mod tidy
+
RUN go build -o /pm-https-server
#Replaced distroless image with ubuntu for debug purposes
This server can be used to simulate a RAN node for file download over https.
Files can be requested in three ways:
-- static file (always the same files returned)
+- static file (always the same file returned)
- semi-static files (the requested file must exist in the container)
- generated files (file contents are generated using a template where the start/stop time as well as the node name are based on the requested file. Counter values are also generated)
### Configuration
-The following env vars (all optional) may be set to control the behavior of the server
+The following env vars (all optional) may be set to control the behaviour of the server
- ALWAYS_RETURN - Name of a file under "/files" in the container that is always returned regardless of requested file on the url `/files/<file-id>`. This can be used when the file contents are not important.
If generated files shall be used, load the file pm-template.xml.gz to the /template-files dir in the container.
-Configure the following for desired behaviou
+Configure the following for desired behaviour
- static file: ALWAYS_RETURN
- semi-static files: none
- generated files: GENERATED_FILE_START_TIME and GENERATED_FILE_TIMEZONE
#
# Build image from Dockerfile with/without custom image tag
-# Optionally push to external docker hub repo
+# Optionally push to external image repo
print_usage() {
echo "Usage: build.sh no-push|<docker-hub-repo-name> [<image-tag>]"
exit 1
}
-if [ $# -ne 1 ] && [ $# -ne 2 ]; then
+if [ $# -lt 1 ] || [ $# -gt 2 ]; then
print_usage
fi
echo "Attempt to push built image to: "$REPO
fi
-if [ "$2" != "" ]; then
- IMAGE_TAG=$2
-fi
- echo "Setting image tag to: "$IMAGE_TAG
+shift
+while [ $# -ne 0 ]; do
+ if [ $1 == "--tag" ]; then
+ shift
+ if [ -z "$1" ]; then
+ print_usage
+ fi
+ IMAGE_TAG=$1
+ echo "Setting image tag to: "$IMAGE_TAG
+ shift
+ else
+ echo "Unknown parameter: $1"
+ print_usage
+ fi
+done
+
+./gen-cert.sh
+
+echo ""
+echo "Certs generated"
IMAGE=$IMAGE_NAME:$IMAGE_TAG
-echo "Building image $IMAGE"
+
+export DOCKER_DEFAULT_PLATFORM=linux/amd64
+CURRENT_PLATFORM=$(docker system info --format '{{.OSType}}/{{.Architecture}}')
+if [ $CURRENT_PLATFORM != $DOCKER_DEFAULT_PLATFORM ]; then
+ echo "Image may not work on the current platform: $CURRENT_PLATFORM, only platform $DOCKER_DEFAULT_PLATFORM supported"
+fi
+
+echo "Building image: $IMAGE with architecture: $DOCKER_DEFAULT_PLATFORM"
+
docker build -t $IMAGE_NAME:$IMAGE_TAG .
+
if [ $? -ne 0 ]; then
echo "BUILD FAILED"
exit 1
+++ /dev/null
-*.crt
-*.key
--- /dev/null
+-----BEGIN CERTIFICATE-----
+MIICmDCCAYACCQCzTWEB5G++JDANBgkqhkiG9w0BAQsFADANMQswCQYDVQQGEwJT
+RTAgFw0yMzA1MTcxMjQ2MjVaGA8yMDUwMTAwMTEyNDYyNVowDTELMAkGA1UEBhMC
+U0UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDrFmC9idjCLOYkV0Nb
+R61ZCEBNuLF+UAsZ7GrS+WaME81CuEDZLeHsqwCOiPKf6XF23XHoXqZ+lFY2+Bv0
+uf+FcR3Y7p3BcQn4Ier65ybvHRc/53RspOLdi4TXttEjhGEJsApFsnOSYEkDv/1x
+TU9etwUvjWNdopKQ/fszV6WB1JgLvdSC4US+Do/+V3vUJWW8aJhTfUUtUfyVreJr
++/Rs+lfyMcoBv6d7SsyImO4Lq1Gv4giyKu4D3R9Vvz6CNp2mTKrhgiVbnSKMl1cm
+bkewFPuAvbjxxNGXUWipGug3pJgTxWS8/IyfyQ6TXl5fWvDRt5wrXNbc3nrJVoSs
+1QPFAgMBAAEwDQYJKoZIhvcNAQELBQADggEBACfMN8UZSGzHNsFbiuDoy0hSZzDk
+2ZpJV+BgtH/119lgIRzEEOC749rx68T55B8vKH8go1Pio3sVeDp2bZ7phG9Bcxn9
+hTIN9VJQ9vVnPrnaPzDuQdzY+4FyTMcMXtgHfC7Nu4bYor+rXbqdmv+RrucG9jpg
+uZaLdgtyYK+vpEqLauRYc3wWzyDtV6Td/8r4htxf+zslWvrQ1AXEXf5uuozWTTZJ
+g23vQ243NIQ9MF430QS40uvBBssACeI3NG8aD/OfhWO7TIKr69y+EfHu7i/hcFO1
+LkRj+CMt70TQMULHGeG7CP8VZJNHJz0kCZWj6R+Z9MOA/dVaIiyAS5LI/uY=
+-----END CERTIFICATE-----
--- /dev/null
+-----BEGIN PRIVATE KEY-----
+MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDrFmC9idjCLOYk
+V0NbR61ZCEBNuLF+UAsZ7GrS+WaME81CuEDZLeHsqwCOiPKf6XF23XHoXqZ+lFY2
++Bv0uf+FcR3Y7p3BcQn4Ier65ybvHRc/53RspOLdi4TXttEjhGEJsApFsnOSYEkD
+v/1xTU9etwUvjWNdopKQ/fszV6WB1JgLvdSC4US+Do/+V3vUJWW8aJhTfUUtUfyV
+reJr+/Rs+lfyMcoBv6d7SsyImO4Lq1Gv4giyKu4D3R9Vvz6CNp2mTKrhgiVbnSKM
+l1cmbkewFPuAvbjxxNGXUWipGug3pJgTxWS8/IyfyQ6TXl5fWvDRt5wrXNbc3nrJ
+VoSs1QPFAgMBAAECggEAFlnnUr4DbNrF+tiNH+WdtqRRMNKJlZ/YnltbALoTpOfR
+ETHhgISbQVw0zlh48PlJ/2oohVZScCB8XfeS+N6iS7aohtKRDy5HK19WLwrBKeTT
+LBE+gYHfy/6S38uS8NSKQViKcXv4/wbGimO7ngUisbem95FyzBlD5CMxufzwUHqO
+WlrDpau38ehrNhXR+4gaU5fC0X7/njOYGjWXNmXAlswAs4TUO8gPwZdufzZyadoS
+AVJ9ZGsqn4VlxdIwyY0CiICtdKDmKX7YG3hYKay9LisSm95+Go0rzvPpa4SMWvV9
+tWzzWtdh+MsNbeI0OknXILQk3ifaFzhh/KZRBwOq4QKBgQD9By9qTe0aEyVi0oVH
+6Jy265c/aglQfkU9aIV5WTzF+JsLXYvoOBKT8Bk9GdxEaM9jL+h6BqZqcVqujqxx
+Ntcxks2tBh4glcbnPScOzur+fIYLXcjirZJNx3yft9DfKZjS5lNbKTswHuosGCPK
+LPnuhnz7I5I8aDkFhADeuTOV2QKBgQDt2T99n0Q3QURBu+eQbcUAVzPvJhoOrqvb
+Kc0vRGt9+O3NXEH5Q2YEufDECS1qOtp1XZECCkcMmnAlS7Juz03/rFK+smobmYwM
+deqmA76o2dGLujoQ7w7hJtHFOizUR/DfjrUY1AXbh9qk0MhbudVDGn28/zdYTlXz
+RuIhevoNzQKBgQCzXvOa/ZEWyfnX23uGZX0rI0n+N3JQ8KKvDLiKNNujUEDBRtiW
+j6GD5FJQAVQn3tEd9Gluj+ZLUP5C/nt6arEUwIgzn0GeQe9WIADfO4pVS/tOdXai
+Uv+DSear5wgYG4nuAD+ZQVpnG5NQHPDKMyYelJJnCmlxj0TVByYCvfG2yQKBgQCz
+UQNl8Sobwk/0gvbM04UfgZ784Kvqf9O/Ep/Hz/x+Z6rZFYIDq8WBMLINCaI8oYxL
+ybPmZtsz7Ec6RvyKQC5c4I0tihMnJbcJOekjKlWWtUke39KhK6n7IyopWHetv2Mh
+GFT+F/MmlCDJ+0HirZLT2WgMkhkmsUBpffpEJZPZzQKBgQDQKT183favwULOkQaP
+AGItpQVt3aYyuZYi5veIDlzk3tatAIzya/G6Bqfnpy9CO6yPKdp1pkJxM1xb374T
+z6Mz4A4NPOD2Ofd0WAmiIkP0H1+zfjjtmdBO/9PpASLByNxt2MUdjWvaGk2w7U42
+Eb1duJr2Yud1NfUvKI7dZudF1A==
+-----END PRIVATE KEY-----
# ============LICENSE_END=================================================
#
-#Generate basic cert and key for web server
+#Generate basic cert and key for https server
cat <<__EOF__ | openssl req -new -newkey rsa:2048 -sha256 -nodes -x509 -keyout certs/server.key -out certs/server.crt -days 9999
SE
--- /dev/null
+
+
+## Prerequisites
+
+The ranpm setup works on linux/MacOS or on windows via WSL using a local or remote kubernetes cluster.
+
+- local kubectl
+- kubernetes cluster
+- local docker for building images
+
+It is recommended to run the ranpm on a kubernetes cluster instead of local docker-desktop etc as the setup requires a fair amount of computer resources.
+
+# Requirement on kubernetes
+
+The demo set can be run on local or remote kubernetes.
+Kubectl must be configured to point to the applicable kubernetes instance.
+Nodeports exposed by the kubernetes instance must be accessible by the local machine - basically the kubernetes control plane IP needs to be accessible from the local machine.
+
+- Latest version of istio install
+
+# Other requirements
+- helm3
+- bash
+- cmd 'envsubst' must be installed (check by cmd: 'type envsubst' )
+- cmd 'jq' must be installed (check by cmd: 'type jq' )
+
+## Before installation
+The following images need to be built manually. If remote or multi node cluster is used, then an image repo needs to be available to push the built images to.
+If external repo is used, use the same repo for all built images and configure the reponame in `helm/global-values.yaml` (the parameter value of extimagerepo shall have a trailing `/`)
+
+Build the following images (build instruction in each dir)
+- ranpm/https-server
+- ranpm/pm-file-converter
+- pm-rapp
+
+
+## Installation
+
+The installation is made by a few scripts.
+The main part of the ranpm is installed by a single script. Then, additional parts can be added on top. All installations in kubernetes are made by helm charts.
+
+The following scripts are provided for installing (install-nrt.sh must be run first):
+
+- install-nrt.sh : Installs the main parts of the ranpm setup
+- install-pm-log.sh : Installs the producer for influx db
+- install-pm-influx-job.sh : Sets up an alternative job to produce data stored in influx db.
+- install-pm-rapp.sh : Installs a rapp that subscribes to and prints out received data
+
+## Uninstallation
+
+There is a corresponding uninstall script for each install script. However, it is enough to just run `uninstall-nrt.sh` and `uninstall-pm-rapp.sh`.
+
+## Exposed ports to APIs
+All APIs are exposed on individual port numbers (nodeports) on the address of the kubernetes control plane.
+
+### Keycloak API
+Keycloak API accessed via proxy (proxy is needed to make keycloak issue token with the internal address of keycloak).
+- nodeport: 31784
+
+### OPA rules bundle server
+Server for posting updated OPA rules.
+- nodeport: 32201
+
+### Information coordinator Service
+Direct access to ICS API.
+- nodeports (http and https): 31823, 31824
+
+### Ves-Collector
+Direct access to the Ves-Collector
+- nodeports (http and https): 31760, 31761
+
+## Exposed ports to admin tools
+As part of the ranpm installation, a number of admin tools are installed.
+The tools are accessed via a browser on individual port numbers (nodeports) on the address of the kubernetes control plane.
+
+### Keycloak admin console
+Admin tool for keycloak.
+- nodeport : 31788
+- user: admin
+- password: admin
+
+### Redpanda console
+With this tool the topics, consumer etc can be viewed.
+- nodeport: 31767
+
+### Minio web
+Browser for minio filestore.
+- nodeport: 31768
+- user: admin
+- password: adminadmin
+
+### Influx db
+Browser for influx db.
+- nodeport: 31812
+- user: admin
+- password: mySuP3rS3cr3tT0keN
+
+
+## License
+
+Copyright (C) 2023 Nordix Foundation. All rights reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+++ /dev/null
-app versions in Chart.yaml
-
-Add parameters to config in values.yaml
-
-Handle password in secured secrets
-
-staging/release images (configurable?) and image pull policy
-
-use other minio user than admin for producers etc
-
-###############################
-
-mc alias set minio http://minio.nonrtric:9000 admin adminadmin
-
-mc admin user add minio testa testatesta
-
# ============LICENSE_END=================================================
#
-dfc:
- clientsecret: Akzki8aSLHL0GVNIx0k1wDrzbB56CVh1
\ No newline at end of file
+global:
+ extimagerepo: bjornmagnussonest/
+ numhttpsservers: 10
#
apiVersion: v2
-name: client
+name: kafka-client
description: Kafka client helm chart
# A chart can be either an 'application' or a 'library' chart.
apiVersion: v1
kind: Pod
metadata:
- name: client
+ name: kafka-client
namespace: nonrtric
labels:
- app: client
+ app: kafka-client
spec:
restartPolicy: Always
containers:
- - name: client
+ - name: kafka-client
image: confluentinc/cp-kafka:7.2.2
command: ['sh', '-c', 'while [ true ];do sleep 60;done']
imagePullPolicy: IfNotPresent
name: data-vol
containers:
- name: minio
- image: minio/minio:RELEASE.2023-02-27T18-10-45Z
+ # Note, in later releases only SSO seem to be possible
+ # so earlier release kept to be able to login with user/pwd
+ image: minio/minio:RELEASE.2022-10-21T22-37-48Z
imagePullPolicy: IfNotPresent
ports:
- name: tcpmain
minio:
opa:
- decisionlogs: false
\ No newline at end of file
+ decisionlogs: true
\ No newline at end of file
--- /dev/null
+# ============LICENSE_START===============================================
+# Copyright (C) 2023 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+apiVersion: v2
+name: ves-mr
+description: Ves-collector and message-router
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "0.1.0"
--- /dev/null
+{{/*
+# LICENSE_START=======================================================
+# org.onap.dmaap
+# ================================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# Modifications Copyright © 2021-2022 Nordix Foundation
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+###############################################################################
+###############################################################################
+*/}}
+##
+## Kafka Connection
+##
+## Items below are passed through to Kafka's producer and consumer
+## configurations (after removing "kafka.")
+## if you want to change request.required.acks it can take this one value
+#kafka.request.required.acks=-1
+kafka.metadata.broker.list=kafka-1-kafka-bootstrap.nonrtric:9092
+config.zk.servers=zoo-entrance.nonrtric:2181
+consumer.timeout.ms=100
+zookeeper.connection.timeout.ms=6000
+zookeeper.session.timeout.ms=20000
+zookeeper.sync.time.ms=2000
+auto.commit.interval.ms=1000
+fetch.message.max.bytes =1000000
+auto.commit.enable=false
+
+#(backoff*retries > zksessiontimeout)
+kafka.rebalance.backoff.ms=10000
+kafka.rebalance.max.retries=6
+
+
+###############################################################################
+##
+## Secured Config
+##
+## Some data stored in the config system is sensitive -- API keys and secrets,
+## for example. to protect it, we use an encryption layer for this section
+## of the config.
+##
+## The key is a base64 encode AES key. This must be created/configured for
+## each installation.
+#cambria.secureConfig.key=
+##
+## The initialization vector is a 16 byte value specific to the secured store.
+## This must be created/configured for each installation.
+#cambria.secureConfig.iv=
+
+## Southfield Sandbox
+cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
+cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
+authentication.adminSecret=fe3cCompound
+#cambria.secureConfig.key[pc569h]=YT3XPyxEmKCTLI2NK+Sjbw==
+#cambria.secureConfig.iv[pc569h]=rMm2jhR3yVnU+u2V9Ugu3Q==
+
+
+###############################################################################
+##
+## Consumer Caching
+##
+## Kafka expects live connections from the consumer to the broker, which
+## obviously doesn't work over connectionless HTTP requests. The Cambria
+## server proxies HTTP requests into Kafka consumer sessions that are kept
+## around for later re-use. Not doing so is costly for setup per request,
+## which would substantially impact a high volume consumer's performance.
+##
+## This complicates Cambria server failover, because we often need server
+## A to close its connection before server B brings up the replacement.
+##
+
+## The consumer cache is normally enabled.
+#cambria.consumer.cache.enabled=true
+
+## Cached consumers are cleaned up after a period of disuse. The server inspects
+## consumers every sweepFreqSeconds and will clean up any connections that are
+## dormant for touchFreqMs.
+#cambria.consumer.cache.sweepFreqSeconds=15
+cambria.consumer.cache.touchFreqMs=120000
+##stickforallconsumerrequests=false
+## The cache is managed through ZK. The default value for the ZK connection
+## string is the same as config.zk.servers.
+#cambria.consumer.cache.zkConnect=${config.zk.servers}
+
+##
+## Shared cache information is associated with this node's name. The default
+## name is the hostname plus the HTTP service port this host runs on. (The
+## hostname is determined via InetAddress.getLocalHost ().getCanonicalHostName(),
+## which is not always adequate.) You can set this value explicitly here.
+##
+#cambria.api.node.identifier=<use-something-unique-to-this-instance>
+
+#cambria.rateLimit.maxEmptyPollsPerMinute=30
+#cambria.rateLimitActual.delay.ms=10
+
+###############################################################################
+##
+## Metrics Reporting
+##
+## This server can report its metrics periodically on a topic.
+##
+#metrics.send.cambria.enabled=true
+#metrics.send.cambria.topic=cambria.apinode.metrics #msgrtr.apinode.metrics.dmaap
+#metrics.send.cambria.sendEverySeconds=60
+
+cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
+consumer.timeout=17
+default.partitions=3
+default.replicas=3
+##############################################################################
+#100mb
+maxcontentlength=10000
+
+##############################################################################
+##AAF Properties
+forceAAF=false
+useCustomAcls=false
+
+kafka.max.poll.interval.ms=300000
+kafka.heartbeat.interval.ms=60000
+kafka.session.timeout.ms=240000
+kafka.max.poll.records=1000
--- /dev/null
+###############################################################################
+##
+## Collector Server config
+##
+## - Default values are shown as commented settings.
+##
+###############################################################################
+##
+## HTTP(S) service
+##
+## Normally:
+##
+## - 8080 is http service
+## - https is disabled by default
+##
+## - At this time, the server always binds to 0.0.0.0
+##
+##
+collector.service.port=8080
+
+## Authentication is only supported via secure port
+## When enabled - require valid keystore defined
+collector.service.secure.port=8443
+
+# auth.method flags:
+#
+# noAuth - default option - no security (http)
+# certBasicAuth - auth by certificate and basic auth username / password (https)
+#auth.method=certBasicAuth
+auth.method=noAuth
+
+## Combination of userid,hashPassword encoded pwd list to be supported
+## userid and pwd comma separated; pipe delimitation between each pair
+## Password is generated by crypt-password library using BCrypt algorithm stored in dcaegen2/sdk package
+## or https://nexus.onap.org/#nexus-search;quick~crypt-password
+header.authlist=sample1,$2a$10$0buh.2WeYwN868YMwnNNEuNEAMNYVU9.FSMJGyIKV3dGET/7oGOi6
+
+## The keystore must be setup per installation when secure port is configured
+collector.keystore.file.location=etc/keystore
+collector.keystore.passwordfile=etc/passwordfile
+
+collector.cert.subject.matcher=etc/certSubjectMatcher.properties
+
+## The truststore must be setup per installation when mutual tls support is configured
+collector.truststore.file.location=etc/truststore
+collector.truststore.passwordfile=etc/trustpasswordfile
+
+## Schema Validation checkflag
+## default no validation checkflag (-1)
+## If enabled (1) - schemafile location must be specified
+collector.schema.checkflag=1
+collector.schema.file={\"v1\":\"./etc/CommonEventFormat_27.2.json\",\"v2\":\"./etc/CommonEventFormat_27.2.json\",\"v3\":\"./etc/CommonEventFormat_27.2.json\",\"v4\":\"./etc/CommonEventFormat_27.2.json\",\"v5\":\"./etc/CommonEventFormat_28.4.1.json\",\"v7\":\"./etc/CommonEventFormat_30.2.1_ONAP.json\"}
+
+## Schema StndDefinedFields Validation checkflag
+## default no validation checkflag (-1)
+## If enabled (1) - schema files locations must be specified, mapping file path must be specified, schema reference path
+## in event json must be specified, path to stndDefined data field in event json must be specified
+collector.externalSchema.checkflag=1
+collector.externalSchema.schemasLocation=./etc/externalRepo/
+collector.externalSchema.mappingFileLocation=./etc/externalRepo/schema-map.json
+event.externalSchema.schemaRefPath=$.event.stndDefinedFields.schemaReference
+event.externalSchema.stndDefinedDataPath=$.event.stndDefinedFields.data
+
+## List all streamid per domain to be supported. The streamid should match to channel name on dmaapfile
+collector.dmaap.streamid=fault=ves-fault|syslog=ves-syslog|heartbeat=ves-heartbeat|measurementsForVfScaling=ves-measurement|mobileFlow=ves-mobileflow|other=ves-other|stateChange=ves-statechange|thresholdCrossingAlert=ves-thresholdCrossingAlert|voiceQuality=ves-voicequality|sipSignaling=ves-sipsignaling|notification=ves-measurement|pnfRegistration=ves-pnfRegistration|3GPP-FaultSupervision=ves-3gpp-fault-supervision|3GPP-Heartbeat=ves-3gpp-heartbeat|3GPP-Provisioning=ves-3gpp-provisioning|3GPP-PerformanceAssurance=ves-3gpp-performance-assurance|o-ran-sc-du-hello-world-pm-streaming-oas3=ves-o-ran-sc-du-hello-world-pm-streaming-oas3
+collector.dmaapfile=etc/ves-dmaap-config.json
+
+## Path to the file containing description of api versions
+collector.description.api.version.location=etc/api_version_description.json
+
+## Event transformation Flag - when set expects configurable transformation
+## defined under ./etc/eventTransform.json
+## Enabled by default; to disable set to 0
+event.transform.flag=1
+
+# Describes at what frequency (measured in minutes) should application try to fetch config from CBS
+collector.dynamic.config.update.frequency=5
--- /dev/null
+<!--
+ ============LICENSE_START=======================================================
+ Copyright © 2019 AT&T Intellectual Property. All rights reserved.
+ Modifications Copyright © 2021-2022 Nordix Foundation
+ ================================================================================
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ============LICENSE_END=========================================================
+ -->
+
+<configuration scan="true" scanPeriod="3 seconds" debug="true">
+ <contextName>${module.ajsc.namespace.name}</contextName>
+ <jmxConfigurator />
+ <property name="logDirectory" value="${AJSC_HOME}/log" />
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} - %msg%n
+ </pattern>
+ </encoder>
+ </appender>
+
+ <appender name="INFO" class="ch.qos.logback.core.ConsoleAppender">
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>INFO</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ </appender>
+
+ <appender name="DEBUG" class="ch.qos.logback.core.ConsoleAppender">
+
+ <encoder>
+ <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="ERROR" class="ch.qos.logback.core.ConsoleAppender">
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>ERROR</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <encoder>
+ <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+ </encoder>
+ </appender>
+
+
+ <!-- Msgrtr related loggers -->
+ <logger name="org.onap.dmaap.dmf.mr.service" level="TRACE" />
+ <logger name="org.onap.dmaap.dmf.mr.service.impl" level="TRACE" />
+
+ <logger name="org.onap.dmaap.dmf.mr.resources" level="TRACE" />
+ <logger name="org.onap.dmaap.dmf.mr.resources.streamReaders" level="TRACE" />
+
+ <logger name="org.onap.dmaap.dmf.mr.backends" level="TRACE" />
+ <logger name="org.onap.dmaap.dmf.mr.backends.kafka" level="TRACE" />
+ <logger name="org.onap.dmaap.dmf.mr.backends.memory" level="TRACE" />
+
+ <logger name="org.onap.dmaap.dmf.mr.beans" level="TRACE" />
+
+ <logger name="org.onap.dmaap.dmf.mr.constants" level="TRACE" />
+
+ <logger name="org.onap.dmaap.dmf.mr.exception" level="TRACE" />
+
+ <logger name="org.onap.dmaap.dmf.mr.listener" level="TRACE" />
+
+ <logger name="org.onap.dmaap.dmf.mr.metabroker" level="TRACE" />
+
+ <logger name="org.onap.dmaap.dmf.mr.metrics.publisher" level="TRACE" />
+ <logger name="org.onap.dmaap.dmf.mr.metrics.publisher.impl" level="TRACE" />
+
+
+
+ <logger name="org.onap.dmaap.dmf.mr.security" level="TRACE" />
+ <logger name="org.onap.dmaap.dmf.mr.security.impl" level="TRACE" />
+
+ <logger name="org.onap.dmaap.dmf.mr.transaction" level="TRACE" />
+ <logger name="com.att.dmf.mr.transaction.impl" level="TRACE" />
+
+ <logger name="org.onap.dmaap.dmf.mr.metabroker" level="TRACE" />
+ <logger name="org.onap.dmaap.dmf.mr.metabroker" level="TRACE" />
+
+ <logger name="org.onap.dmaap.dmf.mr.utils" level="TRACE" />
+ <logger name="org.onap.dmaap.mr.filter" level="TRACE" />
+
+ <!--<logger name="com.att.nsa.cambria.*" level="TRACE" />-->
+
+ <!-- Msgrtr loggers in ajsc -->
+ <logger name="org.onap.dmaap.service" level="TRACE" />
+ <logger name="org.onap.dmaap" level="TRACE" />
+
+
+ <!-- Spring related loggers -->
+ <logger name="org.springframework" level="TRACE" additivity="false"/>
+ <logger name="org.springframework.beans" level="TRACE" additivity="false"/>
+ <logger name="org.springframework.web" level="TRACE" additivity="false" />
+ <logger name="com.blog.spring.jms" level="TRACE" additivity="false" />
+
+ <!-- AJSC Services (bootstrap services) -->
+ <logger name="ajsc" level="TRACE" additivity="false"/>
+ <logger name="ajsc.RouteMgmtService" level="TRACE" additivity="false"/>
+ <logger name="ajsc.ComputeService" level="TRACE" additivity="false" />
+ <logger name="ajsc.VandelayService" level="TRACE" additivity="false"/>
+ <logger name="ajsc.FilePersistenceService" level="TRACE" additivity="false"/>
+ <logger name="ajsc.UserDefinedJarService" level="TRACE" additivity="false" />
+ <logger name="ajsc.UserDefinedBeansDefService" level="TRACE" additivity="false" />
+ <logger name="ajsc.LoggingConfigurationService" level="TRACE" additivity="false" />
+
+ <!-- AJSC related loggers (DME2 Registration, csi logging, restlet, servlet
+ logging) -->
+ <logger name="ajsc.utils" level="TRACE" additivity="false"/>
+ <logger name="ajsc.utils.DME2Helper" level="TRACE" additivity="false" />
+ <logger name="ajsc.filters" level="TRACE" additivity="false" />
+ <logger name="ajsc.beans.interceptors" level="TRACE" additivity="false" />
+ <logger name="ajsc.restlet" level="TRACE" additivity="false" />
+ <logger name="ajsc.servlet" level="TRACE" additivity="false" />
+ <logger name="com.att" level="TRACE" additivity="false" />
+ <logger name="com.att.ajsc.csi.logging" level="TRACE" additivity="false" />
+ <logger name="com.att.ajsc.filemonitor" level="TRACE" additivity="false"/>
+
+ <logger name="com.att.nsa.dmaap.util" level="TRACE" additivity="false"/>
+ <logger name="com.att.cadi.filter" level="TRACE" additivity="false" />
+
+
+ <!-- Other Loggers that may help troubleshoot -->
+ <logger name="net.sf" level="TRACE" additivity="false" />
+ <logger name="org.apache.commons.httpclient" level="TRACE" additivity="false"/>
+ <logger name="org.apache.commons" level="TRACE" additivity="false" />
+ <logger name="org.apache.coyote" level="TRACE" additivity="false"/>
+ <logger name="org.apache.jasper" level="TRACE" additivity="false"/>
+
+ <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging.
+ May aid in troubleshooting) -->
+ <logger name="org.apache.camel" level="TRACE" additivity="false" />
+ <logger name="org.apache.cxf" level="TRACE" additivity="false" />
+ <logger name="org.apache.camel.processor.interceptor" level="TRACE" additivity="false"/>
+ <logger name="org.apache.cxf.jaxrs.interceptor" level="TRACE" additivity="false" />
+ <logger name="org.apache.cxf.service" level="TRACE" additivity="false" />
+ <logger name="org.restlet" level="TRACE" additivity="false" />
+ <logger name="org.apache.camel.component.restlet" level="TRACE" additivity="false" />
+ <logger name="org.apache.kafka" level="TRACE" additivity="false" />
+ <logger name="org.apache.zookeeper" level="TRACE" additivity="false" />
+ <logger name="org.I0Itec.zkclient" level="TRACE" additivity="false" />
+
+ <!-- logback internals logging -->
+ <logger name="ch.qos.logback.classic" level="TRACE" additivity="false"/>
+ <logger name="ch.qos.logback.core" level="TRACE" additivity="false" />
+
+ <!-- logback jms appenders & loggers definition starts here -->
+ <!-- logback jms appenders & loggers definition starts here -->
+ <appender name="auditLogs" class="ch.qos.logback.core.ConsoleAppender">
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+ </filter>
+ <encoder>
+ <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+ </encoder>
+ </appender>
+ <appender name="perfLogs" class="ch.qos.logback.core.ConsoleAppender">
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+ </filter>
+ <encoder>
+ <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+ </encoder>
+ </appender>
+ <appender name="ASYNC-audit" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>1000</queueSize>
+ <discardingThreshold>0</discardingThreshold>
+ <appender-ref ref="Audit-Record-Queue" />
+ </appender>
+
+ <logger name="AuditRecord" level="TRACE" additivity="FALSE">
+ <appender-ref ref="STDOUT" />
+ </logger>
+ <logger name="AuditRecord_DirectCall" level="TRACE" additivity="FALSE">
+ <appender-ref ref="STDOUT" />
+ </logger>
+ <appender name="ASYNC-perf" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>1000</queueSize>
+ <discardingThreshold>0</discardingThreshold>
+ <appender-ref ref="Performance-Tracker-Queue" />
+ </appender>
+ <logger name="PerfTrackerRecord" level="TRACE" additivity="FALSE">
+ <appender-ref ref="ASYNC-perf" />
+ <appender-ref ref="perfLogs" />
+ </logger>
+ <!-- logback jms appenders & loggers definition ends here -->
+
+ <root level="TRACE">
+ <appender-ref ref="DEBUG" />
+ <appender-ref ref="ERROR" />
+ <appender-ref ref="INFO" />
+ <appender-ref ref="STDOUT" />
+ </root>
+
+</configuration>
--- /dev/null
+{
+
+ "ves-measurement": {
+ "type": "message_router",
+ "dmaap_info": {
+ "location": "mtl5",
+ "topic_url": "http://message-router.nonrtric:3904/events/file-ready/",
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+# ============LICENSE_START===============================================
+# Copyright (C) 2023 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: msgrtrapi
+ namespace: nonrtric
+
+data:
+{{ (.Files.Glob "config/MsgRtrApi.properties").AsConfig | nindent 2 }}
# ============LICENSE_END=================================================
#
-nrtpmlog:
- clientsecret: fwbKl26FccceE4godFW8Hkdi5Wi4Lhke
\ No newline at end of file
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: logback
+ namespace: nonrtric
+
+data:
+{{ (.Files.Glob "config/logback.xml").AsConfig | nindent 2 }}
--- /dev/null
+# ============LICENSE_START===============================================
+# Copyright (C) 2023 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: ves-collector-collector.properties
+ namespace: nonrtric
+
+data:
+{{ (.Files.Glob "config/collector.properties").AsConfig | nindent 2 }}
--- /dev/null
+# ============LICENSE_START===============================================
+# Copyright (C) 2023 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: ves-collector-ves-dmaap-config.json
+ namespace: nonrtric
+
+data:
+{{ (.Files.Glob "config/ves-dmaap-config.json").AsConfig | nindent 2 }}
--- /dev/null
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: message-router
+ namespace: nonrtric
+ labels:
+ app: message-router
+
+spec:
+ selector:
+ matchLabels:
+ app: message-router
+ serviceName: message-router
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: message-router
+ app: message-router
+ name: message-router
+ spec:
+ containers:
+ - name: message-router
+ image: nexus3.onap.org:10002/onap/dmaap/dmaap-mr:1.4.4
+ imagePullPolicy: Always
+ ports:
+ - containerPort: 3904
+ name: api
+
+ env:
+ # - name: JAASLOGIN
+ # valueFrom:
+ # secretKeyRef:
+ # name: strimzi-kafka-admin
+ # key: sasl.jaas.config
+ # - name: SASLMECH
+ # value: scram-sha-512
+ - name: enableCadi
+ value: "false"
+ - name: useZkTopicStore
+ value: "false"
+ volumeMounts:
+ - mountPath: /appl/dmaapMR1/bundleconfig/etc/appprops/MsgRtrApi.properties
+ subPath: MsgRtrApi.properties
+ name: msgrtrapi
+ - mountPath: /appl/dmaapMR1/bundleconfig/etc/logback.xml
+ subPath: logback.xml
+ name: logback
+ volumes:
+ - name: msgrtrapi
+ configMap:
+ name: msgrtrapi
+ - name: logback
+ configMap:
+ name: logback
--- /dev/null
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: ves-collector
+ namespace: nonrtric
+ labels:
+ run: ves-collector
+spec:
+ selector:
+ matchLabels:
+ run: ves-collector
+ template:
+ metadata:
+ labels:
+ run: ves-collector
+ spec:
+ volumes:
+ - name: conf-vol1
+ configMap:
+ name: ves-collector-collector.properties
+ - name: conf-vol2
+ configMap:
+ name: ves-collector-ves-dmaap-config.json
+ containers:
+ - name: ves-collector
+ image: nexus3.onap.org:10002/onap/org.onap.dcaegen2.collectors.ves.vescollector:1.12.3
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: DMAAPHOST
+ value: message-router.nonrtric
+ ports:
+ - name: http
+ containerPort: 8080
+ - name: https
+ containerPort: 8443
+ volumeMounts:
+ - name: conf-vol1
+ mountPath: /opt/app/VESCollector/etc/collector.properties
+ subPath: collector.properties
+ - name: conf-vol2
+ mountPath: /opt/app/VESCollector/etc/ves-dmaap-config.json
+ subPath: ves-dmaap-config.json
--- /dev/null
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: zoo-entrance
+ namespace: nonrtric
+ labels:
+ app: zoo-entrance
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: zoo-entrance
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ app: zoo-entrance
+ spec:
+ containers:
+ - name: zoo-entrance
+ image: 'ghcr.io/scholzj/zoo-entrance:latest'
+ command:
+ - /opt/stunnel/stunnel_run.sh
+ ports:
+ - containerPort: 2181
+ name: zoo
+ protocol: TCP
+ env:
+ - name: LOG_LEVEL
+ value: notice
+ - name: STRIMZI_ZOOKEEPER_CONNECT
+ value: 'kafka-1-zookeeper-client:2181'
+ imagePullPolicy: Always
+ livenessProbe:
+ exec:
+ command:
+ - /opt/stunnel/stunnel_healthcheck.sh
+ - '2181'
+ failureThreshold: 3
+ initialDelaySeconds: 15
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+ readinessProbe:
+ exec:
+ command:
+ - /opt/stunnel/stunnel_healthcheck.sh
+ - '2181'
+ failureThreshold: 3
+ initialDelaySeconds: 15
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+ volumeMounts:
+ - mountPath: /etc/cluster-operator-certs/
+ name: cluster-operator-certs
+ - mountPath: /etc/cluster-ca-certs/
+ name: cluster-ca-certs
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - name: cluster-operator-certs
+ secret:
+ defaultMode: 288
+ secretName: kafka-1-cluster-operator-certs
+ - name: cluster-ca-certs
+ secret:
+ defaultMode: 288
+ secretName: kafka-1-cluster-ca-cert
--- /dev/null
+
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ labels:
+ app: zoo-entrance
+ name: zoo-entrance
+spec:
+ ingress:
+ - from:
+ - podSelector:
+ matchLabels:
+ app: zoo-entrance
+ ports:
+ - port: 2181
+ protocol: TCP
+ podSelector:
+ matchLabels:
+ strimzi.io/name: kafka-1-zookeeper
+ policyTypes:
+ - Ingress
\ No newline at end of file
--- /dev/null
+apiVersion: v1
+kind: Service
+metadata:
+ name: message-router
+ namespace: nonrtric
+ labels:
+ app: message-router
+spec:
+ ports:
+ - port: 3904
+ targetPort: 3904
+ protocol: TCP
+ name: http
+ type: ClusterIP
+ selector:
+ app: message-router
--- /dev/null
+apiVersion: v1
+kind: Service
+metadata:
+ name: ves-collector
+ namespace: nonrtric
+ labels:
+ run: ves-collector
+spec:
+ type: NodePort
+ ports:
+ - port: 8080
+ targetPort: 8080
+ name: http
+ nodePort: 31760
+ - port: 8443
+ name: https
+ nodePort: 31761
+ selector:
+ run: ves-collector
--- /dev/null
+
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: zoo-entrance
+ name: zoo-entrance
+ namespace: nonrtric
+spec:
+ ports:
+ - name: zoo
+ port: 2181
+ protocol: TCP
+ targetPort: 2181
+ selector:
+ app: zoo-entrance
+ type: ClusterIP
+++ /dev/null
-fix unique kafka clientids
\ No newline at end of file
{
- "info_type_id": "PmData",
- "job_owner": "console",
- "job_result_uri": "",
- "job_definition": {
- "filter": {
-
- },
- "deliveryInfo": {
- "topic": "pmreports",
- "bootStrapServers": "kafka-1-kafka-bootstrap.nonrtric:9097"
- }
- }
- }
\ No newline at end of file
+ "info_type_id": "PmData",
+ "job_owner": "console",
+ "job_definition": {
+ "filter": {
+ "sourceNames": [],
+ "measObjInstIds": [],
+ "measTypeSpecs": [
+ {
+ "measuredObjClass": "NRCellDU",
+ "measTypes": [
+ "pmCounterNumber101"
+ ]
+ }
+ ],
+ "measuredEntityDns": []
+ },
+ "deliveryInfo": {
+ "topic": "pmreports",
+ "bootStrapServers": "kafka-1-kafka-bootstrap.nonrtric:9097"
+ }
+ }
+}
\ No newline at end of file
emptyDir: {}
containers:
- name: pmlog
- image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-plt-pmlog:1.0.0
- #image: o-ran-sc/nonrtric-plt-pmlog:1.0.0-SNAPSHOT
+# image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-plt-pmlog:1.0.0
+ image: nexus3.o-ran-sc.org:10003/o-ran-sc/nonrtric-plt-pmlog:1.0.0-SNAPSHOT
imagePullPolicy: Always
- #imagePullPolicy: Never
ports:
- name: http
containerPort: 8084
- mountPath: /token-cache
name: token-cache-volume
env:
- - name: APP_INFLUX_ACCESS-TOKEN
+ - name: APP_INFLUX_ACCESSTOKEN
valueFrom:
secretKeyRef:
name: influxdb-api-token
spec:
containers:
- name: pm-rapp
- image: pm-rapp:latest
+ image: {{ .Values.global.extimagerepo }}pm-rapp:latest
+ {{- if .Values.global.extimagerepo }}
+ imagePullPolicy: Always
+ {{- else }}
imagePullPolicy: Never
+ {{- end }}
ports:
- name: http
containerPort: 80
--- /dev/null
+values.yaml
- name: dfc
securityContext:
runAsUser: 0 # Need to run as root - needed when writing to hostpath
- image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-plt-ranpm-datafilecollector:1.0.0
- #image: o-ran-sc/nonrtric-plt-ranpm-datafilecollector:1.0.0-SNAPSHOT
+# image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-plt-ranpm-datafilecollector:1.0.0
+ image: nexus3.o-ran-sc.org:10003/o-ran-sc/nonrtric-plt-ranpm-datafilecollector:1.0.0-SNAPSHOT
imagePullPolicy: Always
- #imagePullPolicy: Never
ports:
- name: http
containerPort: 8100
spec:
containers:
- name: informationservice
- image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-plt-informationcoordinatorservice:1.5.0
+# image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-plt-informationcoordinatorservice:1.5.0
+ image: nexus3.o-ran-sc.org:10003/o-ran-sc/nonrtric-plt-informationcoordinatorservice:1.5.0-SNAPSHOT
imagePullPolicy: Always
ports:
- name: http
spec:
containers:
- name: kafka-producer-pm-json2influx
- image: kafka-pm-producer:latest
+ image: {{ .Values.global.extimagerepo }}pm-file-converter:latest
+ {{- if .Values.global.extimagerepo }}
+ imagePullPolicy: Always
+ {{- else }}
imagePullPolicy: Never
+ {{- end }}
ports:
- name: http
containerPort: 80
spec:
containers:
- name: kafka-producer-pm-json2kafka
- image: kafka-pm-producer:latest
+ image: {{ .Values.global.extimagerepo }}pm-file-converter:latest
+ {{- if .Values.global.extimagerepo }}
+ imagePullPolicy: Always
+ {{- else }}
imagePullPolicy: Never
+ {{- end }}
ports:
- name: http
containerPort: 80
spec:
containers:
- name: kafka-producer-pm-xml2json
- image: kafka-pm-producer:latest
+ image: {{ .Values.global.extimagerepo }}pm-file-converter:latest
+ {{- if .Values.global.extimagerepo }}
+ imagePullPolicy: Always
+ {{- else }}
imagePullPolicy: Never
+ {{- end }}
ports:
- name: http
containerPort: 80
--- /dev/null
+values.yaml
spec:
containers:
- name: pm-producer-json2kafka
- image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-plt-pmproducer:1.0.0
- #image: o-ran-sc/nonrtric-plt-pmproducer:1.0.0-SNAPSHOT
+# image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-plt-pmproducer:1.0.0
+ image: nexus3.o-ran-sc.org:10003/o-ran-sc/nonrtric-plt-pmproducer:1.0.0-SNAPSHOT
imagePullPolicy: Always
- #imagePullPolicy: Never
ports:
- name: http
containerPort: 8084
#
pmproducerjson2kafka:
- clientsecret: Wy7TuisvAJZ972xG9pRznKfI1gksVx8z
\ No newline at end of file
+ clientsecret: NIIrUK0LisJ0iUVbzOMXtwnnhzjyHtmh
\ No newline at end of file
labels:
app: pm-https-server
spec:
- replicas: 1 # Max 10 = number of generated certs unique ...
+ replicas: {{ .Values.global.numhttpsservers }} # Max 10 = number of generated unique certs ...
serviceName: pm-https-server
selector:
matchLabels:
runAsUser: 0
containers:
- name: pm-https-server
- image: pm-https-server:latest
+ image: {{ .Values.global.extimagerepo }}pm-https-server:latest
+ {{- if .Values.global.extimagerepo }}
+ imagePullPolicy: Always
+ {{- else }}
imagePullPolicy: Never
+ {{- end }}
ports:
- name: http
containerPort: 80
# If env is missing, the file in the call to "/files/<filename> url must exist in the server
- name: ALWAYS_RETURN
value: /ne-files/pm.xml.gz
- # Env must be specified if genetated files use. The value shall spefify the first timestamp of a series of pm files
+ # Env must be specified if generated files use. The value shall specify the first timestamp of a series of pm files
# If a file with a timestamp less than the below will return 404
- # Timestamp shall be gvien with date.time where minutes has values 00,15,45 and the given timezone
+ # Timestamp shall be given with date.time where minutes has values 00,15,45 and the given timezone
# Example: 20230220.1300 - denotes a first file name of 20230220.1300+0100-1315+0100_<node-name>.xml.gz
- name: GENERATED_FILE_START_TIME
- value: "20230220.1300"
+ value: "20230515.0700"
# Timezone to use for generated files. If not given, timezone 0000 will be used
# Shall include +/- sign for the timezone value
- name: GENERATED_FILE_TIMEZONE
SAMELINE="\033[0K\r"
# Variables
-export KHOST=$(kube_get_controlplane_host)
+export KUBERNETESHOST=$(kube_get_controlplane_host)
if [ $? -ne 0 ]; then
- echo $KHOST
+ echo $KUBERNETESHOST
echo "Exiting"
exit 1
fi
echo "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
-echo "Kubernetes control plane host: $KHOST"
+echo "Kubernetes control plane host: $KUBERNETESHOST"
echo "Host obtained from current kubectl context"
echo "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
+echo "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
+echo "Checking requirements"
+echo " Checking if istio is installed"
+kubectl $KUBECONF get authorizationpolicies -A &> /dev/null
+if [ $? -ne 0 ]; then
+ echo " Istio api: kubectl get authorizationpolicies is not installed"
+ exit 1
+else
+ echo " OK"
+fi
+echo " Checking if jq is installed"
+tmp=$(type jq)
+if [ $? -ne 0 ]; then
+ echo " Command utility jq (cmd-line json processor) is not installed"
+ exit 1
+else
+ echo " OK"
+fi
+echo " Checking if envsubst is installed"
+tmp=$(type envsubst)
+if [ $? -ne 0 ]; then
+ echo " Command utility envsubst (env var substitution in files) is not installed"
+ exit 1
+else
+ echo " OK"
+fi
+
echo "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
echo "Restarting istiod, workaround to refresh jwks cache"
kubectl rollout restart deployments/istiod -n istio-system
# Create realm in keycloak
-##export KC_PORT=$(kube_get_nodeport keycloak nonrtric http)
. scripts/populate_keycloak.sh
create_realms nonrtric-realm
generate_client_secrets nonrtric-realm $cid
check_error $?
-# retcode=0
-# while [ $retcode -eq 0 ]; do
-# #NRT_REALM_JWKS=$(kubectl exec -n nonrtric client -- curl -f http://keycloak.nonrtric:8080/realms/nonrtric-realm/protocol/openid-connect/certs)
-# NRT_REALM_JWKS=$(curl -fs localhost:31788/realms/nonrtric-realm/protocol/openid-connect/certs)
-# if [ $? -eq 0 ]; then
-# retcode=1
-# #echo $NRT_REALM_JWKS
-# echo "JWKS for nonrtric-realm obtained"
-# else
-# sleep 3
-# echo "Wating for keycloak to publish JWKS for nonrtric-realm"
-# fi
-# done
-
-# export NRT_REALM_JWKS
-
echo ""
-# ##################################################################################
-# echo "##### Installing chart httpecho"
-# ##################################################################################
-
-#helm install --wait -n nonrtric httpecho helm/httpecho
-
-
-
-# TSEC=$SECONDS
-# numok=0
-# while [ $numok -lt 10 ]; do
-# echo ""
-# echo "Time: $(($SECONDS-$TSEC))"
-# cid="console-setup"
-# __get_admin_token
-# TOKEN=$(get_client_token nonrtric-realm $cid)
-# decode_token "$TOKEN"
-# curl -fv localhost:31789/ -H "Authorization: Bearer $TOKEN"
-# if [ $? -eq 0 ]; then
-# let numok=numok+1
-# fi
-# sleep 5
-# done
-
-# TSEC=$SECONDS
-# while [ true ]; do
-# echo ""
-# echo "Time: $(($SECONDS-$TSEC))"
-# cid="console-setup"
-# __get_admin_token
-# TOKEN=$(get_client_token nonrtric-realm $cid)
-# decode_token "$TOKEN"
-# curl -v localhost:31789/ -H "Authorization: Bearer $TOKEN"
-# sleep 5
-# done
cid="console-setup"
__get_admin_token
TOKEN=$(get_client_token nonrtric-realm $cid)
-decode_token "$TOKEN"
##################################################################################
echo "##### Installing charts: strimzi and nrt-base-1"
cp opa-rules/bundle.tar.gz helm/nrt-base-1/charts/opa-rule-db/data
-#envsubst < helm/nrt-base-1/charts/httpecho/values-template.yaml > helm/nrt-base-1/charts/httpecho/values.yaml
-
helm install -n nonrtric nrt-base-1 helm/nrt-base-1
-
+echo "Waiting for influx db - there may be error messages while trying..."
retcode=1
while [ $retcode -eq 1 ]; do
retcode=0
sleep 1
elif [ "$CONFIG" == "{}" ]; then
echo "Configuring db"
- kubectl exec -n nonrtric influxdb2-0 -- influx setup -u bm -p mySuP3rS3cr3tT0keN -o est -b pm-bucket -f
+ kubectl exec -n nonrtric influxdb2-0 -- influx setup -u admin -p mySuP3rS3cr3tT0keN -o est -b pm-bucket -f
if [ $? -ne 0 ]; then
retcode=1
sleep 1
done
# Save influx user api-token to secret
-INFLUXDB2_TOKEN=$(get_influxdb2_token influxdb2-0 nonrtric)
-INFLUXDB2_TOKEN=$(echo -n $INFLUXDB2_TOKEN | base64)
-PATCHDATA='[{"op": "add", "path": "/data/token", "value": "'$INFLUXDB2_TOKEN'" }]'
+INFLUXDB2_TOKEN=$(get_influxdb2_token influxdb2-0 nonrtric | base64)
+PATCHDATA='[{"op": "add", "path": "/data/token", "value": "'$INFLUXDB2_TOKEN'"}]'
kubectl patch secret influxdb-api-token -n nonrtric --type json -p "$PATCHDATA"
-
echo "Wait for kafka"
_ts=$SECONDS
until $(kubectl exec -n nonrtric kafka-client -- kafka-topics --list --bootstrap-server kafka-1-kafka-bootstrap.nonrtric:9092 1> /dev/null 2> /dev/null); do
./helm/ran/certs/gen-certs.sh 10
check_error $?
-helm install --wait -n ran ran helm/ran
+helm install --wait -f helm/global-values.yaml -n ran ran helm/ran
echo ""
envsubst < helm/nrt-pm/charts/dfc/values-template.yaml > helm/nrt-pm/charts/dfc/values.yaml
-
-#envsubst < helm/nrt-pm/charts/ics/values-template.yaml > helm/nrt-pm/charts/ics/values.yaml
-
-helm install --wait -n nonrtric nrt-pm helm/nrt-pm
+helm install --wait -f helm/global-values.yaml -n nonrtric nrt-pm helm/nrt-pm
echo ""
fi
}
-export KHOST=$(kube_get_controlplane_host)
+export KUBERNETESHOST=$(kube_get_controlplane_host)
if [ $? -ne 0 ]; then
- echo $KHOST
+ echo $KUBERNETESHOST
echo "Exiting"
exit 1
fi
-echo "Kubernetes control plane host: $KHOST"
+echo "Kubernetes control plane host: $KUBERNETESHOST"
. scripts/kube_get_nodeport.sh
. scripts/get_influxdb2_token.sh
echo "Creating bucket $bucket in influxdb2"
create_influxdb2_bucket influxdb2-0 nonrtric $bucket
-export KC_PORT=$(kube_get_nodeport keycloak nonrtric http)
. scripts/populate_keycloak.sh
cid="console-setup"
echo "Installing pmlog"
# Variables
-export KHOST=$(kube_get_controlplane_host)
+export KUBERNETESHOST=$(kube_get_controlplane_host)
if [ $? -ne 0 ]; then
- echo $KHOST
+ echo $KUBERNETESHOST
echo "Exiting"
exit 1
fi
fi
}
-# echo " Retriving influxdb2 access token..."
-# export INFLUXDB2_TOKEN=$(get_influxdb2_token influxdb2-0 nonrtric)
-
-# envsubst < nrt-pm-log/values-template.yaml > nrt-pm-log/values.yaml
-
-export KC_PORT=$(kube_get_nodeport keycloak nonrtric http)
. scripts/populate_keycloak.sh
cid="nrt-pm-log"
-echo "Installtion pmrapp"
+echo "Installing pmrapp"
. scripts/kube_get_controlplane_host.sh
. scripts/kube_get_nodeport.sh
echo "Creating client in keycloak"
# Find host and port to keycloak
-export KHOST=$(kube_get_controlplane_host)
+export KUBERNETESHOST=$(kube_get_controlplane_host)
if [ $? -ne 0 ]; then
- echo $KHOST
+ echo $KUBERNETESHOST
echo "Exiting"
exit 1
fi
-create_topic kafka-1-kafka-bootstrap.nonrtric:9092 pm-rapp 10
-
-export KC_PORT=$(kube_get_nodeport keycloak nonrtric http)
+create_topic kafka-1-kafka-bootstrap.nonrtric:9092 rapp-topic 10
. scripts/populate_keycloak.sh
envsubst < helm/nrt-pm-rapp/values-template.yaml > helm/nrt-pm-rapp/values.yaml
echo " helm install..."
-helm install --wait -n nonrtric nrt-pm-rapp helm/nrt-pm-rapp
+helm install --wait -f helm/global-values.yaml -n nonrtric nrt-pm-rapp helm/nrt-pm-rapp
echo "done"
-# Build buundle
+# Build bundle
tar cvf bundle.tar rules data.json
gzip bundle.tar
+# Installation
+
+The bundle is installed as part of the install-nrt.sh script.
+
## License
--- /dev/null
+
+## General
+
+This folder contains scripts to push pm event to the file-ready kafka topic, directly to the topic or via the ves-collector.
+For simple testing, pushing directly to the topic is much faster, but when integrating with e.g. RAN simulators, pushing via the ves-collector might be a better option.
+
+
+## Script - push-genfiles-to-file-ready-topic.sh
+
+This script pushes generated pm files directly to the file-ready kafka topic.
+
+The script pushes events from one or more RAN nodes. For each RAN node, one or more events are pushed in sequence where each event in the sequence represents a 15 min measurement.
+Although each pm-file represents a 15 min measurement period, all events are pushed in a sequence without any delay between the files.
+The RAN nodes are specified as a single base name, e.g. "NODE-A", and the script will name the nodes "NODE-A-0", "NODE-A-1" etc.
+Each event will contain a url to each pm file. These urls point to a web server (simulating a RAN node). The number of web servers is fixed, and the urls are generated so that the load is spread out over the web servers.
+
+### Parameters
+
+`push-genfiles-to-file-ready-topic.sh <node-count> <num-of-events> <node-name-base> <file-extension> sftp|ftpes|https <num-servers> [hist]"`
+
+- node-count : The number of simulated RAN nodes
+- number-of-events : The number of 15 min measurements (event) per node
+- node-name-base : Base name of the RAN nodes, index 0,1,2 etc will be added to the name
+- file-extension : The pm file extension - should match the actual pm file to be downloaded from the web-servers (simulated RAN nodes)
+- sftp|ftpes|https - Protocol for downloading pm files - only https is currently supported
+- num-servers - The number of web servers for pm file download. Should match the number of web servers actually started by the install script. This script generates pm file url to one of the web servers to spread the load. Note that this number can be different from the node-count parameter.
+- hist : By default, each event only contains the reference to a single pm file. If the parameter is given then each event will contain the latest pm file and 95 of the previous files to represent a full 24h set of pm files.
+
+
+## Script - push-genfiles-to-ves-collector.sh
+
+This script pushes generated pm files via the ves-collector to the file-ready kafka topic.
+The parameters are exactly the same as for the `push-genfiles-to-file-ready-topic.sh` script.
+
+## Typical usage
+
+The below example
+
+`<script> 20 100 GNODEB xml.gz https 10`
+
+This will push 100 events (file ready events) for each of the 20 nodes. The timestamp of the first files for the 20 nodes will be as configured in this script. Each following event will have a timestamp of 15 minutes later.
+The nodes will be named `GNODEB-0 -- GNODEB-19`.
+The file format of the file url will use xml.gz (the corresponding files are expected to exist or be generated by the server url).
+Files will be downloaded by the datafile collector using https.
+The ranpm is setup with 10 https servers (ran simulators) where the files will be downloaded. Note that this number should be equal to or less than the number of https servers.
+
+The following file names will be present in the sequence of events - a total of 2000 events (20 * 100):
+
+`A20230515.0700+0100-0715+0100_GNODEB-0.xml.gz`
+`A20230515.0700+0100-0715+0100_GNODEB-1.xml.gz` \
+`...`\
+`A20230516.0745+0100-0800+0100_GNODEB-18.xml.gz`
+`A20230516.0745+0100-0800+0100_GNODEB-19.xml.gz`
+
+
+
+## License
+
+Copyright (C) 2023 Nordix Foundation. All rights reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
# args: <job-id> <job-index-suffix> [<access-token>]
# job file shall exist in file "".job.json"
create_ics_job() {
+
+ ICS_PROXY_PORT=$(kubectl get svc -n nonrtric informationservice --output jsonpath='{.spec.ports[?(@.name=="http")].nodePort}')
+ echo "NodePort to ics: "$ICS_PROXY_PORT
JOB=$(<.job.json)
echo $JOB
retcode=1
else
__bearer="Authorization: Bearer $TOKEN"
fi
- STAT=$(curl -s -X PUT -w '%{http_code}' -H accept:application/json -H Content-Type:application/json http://$KHOST:$(kube_get_nodeport informationservice nonrtric http)/data-consumer/v1/info-jobs/job-$1"-"$2 --data-binary @.job.json -H "$__bearer" )
+ STAT=$(curl -s -X PUT -w '%{http_code}' -H accept:application/json -H Content-Type:application/json http://$KUBERNETESHOST:$ICS_PROXY_PORT/data-consumer/v1/info-jobs/job-$1"-"$2 --data-binary @.job.json -H "$__bearer" )
retcode=$?
echo "curl return code: $retcode"
if [ $retcode -eq 0 ]; then
echo "Creating topic: $topic with $partitions partition(s) in $kafka"
- kubectl exec -it client -n nonrtric -- bash -c 'kafka-topics --create --topic '$topic' --partitions '$partitions' --bootstrap-server '$kafka
+ kubectl exec -it kafka-client -n nonrtric -- bash -c 'kafka-topics --create --topic '$topic' --partitions '$partitions' --bootstrap-server '$kafka
return $?
}
sleep 1
fi
done
- echo $__influxdb2_access_token
+ echo -n $__influxdb2_access_token
return 0
}
\ No newline at end of file
cd $SD
CWD=$PWD
-NODE_COUNT=$1
-EVT_COUNT=$2
-NODE_NAME_BASE=$3
-FILE_EXT=$4
-TYPE=$5
-SRV_COUNT=$6
-HIST=$7
+VARIANT=$1
+NODE_COUNT=$2
+EVT_COUNT=$3
+NODE_NAME_BASE=$4
+FILE_EXT=$5
+TYPE=$6
+SRV_COUNT=$7
+HIST=$8
FTPES_PORT=2021
SFTP_PORT=2022
HTTPS_PORT=443
print_usage() {
- echo "Usage: kafka-client-send-genfiles-file-ready.sh <node-count> <num-of-events> <node-name-base> <file-extension> sftp|ftpes|https <num-servers> [hist]"
+ echo "Usage: kafka-client-send-genfiles-file-ready.sh ves|file-ready <node-count> <num-of-events> <node-name-base> <file-extension> sftp|ftpes|https <num-servers> [hist]"
exit 1
}
echo $@
-if [ $# -lt 6 ] && [ $# -gt 7 ]; then
+if [ $# -lt 7 ] && [ $# -gt 8 ]; then
print_usage
fi
fi
HIST_LEN=96
fi
+PUSHMSG=""
+if [ $VARIANT == "ves" ]; then
+ PUSHMSG="to ves-collector"
+elif [ $VARIANT == "file-ready" ]; then
+ PUSHMSG="to file-ready topic"
+fi
-# Unix time of 20230220.1300
+# Unix time of 20230515.0700
# If the value is changed, make sure to set the same time to the env var GENERATED_FILE_START_TIME in kube-plt.yaml for the https-server
-BEGINTIME=1676898000
+BEGINTIME=1684134000
# Time zone
# If the value is changed, make sure to set the same value to the env var GENERATED_FILE_TIMEZONE in kube-plt.yaml for the https-server
TIMEZONE="+0100"
CURTIME=$BEGINTIME
BATCHSIZE=1000
+if [ $VARIANT == "ves" ]; then
+ BATCHSIZE=20
+fi
CNTR=0
TCNTR=0
if [ $CNTR -eq 0 ]; then
rm .out.json
touch .out.json
+ if [ $VARIANT == "ves" ]; then
+ echo '{"eventList": [' > .out.json
+ fi
fi
if [ "$HIST" == "" ]; then
if [ "$HIST" == "" ]; then
NO="$NODE_NAME_BASE-$j"
- #FN="A20000626.2315+0200-2330+0200_$NO-$i.$FILE_EXT"
FN="A$ST$TIMEZONE-$ET${TIMEZONE}_$NO.$FILE_EXT"
let SRV_ID=$j%$SRV_COUNT
- #let SRV_ID=SRV_ID+1
echo "NODE "$NO
echo "FILENAME "$FN
echo "HTTP SERVER "$SRV
URL="https://$SRV:$HTTPS_PORT/generatedfiles/$FN"
fi
- EVT='{"event":{"commonEventHeader":{"sequence":0,"eventName":"Noti_RnNode-Ericsson_FileReady","sourceName":"'$NO'","lastEpochMicrosec":'$CURTIMEMS',"startEpochMicrosec":'$STTIMEMS',"timeZoneOffset":"UTC'$TIMEZONE'","changeIdentifier":"PM_MEAS_FILES"},"notificationFields":{"notificationFieldsVersion":"notificationFieldsVersion","changeType":"FileReady","changeIdentifier":"PM_MEAS_FILES","arrayOfNamedHashMap":[{"name":"'$FN'","hashMap":{"fileFormatType":"org.3GPP.32.435#measCollec","location":"'$URL'","fileFormatVersion":"V10","compression":"gzip"}}]}}}'
+
+ if [ $VARIANT == "ves" ] && [ $CNTR -gt 0 ]; then
+ echo "," >> .out.json
+ fi
+ if [ $VARIANT == "ves" ]; then
+ EVT='{"commonEventHeader":{"domain":"notification","sequence":0,"eventName":"Noti_RnNode-Ericsson_FileReady","eventId":"FileReady_'$TCNTR'","priority":"Normal","version":"4.0.1","vesEventListenerVersion":"7.0.1","sourceName":"'$NO'","reportingEntityName":"'$NO'","lastEpochMicrosec":'$CURTIMEMS',"startEpochMicrosec":'$STTIMEMS',"timeZoneOffset":"UTC'$TIMEZONE'"},"notificationFields":{"notificationFieldsVersion":"2.0","changeType":"FileReady","changeIdentifier":"PM_MEAS_FILES","arrayOfNamedHashMap":[{"name":"'$FN'","hashMap":{"fileFormatType":"org.3GPP.32.435#measCollec","location":"'$URL'","fileFormatVersion":"V10","compression":"gzip"}}]}}'
+ else
+ EVT='{"event":{"commonEventHeader":{"sequence":0,"eventName":"Noti_RnNode-Ericsson_FileReady","sourceName":"'$NO'","lastEpochMicrosec":'$CURTIMEMS',"startEpochMicrosec":'$STTIMEMS',"timeZoneOffset":"UTC'$TIMEZONE'","changeIdentifier":"PM_MEAS_FILES"},"notificationFields":{"notificationFieldsVersion":"notificationFieldsVersion","changeType":"FileReady","changeIdentifier":"PM_MEAS_FILES","arrayOfNamedHashMap":[{"name":"'$FN'","hashMap":{"fileFormatType":"org.3GPP.32.435#measCollec","location":"'$URL'","fileFormatVersion":"V10","compression":"gzip"}}]}}}'
+ fi
echo $EVT >> .out.json
else
NO="$NODE_NAME_BASE-$j"
let SRV_ID=$j%$SRV_COUNT
- #let SRV_ID=SRV_ID+1
echo "NODE "$NO
EVT_FRAG=""
if [ $FID -lt 0 ]; then
FN="NONEXISTING_$NO.$FILE_EXT"
else
- #FN="A20000626.2315+0200-2330+0200_$NO-$FID.$FILE_EXT"
FN="A$ST$TIMEZONE-$ET${TIMEZONE}_$NO.$FILE_EXT"
fi
echo "FILENAME "$FN
- if [ $TYPE == "sftp" ]; then
- SRV="ftp-sftp-$SRV_ID"
- #echo "FTP SERVER "$SRV
- URL="sftp://onap:pano@$SRV:$SFTP_PORT/$FN"
- elif [ $TYPE == "ftpes" ]; then
- SRV="ftp-ftpes-$SRV_ID"
- #echo "FTP SERVER "$SRV
- URL="ftpes://onap:pano@$SRV:$FTPES_PORT/$FN"
- elif [ $TYPE == "https" ]; then
- SRV="pm-https-server-$SRV_ID.pm-https-server.ran"
- #echo "HTTP SERVER "$SRV
- URL="https://$SRV:$HTTPS_PORT/files/$FN"
- fi
+ SRV="pm-https-server-$SRV_ID.pm-https-server.ran"
+ URL="https://$SRV:$HTTPS_PORT/files/$FN"
if [ "$EVT_FRAG" != "" ]; then
EVT_FRAG=$EVT_FRAG","
fi
EVT_FRAG=$EVT_FRAG'{"name":"'$FN'","hashMap":{"fileFormatType":"org.3GPP.32.435#measCollec","location":"'$URL'","fileFormatVersion":"V10","compression":"gzip"}}'
done
- EVT='{"event":{"commonEventHeader":{"sequence":0,"eventName":"Noti_RnNode-Ericsson_FileReady","sourceName":"'$NO'","lastEpochMicrosec":'$CURTIMEMS',"startEpochMicrosec":'$STTIMEMS',"timeZoneOffset":"UTC'$TIMEZONE'","changeIdentifier":"PM_MEAS_FILES"},"notificationFields":{"notificationFieldsVersion":"notificationFieldsVersion","changeType":"FileReady","changeIdentifier":"PM_MEAS_FILES","arrayOfNamedHashMap":['$EVT_FRAG']}}}'
+ if [ $VARIANT == "ves" ] && [ $CNTR -gt 0 ]; then
+ echo "," >> .out.json
+ fi
+ if [ $VARIANT == "ves" ]; then
+ EVT='{"commonEventHeader":{"domain":"notification","sequence":0,"eventName":"Noti_RnNode-Ericsson_FileReady","eventId":"FileReady_'$TCNTR'","priority":"Normal","version":"4.0.1","vesEventListenerVersion":"7.0.1","sourceName":"'$NO'","reportingEntityName":"'$NO'","lastEpochMicrosec":'$CURTIMEMS',"startEpochMicrosec":'$STTIMEMS',"timeZoneOffset":"UTC'$TIMEZONE'"},"notificationFields":{"notificationFieldsVersion":"2.0","changeType":"FileReady","changeIdentifier":"PM_MEAS_FILES","arrayOfNamedHashMap":[{"name":"'$FN'","hashMap":{"fileFormatType":"org.3GPP.32.435#measCollec","location":"'$URL'","fileFormatVersion":"V10","compression":"gzip"}}]}}'
+ else
+ EVT='{"event":{"commonEventHeader":{"sequence":0,"eventName":"Noti_RnNode-Ericsson_FileReady","sourceName":"'$NO'","lastEpochMicrosec":'$CURTIMEMS',"startEpochMicrosec":'$STTIMEMS',"timeZoneOffset":"UTC'$TIMEZONE'","changeIdentifier":"PM_MEAS_FILES"},"notificationFields":{"notificationFieldsVersion":"notificationFieldsVersion","changeType":"FileReady","changeIdentifier":"PM_MEAS_FILES","arrayOfNamedHashMap":[{"name":"'$FN'","hashMap":{"fileFormatType":"org.3GPP.32.435#measCollec","location":"'$URL'","fileFormatVersion":"V10","compression":"gzip"}}]}}}'
+ fi
echo $EVT >> .out.json
-
fi
let CNTR=CNTR+1
let TCNTR=TCNTR+1
if [ $CNTR -ge $BATCHSIZE ]; then
- echo "Pushing batch of $CNTR events"
- cat .out.json | kafka-console-producer --topic file-ready --broker-list kafka-1-kafka-bootstrap.nonrtric:9092
+ echo "Pushing batch of $CNTR events $PUSHMSG"
+ if [ $VARIANT == "ves" ]; then
+ echo ']}' >> .out.json
+ curl -s -X POST http://ves-collector.nonrtric:8080/eventListener/v7/eventBatch --header 'Content-Type: application/json' --data-binary @.out.json
+ else
+ cat .out.json | kafka-console-producer --topic file-ready --broker-list kafka-1-kafka-bootstrap.nonrtric:9092
+ fi
rm .out.json
touch .out.json
+ if [ $VARIANT == "ves" ]; then
+ echo '{"eventList": [' > .out.json
+ fi
CNTR=0
fi
done
done
if [ $CNTR -ne 0 ]; then
- echo "Pushing batch of $CNTR events"
- cat .out.json | kafka-console-producer --topic file-ready --broker-list kafka-1-kafka-bootstrap.nonrtric:9092
+ echo "Pushing batch of $CNTR events $PUSHMSG"
+ if [ $VARIANT == "ves" ]; then
+ echo ']}' >> .out.json
+ curl -s -X POST http://ves-collector.nonrtric:8080/eventListener/v7/eventBatch --header 'Content-Type: application/json' --data-binary @.out.json
+ else
+ cat .out.json | kafka-console-producer --topic file-ready --broker-list kafka-1-kafka-bootstrap.nonrtric:9092
+ fi
fi
echo "Pushed $TCNTR events"
# Script intended to be sourced by other script to add functions to the keycloak rest API
-echo "Cluster ip: $KHOST"
+echo "Cluster ip: $KUBERNETESHOST"
-echo "Keycloak nodeport: $KC_PORT"
-
-#KC_URL="http://$KHOST:$KC_PORT"
KC_URL=http://keycloak.nonrtric:8080
echo "Keycloak url: "$KC_URL
+KC_PROXY_PORT=$(kubectl get svc -n nonrtric keycloak-proxy --output jsonpath='{.spec.ports[?(@.name=="http")].nodePort}')
+echo "Nodeport to keycloak proxy: "$KC_PROXY_PORT
+
__get_admin_token() {
echo "Get admin token"
ADMIN_TOKEN=""
while [ "${#ADMIN_TOKEN}" -lt 20 ]; do
- ADMIN_TOKEN=$(curl --proxy localhost:31784 -s -X POST --max-time 2 "$KC_URL/realms/master/protocol/openid-connect/token" -H "Content-Type: application/x-www-form-urlencoded" -d "username=admin" -d "password=admin" -d 'grant_type=password' -d "client_id=admin-cli" | jq -r '.access_token')
+ ADMIN_TOKEN=$(curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s -X POST --max-time 2 "$KC_URL/realms/master/protocol/openid-connect/token" -H "Content-Type: application/x-www-form-urlencoded" -d "username=admin" -d "password=admin" -d 'grant_type=password' -d "client_id=admin-cli" | jq -r '.access_token')
if [ "${#ADMIN_TOKEN}" -lt 20 ]; then
echo "Could not get admin token, retrying..."
echo "Retrieved token: $ADMIN_TOKEN"
decode_token() {
echo "Decoding access_token"
echo $1 | jq -R 'split(".") | .[0,1] | @base64d | fromjson'
- #echo $1 | jq -r .access_token | jq -R 'split(".") | .[1] | @base64d | fromjson'
}
decode_jwt() {
list_realms() {
echo "Listing all realms"
- curl --proxy localhost:31784 -s \
+ __check_admin_token
+ curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
-X GET \
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
"$KC_URL/admin/realms" | jq -r '.[].id' | indent2
echo "$@"
for realm in "$@"; do
echo "Attempt to delete realm: $realm"
- curl --proxy localhost:31784 -s \
+ __check_admin_token
+ curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
-X DELETE \
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
"$KC_URL/admin/realms/$realm" | indent1
echo "Creating realms: $@"
while [ $# -gt 0 ]; do
echo " Attempt to create realm: $1"
-
+ __check_admin_token
cat > .jsonfile1 <<- "EOF"
{
"realm":"$__realm_name",
EOF
export __realm_name=$1
envsubst < .jsonfile1 > .jsonfile2
- curl --proxy localhost:31784 -s \
+ curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
-X POST \
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
-H "Content-Type: application/json" \
__realm=$1
shift
echo "Attempt to create clients $@ for realm: $__realm"
- __check_admin_token
cat > .jsonfile1 <<- "EOF"
{
EOF
while [ $# -gt 0 ]; do
echo " Creating client: $1"
+ __check_admin_token
export __client_name=$1
envsubst < .jsonfile1 > .jsonfile2
- curl --proxy localhost:31784 -s \
+ curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
-X POST \
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
-H "Content-Type: application/json" \
}
__get_client_id() {
- __client_data=$(curl --proxy localhost:31784 -s \
+ __client_data=$(curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
-X GET \
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
"$KC_URL/admin/realms/$1/clients?clientId=$2")
__realm=$1
shift
echo "Attempt to generate secret for clients $@ in realm $__realm"
- __check_admin_token
while [ $# -gt 0 ]; do
+ __check_admin_token
__client_id=$(__get_client_id $__realm $1)
if [ $? -ne 0 ]; then
echo "Command failed"
fi
echo " Client id for client $1 in realm $__realm: "$__client_id | indent1
echo " Creating secret"
- __client_secret=$(curl --proxy localhost:31784 -s \
+ __client_secret=$(curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
-X POST \
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
"$KC_URL/admin/realms/$__realm/clients/$__client_id/client-secret")
echo "Command failed"
exit 1
fi
- __client_secret=$(curl --proxy localhost:31784 -s \
+ __client_secret=$(curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
-X GET \
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
"$KC_URL/admin/realms/$__realm/clients/$__client_id/client-secret")
create_client_roles() {
# <realm-name> <client-name> [<role-name>]+
+ __check_admin_token
__client_id=$(__get_client_id $1 $2)
if [ $? -ne 0 ]; then
echo "Command failed"
EOF
export __role=$1
envsubst < .jsonfile1 > .jsonfile2
- curl --proxy localhost:31784 -s \
+ curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
-X POST \
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
-H "Content-Type: application/json" \
__get_service_account_id() {
# <realm-name> <client-id>
- __service_account_data=$(curl --proxy localhost:31784 -s \
+ __service_account_data=$(curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
-X GET \
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
"$KC_URL/admin/realms/$1/clients/$2/service-account-user")
return 0
}
-# curl --proxy localhost:31784 -s \
+# curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
# -X GET \
# -H "Authorization: Bearer ${ADMIN_TOKEN}" \
# "$KC_URL/admin/realms/$__realm/users/$__service_account_id/role-mappings/clients/$__client_id/available"
__get_client_available_role_id() {
# <realm-name> <service-account-id> <client-id> <client-role-name>
- __client_role_data=$(curl --proxy localhost:31784 -s \
+ __client_role_data=$(curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
-X GET \
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
"$KC_URL/admin/realms/$1/users/$2/role-mappings/clients/$3/available")
__get_client_mapped_role_id() {
# <realm-name> <service-account-id> <client-id> <client-role-name>
- __client_role_data=$(curl --proxy localhost:31784 -s \
+ __client_role_data=$(curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
-X GET \
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
"$KC_URL/admin/realms/$1/users/$2/role-mappings/clients/$3")
add_client_roles_mapping() {
# <realm-name> <client-name> [<role-name>]+
echo "Attempt to add roles ${@:3} to client $2 in realm $1"
+ __check_admin_token
__realm=$1
__client=$2
__client_id=$(__get_client_id $__realm $__client)
echo "]" >> .jsonfile2
echo " Adding roles $__all_roles to client $__client in realm $__realm"
- curl --proxy localhost:31784 -s \
+ curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
-X POST \
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
-H "Content-Type: application/json" \
remove_client_roles_mapping() {
# <realm-name> <client-name> [<role-name>]+
echo "Attempt to removed roles ${@:3} from client $2 in realm $1"
+ __check_admin_token
__realm=$1
__client=$2
__client_id=$(__get_client_id $__realm $__client)
echo "]" >> .jsonfile2
echo " Removing roles $__all_roles from client $__client in realm $__realm"
- curl --proxy localhost:31784 -s \
+ curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
-X DELETE \
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
-H "Content-Type: application/json" \
add_client_hardcoded-claim-mapper() {
# <realm-name> <client-name> <mapper-name> <claim-name> <claim-value>
+ __check_admin_token
__realm=$1
__client=$2
export __mapper_name=$3
export __claim_name=$4
export __claim_value=$5
-set -x
+
__client_id=$(__get_client_id $__realm $__client)
if [ $? -ne 0 ]; then
echo " Fatal error when getting client id, response: "$?
}
EOF
envsubst < .jsonfile1 > .jsonfile2
- curl --proxy localhost:31784 -s \
+ curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s \
-X POST \
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
-H "Content-Type: application/json" \
# Get a client token
# args: <realm-name> <client-name>
get_client_token() {
+ __check_admin_token
__realm=$1
__client=$2
__client_id=$(__get_client_id $__realm $__client)
fi
#echo " Client id for client $__client in realm $__realm: "$__client_id | indent1
- __client_secret=$(curl --proxy localhost:31784 -s -f \
+ __client_secret=$(curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -s -f \
-X GET \
-H "Authorization: Bearer ${ADMIN_TOKEN}" \
"$KC_URL/admin/realms/$__realm/clients/$__client_id/client-secret")
__client_secret=$(echo $__client_secret | jq -r .value)
- __TMP_TOKEN=$(curl --proxy localhost:31784 -f -s -X POST $KC_URL/realms/$__realm/protocol/openid-connect/token \
+ __TMP_TOKEN=$(curl --proxy $KUBERNETESHOST:$KC_PROXY_PORT -f -s -X POST $KC_URL/realms/$__realm/protocol/openid-connect/token \
-H Content-Type:application/x-www-form-urlencoded \
-d client_id="$__client" -d client_secret="$__client_secret" -d grant_type=client_credentials)
if [ $? -ne 0 ]; then
fi
chmod +x kafka-client-send-genfiles-file-ready.sh
-kubectl cp kafka-client-send-genfiles-file-ready.sh nonrtric/client:/home/appuser
+kubectl cp kafka-client-send-genfiles-file-ready.sh nonrtric/kafka-client:/home/appuser
-kubectl exec client -n nonrtric -- bash -c './kafka-client-send-genfiles-file-ready.sh '$NODE_COUNT' '$EVT_COUNT' '$NODE_NAME_BASE' '$FILE_EXT' '$TYPE' '$SRV_COUNT' '$HIST
+kubectl exec kafka-client -n nonrtric -- bash -c './kafka-client-send-genfiles-file-ready.sh file-ready '$NODE_COUNT' '$EVT_COUNT' '$NODE_NAME_BASE' '$FILE_EXT' '$TYPE' '$SRV_COUNT' '$HIST
echo done
--- /dev/null
+#!/bin/bash
+
+# ============LICENSE_START===============================================
+# Copyright (C) 2023 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+# Resolve the script's own directory and cd into it so relative paths
+# (e.g. the generator script copied below) are found regardless of caller cwd.
+SD=$(dirname -- "$0")
+echo "${0##*/} script-home: "$SD
+cd $SD
+CWD=$PWD
+
+
+# Positional arguments - validated below; see print_usage for their meaning.
+NODE_COUNT=$1
+EVT_COUNT=$2
+NODE_NAME_BASE=$3
+FILE_EXT=$4
+TYPE=$5
+SRV_COUNT=$6
+HIST=$7
+
+# Print command usage and terminate the script.
+print_usage() {
+    echo "Usage: push-genfiles-to-ves-collector.sh <node-count> <num-of-events> <node-name-base> <file-extension> sftp|ftpes|https <num-servers> [hist]"
+    exit 1
+}
+# Require the 6 mandatory args plus at most the optional 7th (hist).
+if [ $# -lt 6 ] || [ $# -gt 7 ]; then
+    print_usage
+fi
+
+# Only https file servers are currently supported.
+# NOTE(review): the sftp/ftpes branches only print a message and fall
+# through without exiting - confirm this is intended.
+if [ $TYPE == "sftp" ]; then
+    echo "sftp servers not yet supported"
+elif [ $TYPE == "ftpes" ]; then
+    echo "ftpes servers not yet supported"
+elif [ $TYPE == "https" ]; then
+    :
+else
+    print_usage
+fi
+
+# Only gzipped xml PM files are supported.
+if [ $FILE_EXT != "xml.gz" ]; then
+    echo "only xml.gz format supported"
+    print_usage
+fi
+
+# The optional 7th arg, if present, must be the literal string "hist".
+if [ ! -z "$HIST" ]; then
+    if [ $HIST != "hist" ]; then
+        print_usage
+    fi
+fi
+
+# Informational only - report which kube config kubectl will use.
+if [ "$KUBECONFIG" == "" ]; then
+    echo "Env var KUBECONFIG not set, using current settings for kubectl"
+else
+    echo "Env var KUBECONFIG set to $KUBECONFIG"
+fi
+
+# Copy the event generator script into the kafka-client pod and run it
+# with the "ves" variant, forwarding the validated arguments.
+chmod +x kafka-client-send-genfiles-file-ready.sh
+kubectl cp kafka-client-send-genfiles-file-ready.sh nonrtric/kafka-client:/home/appuser
+
+kubectl exec kafka-client -n nonrtric -- bash -c './kafka-client-send-genfiles-file-ready.sh ves '$NODE_COUNT' '$EVT_COUNT' '$NODE_NAME_BASE' '$FILE_EXT' '$TYPE' '$SRV_COUNT' '$HIST
+
+echo done
+
helm repo remove strimzi
helm uninstall -n nonrtric strimzi-kafka-crds
-#kubectl delete -f 'https://strimzi.io/install/latest?namespace=nonrtric' -n nonrtric
check_error $? "$INST"
helm uninstall namespaces
BCK_*
REM_*
-*.crt
-*.key
+
# ============LICENSE_END=================================================
#
-FROM golang:1.19-bullseye AS build
+FROM golang:1.20.3-buster AS build
+
WORKDIR /app
-COPY go.mod .
-COPY go.sum .
-RUN go mod download
+
COPY main.go .
-RUN go build -o /kafka-pm-producer
+RUN go mod init main
+RUN go mod tidy
+
+RUN go build -o /pm-file-converter
#Replaced distroless image with ubuntu for debug purpose
#FROM gcr.io/distroless/base-debian11
FROM ubuntu
WORKDIR /
## Copy from "build" stage
-COPY --from=build /kafka-pm-producer .
+COPY --from=build /pm-file-converter .
COPY server.key /server.key
COPY server.crt /server.crt
##Uncomment this when using distroless image
#USER nonroot:nonroot
-ENTRYPOINT ["/kafka-pm-producer"]
+ENTRYPOINT ["/pm-file-converter"]
+## PM Data Producer
-## Producer
-
-Producer supporting data types for pm xml to json conversion and pm json filtering
-
+### Manual build, tag and push to image repo
Build for docker or local kubernetes\
`./build.sh no-push`
Build for remote kubernetes - an externally accessible image repo (e.g. docker hub) is needed \
`./build.sh <external-image-repo>`
+### Function
+
+Producer supporting data types for pm xml-to-json conversion and pm json filtering, with output to kafka or influx db.
+
+### Configuration
+
+The app expects the following environment variables:
+
+- CREDS_GRANT_TYPE : Grant type (keycloak)
+- CREDS_CLIENT_SECRET: Client secret (keycloak)
+- CREDS_CLIENT_ID : Client id (keycloak)
+- AUTH_SERVICE_URL : Url to keycloak for fetching tokens
+- KAFKA_SERVER : Host and port to kafka bootstrap server
+- ICS : Host and port to the Information Coordination Service
+- SELF: Host and port of this app
+
+The following env vars are optional:
+
+- FILES_VOLUME : Path to persistent file storage (optional)
+- FILESTORE_USER : Minio filestore user
+- FILESTORE_PWD : Minio filestore password
+- FILESTORE_SERVER : Host and port of the minio filestore
+- KP : Id of the app
+
+The app can be configured to read file from a mounted file system or from a filestore server (minio).
+
+Mounted files:
+Configure `FILES_VOLUME` and leave the vars starting with FILESTORE empty.
+
+Filestore:
+Configure the env vars starting with FILESTORE and leave `FILES_VOLUME` empty.
+
+
## License
#
#Build image from Dockerfile with/without custom image tag
-#Optionally push to external docker hub repo
+#Optionally push to external image repo
print_usage() {
- echo "Usage: build.sh no-push|bm|<docker-hub-repo-name> [<image-tag>]"
+ echo "Usage: build.sh no-push|<docker-hub-repo-name> [--tag <image-tag>]"
exit 1
}
-if [ $# -ne 1 ] && [ $# -ne 2 ]; then
+if [ $# -lt 1 ] || [ $# -gt 2 ]; then
print_usage
fi
-IMAGE_NAME="kafka-pm-producer"
+IMAGE_NAME="pm-file-converter"
IMAGE_TAG="latest"
REPO=""
if [ $1 == "no-push" ]; then
REPO=$1
echo "Attempt to push built image to: "$REPO
fi
+shift
+while [ $# -ne 0 ]; do
+ if [ $1 == "--tag" ]; then
+ shift
+ if [ -z "$1" ]; then
+ print_usage
+ fi
+ IMAGE_TAG=$1
+ echo "Setting image tag to: "$IMAGE_TAG
+ shift
+ else
+ echo "Unknown parameter: $1"
+ print_usage
+ fi
+done
-if [ "$2" != "" ]; then
- IMAGE_TAG=$2
-fi
- echo "Setting image tag to: "$IMAGE_TAG
+./gen-cert.sh
+echo ""
+echo "Certs generated"
IMAGE=$IMAGE_NAME:$IMAGE_TAG
-echo "Building image $IMAGE"
+
+export DOCKER_DEFAULT_PLATFORM=linux/amd64
+CURRENT_PLATFORM=$(docker system info --format '{{.OSType}}/{{.Architecture}}')
+if [ $CURRENT_PLATFORM != $DOCKER_DEFAULT_PLATFORM ]; then
+ echo "Image may not work on the current platform: $CURRENT_PLATFORM, only platform $DOCKER_DEFAULT_PLATFORM supported"
+fi
+
+echo "Building image: $IMAGE with architecture: $DOCKER_DEFAULT_PLATFORM"
+
docker build -t $IMAGE_NAME:$IMAGE_TAG .
+
if [ $? -ne 0 ]; then
echo "BUILD FAILED"
exit 1
openssl genrsa -out server.key 2048
openssl ecparam -genkey -name secp384r1 -out server.key
-openssl req -new -x509 -sha256 -key server.key -out server.crt -days 3650
+cat <<__EOF__ | openssl req -new -x509 -sha256 -key server.key -out server.crt -days 3650
+SE
+.
+.
+.
+.
+.
+.
+__EOF__
\ No newline at end of file
cer, err := tls.LoadX509KeyPair(server_crt, server_key)
if err != nil {
- log.Error("Cannot load key and cert - %v\n", err)
+ log.Error("Cannot load key and cert - ", err)
return
}
config := &tls.Config{Certificates: []tls.Certificate{cer}}
https_server := &http.Server{Addr: ":" + strconv.Itoa(https_port), TLSConfig: config, Handler: nil}
- //TODO: Make http on/off configurable
// Run http
go func() {
log.Info("Starting http service...")
if err == http.ErrServerClosed { // graceful shutdown
log.Info("http server shutdown...")
} else if err != nil {
- log.Error("http server error: %v\n", err)
+ log.Error("http server error: ", err)
}
}()
- //TODO: Make https on/off configurable
// Run https
go func() {
log.Info("Starting https service...")
if err == http.ErrServerClosed { // graceful shutdown
log.Info("https server shutdown...")
} else if err != nil {
- log.Error("https server error: %v\n", err)
+ log.Error("https server error: ", err)
}
}()
check_tcp(strconv.Itoa(http_port))
log.Error("Registering producer: ", producer_instance_name, " - failed")
return false
} else {
- //TODO: http/https should be configurable
ok := send_http_request(json, http.MethodPut, "http://"+ics_server+"/data-producer/v1/info-types/"+data.ProdDataTypes[i].ID, true, creds_grant_type != "")
if !ok {
log.Error("Cannot register type: ", data.ProdDataTypes[i].ID)
log.Debug("Registering types: ", new_type_names)
m := make(map[string]interface{})
m["supported_info_types"] = new_type_names
- //TODO: http/https should be configurable
m["info_job_callback_url"] = "http://" + self + "/callbacks/job/" + producer_instance_name
- //TODO: http/https should be configurable
m["info_producer_supervision_callback_url"] = "http://" + self + "/callbacks/supervision/" + producer_instance_name
json, err := json.Marshal(m)
if dp.ext_job_created == true {
dp.ext_job_id = dp.InputJobType + "_" + generate_uuid_from_type(dp.InputJobType)
- //TODO: http/https should be configurable
ok := send_http_request(*dp.ext_job, http.MethodDelete, "http://"+ics_server+"/data-consumer/v1/info-jobs/"+dp.ext_job_id, true, creds_grant_type != "")
if !ok {
log.Error("Cannot delete job: ", dp.ext_job_id)
go start_job_json_file_data(dp.ID, job_record.job_control, job_record.data_in_channel, data_out_channel, false)
case "json-file-data-from-filestore-to-influx":
go start_job_json_file_data_influx(dp.ID, job_record.job_control, job_record.data_in_channel, true)
- // case "json-data-to-influx":
- // go start_job_json_data_influx(dp.ID, job_record.job_control, job_record.data_in_channel)
default:
}
}
dp.ext_job_id = dp.InputJobType + "_" + generate_uuid_from_type(dp.InputJobType)
- //TODO: http/https should be configurable
ok := false
for !ok {
ok = send_http_request(json, http.MethodPut, "http://"+ics_server+"/data-consumer/v1/info-jobs/"+dp.ext_job_id, true, creds_grant_type != "")
if !ok {
log.Error("Cannot register job: ", dp.InputJobType)
- //TODO: Restart after long time?
}
}
log.Debug("Registered job ok: ", dp.InputJobType)
return false
}
-// // Send a http request with json (json may be nil)
-// func send_http_request(json []byte, method string, url string, retry bool, useAuth bool) bool {
-
-// // set the HTTP method, url, and request body
-// var req *http.Request
-// var err error
-// if json == nil {
-// req, err = http.NewRequest(method, url, http.NoBody)
-// } else {
-// req, err = http.NewRequest(method, url, bytes.NewBuffer(json))
-// req.Header.Set("Content-Type", "application/json; charset=utf-8")
-// }
-// if err != nil {
-// log.Error("Cannot create http request, method: ", method, " url: ", url)
-// return false
-// }
-
-// if useAuth {
-// token, err := fetch_token()
-// if err != nil {
-// log.Error("Cannot fetch token for http request: ", err)
-// return false
-// }
-// req.Header.Set("Authorization", "Bearer "+token.TokenValue)
-// }
-
-// log.Debug("HTTP request: ", req)
-
-// retries := 1
-// if retry {
-// retries = 5
-// }
-// sleep_time := 1
-// for i := retries; i > 0; i-- {
-// log.Debug("Sending http request")
-// resp, err2 := httpclient.Do(req)
-// if err2 != nil {
-// log.Error("Http request error: ", err2)
-// log.Error("Cannot send http request method: ", method, " url: ", url, " - retries left: ", i-1)
-
-// time.Sleep(time.Duration(sleep_time) * time.Second)
-// sleep_time = 2 * sleep_time
-// } else {
-// if resp.StatusCode == 200 || resp.StatusCode == 201 || resp.StatusCode == 204 {
-// log.Debug("Accepted http status: ", resp.StatusCode)
-// resp.Body.Close()
-// return true
-// }
-// log.Debug("HTTP resp: ", resp)
-// resp.Body.Close()
-// }
-// }
-// return false
-// }
-
-// // Send a http request with json (json may be nil)
-// func send_http_request(json []byte, method string, url string, retry bool, useAuth bool) bool {
-// // initialize http client
-// client := &http.Client{}
-
-// // set the HTTP method, url, and request body
-// var req *http.Request
-// var err error
-// if json == nil {
-// req, err = http.NewRequest(method, url, http.NoBody)
-// } else {
-// req, err = http.NewRequest(method, url, bytes.NewBuffer(json))
-// req.Header.Set("Content-Type", "application/json; charset=utf-8")
-// }
-// if err != nil {
-// log.Error("Cannot create http request method: ", method, " url: ", url)
-// return false
-// }
-
-// useAuth = false
-// if useAuth {
-// token, err := fetch_token()
-// if err != nil {
-// log.Error("Cannot fetch token for http request: ", err)
-// return false
-// }
-// req.Header.Add("Authorization", "Bearer "+token.TokenValue)
-// }
-// log.Debug("HTTP request: ", req)
-
-// b, berr := io.ReadAll(req.Body)
-// if berr == nil {
-// log.Debug("HTTP request body length: ", len(b))
-// } else {
-// log.Debug("HTTP request - cannot check body length: ", berr)
-// }
-// if json == nil {
-// log.Debug("HTTP request null json")
-// } else {
-// log.Debug("HTTP request json: ", string(json))
-// }
-// requestDump, cerr := httputil.DumpRequestOut(req, true)
-// if cerr != nil {
-// fmt.Println(cerr)
-// }
-// fmt.Println(string(requestDump))
-
-// retries := 1
-// if retry {
-// retries = 5
-// }
-// sleep_time := 1
-// for i := retries; i > 0; i-- {
-// resp, err2 := client.Do(req)
-// if err2 != nil {
-// log.Error("Http request error: ", err2)
-// log.Error("Cannot send http request method: ", method, " url: ", url, " - retries left: ", i)
-
-// time.Sleep(time.Duration(sleep_time) * time.Second)
-// sleep_time = 2 * sleep_time
-// } else {
-// if resp.StatusCode == 200 || resp.StatusCode == 201 || resp.StatusCode == 204 {
-// log.Debug("Accepted http status: ", resp.StatusCode)
-// defer resp.Body.Close()
-// return true
-// }
-// }
-// }
-// return false
-// }
-
func fetch_token() (*kafka.OAuthBearerToken, error) {
log.Debug("Get token inline")
conf := &clientcredentials.Config{
log.Debug("=====================================================")
log.Debug("Expiration: ", token.Expiry)
t := token.Expiry
- // t := token.Expiry.Add(-time.Minute)
- // log.Debug("Modified expiration: ", t)
oauthBearerToken := kafka.OAuthBearerToken{
TokenValue: token.AccessToken,
Expiration: t,
return
}
- //TODO: Verify that job contains enough parameters...
-
if !job_found {
job_record = InfoJobRecord{}
job_record.job_info = t
jc.command = "ADD-FILTER"
- //TODO: Refactor
if t.InfoTypeIdentity == "json-file-data-from-filestore" || t.InfoTypeIdentity == "json-file-data" || t.InfoTypeIdentity == "json-file-data-from-filestore-to-influx" {
fm := FilterMaps{}
fm.sourceNameMap = make(map[string]bool)
w.WriteHeader(http.StatusOK)
}
-// producer statictics, all jobs
+// producer statistics, all jobs
func statistics(w http.ResponseWriter, req *http.Request) {
if req.Method != http.MethodGet {
w.WriteHeader(http.StatusMethodNotAllowed)
case reader_ctrl := <-control_ch:
if reader_ctrl.command == "EXIT" {
log.Info("Topic reader on topic: ", topic, " for type: ", type_id, " - stopped")
- //TODO: Stop consumer if present?
data_ch <- nil //Signal to job handler
running = false
return
}
}
default:
- //TODO: Handle these?
log.Debug("Dumping topic reader event on topic: ", topic, " for type: ", type_id, " evt: ", evt.String())
}
go func() {
for {
- //maxDur := 1 * time.Second
for {
select {
case reader_ctrl := <-control_ch:
var kmsg KafkaPayload
kmsg.msg = e
- c.Commit() //TODO: Ok?
+ c.Commit()
- //TODO: Check for exception
data_ch <- &kmsg
stats.in_msg_cnt++
log.Debug("Reader msg: ", &kmsg)
default:
fmt.Printf("Ignored %v\n", e)
}
-
- // orig code
- // msg, err := c.ReadMessage(maxDur)
- // if err == nil {
- // var kmsg KafkaPayload
- // kmsg.msg = msg
-
- // c.Commit() //TODO: Ok?
-
- // //TODO: Check for exception
- // data_ch <- &kmsg
- // stats.in_msg_cnt++
- // log.Debug("Reader msg: ", &kmsg)
- // log.Debug("Reader - data_ch ", data_ch)
- // } else {
- // log.Debug("Topic Reader for type: ", type_id, " Nothing to consume on topic: ", topic, ", reason: ", err)
- // }
-
}
}
}
time.Sleep(1 * time.Second)
} else {
log.Debug("Kafka producer started")
- //defer kafka_producer.Close()
}
}
}
for {
select {
case evt := <-kafka_producer.Events():
- //TODO: Handle this? Probably yes, look if the msg was delivered and if not, resend?
switch evt.(type) {
case *kafka.Message:
m := evt.(*kafka.Message)
case kmsg := <-data_ch:
if kmsg == nil {
event_chan <- 0
- // TODO: Close producer?
log.Info("Topic writer stopped by channel signal - start_topic_writer")
defer kafka_producer.Close()
return
}
}
if !msg_ok {
- //TODO: Retry sending msg?
log.Error("Topic writer failed to send message on topic: ", kmsg.topic, " - Msg discarded. Error details: ", err)
}
case <-time.After(1000 * time.Millisecond):
"client.id": cid,
"auto.offset.reset": "latest",
"enable.auto.commit": false,
- //"auto.commit.interval.ms": 5000,
}
} else {
log.Info("Creating kafka SASL plain text consumer for type: ", type_id)
}
c, err := kafka.NewConsumer(&cm)
- //TODO: How to handle autocommit or commit message by message
- //TODO: Make arg to kafka configurable
-
if err != nil {
log.Error("Cannot create kafka consumer for type: ", type_id, ", error details: ", err)
return nil
}
- //c.Commit()
log.Info("Created kafka consumer for type: ", type_id, " OK")
return c
}
}
}
}
- //TODO: Sort processed file conversions in order (FIFO)
jobLimiterChan <- struct{}{}
go run_xml_job(type_id, msg, "gz", data_out_channel, topic_list, jobLimiterChan, fvolume, fsbucket, mc, mc_id)
}
}
}
- //}()
}
func run_xml_job(type_id string, msg *KafkaPayload, outputCompression string, data_out_channel chan *KafkaPayload, topic_list map[string]string, jobLimiterChan chan struct{}, fvolume string, fsbucket string, mc *minio.Client, mc_id string) {
var reader io.Reader
- //TODO -> config
INPUTBUCKET := "ropfiles"
filename := ""
}
defer fi.Close()
reader = fi
- //} else if evt_data.ObjectStoreBucket != "" {
} else {
filename = evt_data.Name
if mc != nil {
start = time.Now()
var pmfile PMJsonFile
- //TODO: Fill in more values
pmfile.Event.Perf3GppFields.Perf3GppFieldsVersion = "1.0"
pmfile.Event.Perf3GppFields.MeasDataCollection.GranularityPeriod = 900
pmfile.Event.Perf3GppFields.MeasDataCollection.MeasuredEntityUserName = ""
filters := make(map[string]Filter)
filterParams_list := make(map[string]FilterMaps)
- // ch_list := make(map[string]chan *KafkaPayload)
topic_list := make(map[string]string)
var mc *minio.Client
const mc_id = "mc_" + "start_job_json_file_data"
log.Debug("Type job ", type_id, " new cmd received ", job_ctl.command)
switch job_ctl.command {
case "EXIT":
- //ignore cmd - handled by channel signal
case "ADD-FILTER":
- //TODO: Refactor...
filters[job_ctl.filter.JobId] = job_ctl.filter
log.Debug("Type job ", type_id, " updated, topic: ", job_ctl.filter.OutputTopic, " jobid: ", job_ctl.filter.JobId)
tmp_topic_list[job_ctl.filter.JobId] = job_ctl.filter.OutputTopic
topic_list = tmp_topic_list
case "REMOVE-FILTER":
- //TODO: Refactor...
log.Debug("Type job ", type_id, " removing job: ", job_ctl.filter.JobId)
tmp_filterParams_list := make(map[string]FilterMaps)
var reader io.Reader
- //TODO -> config
- //INPUTBUCKET := "json-file-ready"
INPUTBUCKET := "pm-files-json"
filename := ""
if objectstore == false {
log.Debug("Read file/obj: ", filename, "for type job: ", type_id, " time: ", time.Since(start).String())
}
- // The code was moved to inside the below for loop - a deep copy of PM PMJsonFile is need to keep it outside
- // var pmfile PMJsonFile
- // start := time.Now()
- // err = jsoniter.Unmarshal(*data, &pmfile)
- // log.Debug("Json unmarshal obj: ", filename, " time: ", time.Since(start).String())
-
- // if err != nil {
- // log.Error("Msg could not be unmarshalled - discarding message, obj: ", filename, " - error details:", err)
- // return
- // }
for k, v := range filterList {
var pmfile PMJsonFile
log.Debug("measObjClassMap:", v.measObjClassMap)
log.Debug("measObjInstIdsMap:", v.measObjInstIdsMap)
log.Debug("measTypesMap:", v.measTypesMap)
- //BMX if len(v.sourceNameMap) > 0 || len(v.measObjInstIdsMap) > 0 || len(v.measTypesMap) > 0 || len(v.measObjClassMap) > 0 {
+
b := json_pm_filter_to_byte(evt_data.Filename, &pmfile, v.sourceNameMap, v.measObjClassMap, v.measObjInstIdsMap, v.measTypesMap)
if b == nil {
log.Info("File/obj ", filename, "for job: ", k, " - empty after filering, discarding message ")
return
}
kmsg.msg.Value = *b
- //BMX}
-
- // if outputCompression == "json.gz" {
- // start := time.Now()
- // var buf bytes.Buffer
- // err := gzipWrite(&buf, &kmsg.msg.Value)
- // if err != nil {
- // log.Error("Cannot compress file/obj ", filename, "for job: ", job_id, " - discarding message, error details", err)
- // return
-
- // }
- // kmsg.msg.Value = buf.Bytes()
- // log.Debug("Compress file/obj ", filename, "for job: ", job_id, " time:", time.Since(start).String())
- // }
+
kmsg.topic = topic_list[k]
kmsg.jobid = k
return data
}
-// func json_pm_filter(resource string, data *PMJsonFile, sourceNameMap map[string]bool, measObjClassMap map[string]bool, measObjInstIdsMap map[string]bool, measTypesMap map[string]bool) *[]byte {
-
-// filter_req := true
-// start := time.Now()
-// if len(sourceNameMap) != 0 {
-// if !sourceNameMap[data.Event.CommonEventHeader.SourceName] {
-// filter_req = false
-// return nil
-// }
-// }
-// if filter_req {
-// modified := false
-// var temp_mil []MeasInfoList
-// for _, zz := range data.Event.Perf3GppFields.MeasDataCollection.SMeasInfoList {
-
-// check_cntr := false
-// var cnt_flags []bool
-// if len(measTypesMap) > 0 {
-// c_cntr := 0
-// var temp_mtl []string
-// for _, v := range zz.MeasTypes.SMeasTypesList {
-// if measTypesMap[v] {
-// cnt_flags = append(cnt_flags, true)
-// c_cntr++
-// temp_mtl = append(temp_mtl, v)
-// } else {
-// cnt_flags = append(cnt_flags, false)
-// }
-// }
-// if c_cntr > 0 {
-// check_cntr = true
-// zz.MeasTypes.SMeasTypesList = temp_mtl
-// } else {
-// modified = true
-// continue
-// }
-// }
-// keep := false
-// var temp_mvl []MeasValues
-// for _, yy := range zz.MeasValuesList {
-// keep_class := false
-// keep_inst := false
-// keep_cntr := false
-
-// dna := strings.Split(yy.MeasObjInstID, ",")
-// instName := dna[len(dna)-1]
-// cls := strings.Split(dna[len(dna)-1], "=")[0]
-
-// if len(measObjClassMap) > 0 {
-// if measObjClassMap[cls] {
-// keep_class = true
-// }
-// } else {
-// keep_class = true
-// }
-
-// if len(measObjInstIdsMap) > 0 {
-// if measObjInstIdsMap[instName] {
-// keep_inst = true
-// }
-// } else {
-// keep_inst = true
-// }
-
-// if check_cntr {
-// var temp_mrl []MeasResults
-// cnt_p := 1
-// for _, v := range yy.MeasResultsList {
-// if cnt_flags[v.P-1] {
-// v.P = cnt_p
-// cnt_p++
-// temp_mrl = append(temp_mrl, v)
-// }
-// }
-// yy.MeasResultsList = temp_mrl
-// keep_cntr = true
-// } else {
-// keep_cntr = true
-// }
-// if keep_class && keep_cntr && keep_inst {
-// keep = true
-// temp_mvl = append(temp_mvl, yy)
-// }
-// }
-// if keep {
-// zz.MeasValuesList = temp_mvl
-// temp_mil = append(temp_mil, zz)
-// modified = true
-// }
-
-// }
-// //Only if modified
-// if modified {
-// if len(temp_mil) == 0 {
-// log.Debug("Msg filtered, nothing found, discarding, obj: ", resource)
-// return nil
-// }
-// data.Event.Perf3GppFields.MeasDataCollection.SMeasInfoList = temp_mil
-// }
-// }
-// log.Debug("Filter: ", time.Since(start).String())
-
-// start = time.Now()
-// j, err := jsoniter.Marshal(&data)
-
-// log.Debug("Json marshal obj: ", resource, " time: ", time.Since(start).String())
-
-// if err != nil {
-// log.Error("Msg could not be marshalled discarding message, obj: ", resource, ", error details: ", err)
-// return nil
-// }
-
-// log.Debug("Filtered json obj: ", resource, " len: ", len(j))
-// return &j
-// }
-
func start_job_json_file_data_influx(type_id string, control_ch chan JobControl, data_in_ch chan *KafkaPayload, objectstore bool) {
log.Info("Type job", type_id, " started")
case "EXIT":
//ignore cmd - handled by channel signal
case "ADD-FILTER":
- //TODO: Refactor...
+
filters[job_ctl.filter.JobId] = job_ctl.filter
log.Debug("Type job ", type_id, " updated, topic: ", job_ctl.filter.OutputTopic, " jobid: ", job_ctl.filter.JobId)
log.Debug(job_ctl.filter)
influx_job_params = tmp_influx_job_params
case "REMOVE-FILTER":
- //TODO: Refactor...
+
log.Debug("Type job ", type_id, " removing job: ", job_ctl.filter.JobId)
tmp_filterParams_list := make(map[string]FilterMaps)
}
}
}
- //TODO: Sort processed file conversions in order (FIFO)
+
jobLimiterChan <- struct{}{}
go run_json_file_data_job_influx(type_id, msg, filterParams_list, influx_job_params, jobLimiterChan, mc, mc_id, objectstore)
var reader io.Reader
- //TODO -> config
- //INPUTBUCKET := "json-file-ready"
INPUTBUCKET := "pm-files-json"
filename := ""
if objectstore == false {
log.Debug("Read file/obj: ", filename, "for type job: ", type_id, " time: ", time.Since(start).String())
}
- // The code was moved to inside the below for loop - a deep copy of PM PMJsonFile is need to keep it outside
- // var pmfile PMJsonFile
- // start := time.Now()
- // err = jsoniter.Unmarshal(*data, &pmfile)
- // log.Debug("Json unmarshal obj: ", filename, " time: ", time.Since(start).String())
-
- // if err != nil {
- // log.Error("Msg could not be unmarshalled - discarding message, obj: ", filename, " - error details:", err)
- // return
- // }
for k, v := range filterList {
var pmfile PMJsonFile
client := influxdb2.NewClient(fluxParms.DbUrl, fluxParms.DbToken)
writeAPI := client.WriteAPIBlocking(fluxParms.DbOrg, fluxParms.DbBucket)
- // fmt.Println(pmfile.Event.CommonEventHeader.StartEpochMicrosec)
- // tUnix := pmfile.Event.CommonEventHeader.StartEpochMicrosec / int64(time.Millisecond)
- // tUnixNanoRemainder := (pmfile.Event.CommonEventHeader.StartEpochMicrosec % int64(time.Millisecond)) * int64(time.Microsecond)
- // timeT := time.Unix(tUnix, tUnixNanoRemainder)
- // fmt.Println(timeT)
- // fmt.Println("======================")
for _, zz := range pmfile.Event.Perf3GppFields.MeasDataCollection.SMeasInfoList {
ctr_names := make(map[string]string)
for cni, cn := range zz.MeasTypes.SMeasTypesList {
- ctr_names[string(cni+1)] = cn
+ ctr_names[strconv.Itoa(cni+1)] = cn
}
for _, xx := range zz.MeasValuesList {
log.Debug("Measurement: ", xx.MeasObjInstID)
p.AddField("granularityperiod", pmfile.Event.Perf3GppFields.MeasDataCollection.GranularityPeriod)
p.AddField("timezone", pmfile.Event.CommonEventHeader.TimeZoneOffset)
for _, yy := range xx.MeasResultsList {
- pi := string(yy.P)
+ pi := strconv.Itoa(yy.P)
pv := yy.SValue
pn := ctr_names[pi]
log.Debug("Counter: ", pn, " Value: ", pv)
--- /dev/null
+-----BEGIN CERTIFICATE-----
+MIIBRzCBzQIJAJkBcpKz3wnCMAoGCCqGSM49BAMCMA0xCzAJBgNVBAYTAlNFMB4X
+DTIzMDUyMjA3Mjg1MloXDTMzMDUxOTA3Mjg1MlowDTELMAkGA1UEBhMCU0UwdjAQ
+BgcqhkjOPQIBBgUrgQQAIgNiAAQ8KdpOCp9l+L+JcW2dpy95Ir72FUGOR8Uvcyrg
+OTAcRN9fNCLXIP3PHVj6UxpemtE72R7zU8qADQV1DYZayEKDaIikf9njPfjenqud
+LUSICMjigLL1XznnGe0nCDaA24EwCgYIKoZIzj0EAwIDaQAwZgIxAI6jg1kSSjf7
+3gbfK5UEWun+HVsxiQyBpBm7DhARvtkJsLUiVV+RMF8mukAOrXjHYgIxAM4RuOkz
+56JuyrG6CouKJlHy2Q0R+Olg2GY8bIV9GzePG4NWYfT//z1vjfEmmiNMcA==
+-----END CERTIFICATE-----
--- /dev/null
+-----BEGIN EC PARAMETERS-----
+BgUrgQQAIg==
+-----END EC PARAMETERS-----
+-----BEGIN EC PRIVATE KEY-----
+MIGkAgEBBDBDwFMblWUdInRL04EnD+J2y6h06559cdvMz7SjuzaHErcwcrC+slm1
+jSKpiV8hgY6gBwYFK4EEACKhZANiAAQ8KdpOCp9l+L+JcW2dpy95Ir72FUGOR8Uv
+cyrgOTAcRN9fNCLXIP3PHVj6UxpemtE72R7zU8qADQV1DYZayEKDaIikf9njPfje
+nqudLUSICMjigLL1XznnGe0nCDaA24E=
+-----END EC PRIVATE KEY-----
# ============LICENSE_END=================================================
#
-FROM golang:1.19-bullseye AS build
+FROM golang:1.20.3-buster AS build
+
WORKDIR /app
-COPY go.mod .
-COPY go.sum .
-RUN go mod download
+
COPY main.go .
+RUN go mod init main
+RUN go mod tidy
+
RUN go build -o /pm-rapp
-#Replaced distroless image with ubuntu for debug purposes (seem to be cert problems with distroless image...?)
FROM gcr.io/distroless/base-debian11
-#FROM ubuntu
+
WORKDIR /
## Copy from "build" stage
COPY --from=build /pm-rapp .
-##Uncomment this when using distroless image
-USER nonroot:nonroot
ENTRYPOINT ["/pm-rapp"]
-## Basic rAPP for demo purpose - starts a subscription and prints out received data on the topic to stdout
-
+## Basic rAPP for demo purpose
### Manual build, tag and push to image repo
Build for remote kubernetes - an externally accessible image repo (e.g. docker hub) is needed \
`./build.sh <external-image-repo> [<image-tag>]`
+## Function
+
+The rApp starts a job subscription and optionally prints the received data to standard out. The purpose of this app is to simulate a real app subscribing to data.
+
+The rapp can be configured to use plain text, plain text SSL or plain text SASL towards kafka.
+
### Configuration
+The container expects the following environment variables:
+
+- APPID : Should be a unique name (for example the name of the POD).
+
+- APPNS : Should be the name of the namespace.
+
+- KAFKA_SERVER : Host and port of the kafka bootstrap server.
+
+- TOPIC : The kafka topic where data is delivered by the job.
+
+- ICS : Host and port to the information coordinator server.
+
+The remaining env vars are optional.
+
+- JWT_FILE : File path to mounted file where a valid token is stored. If used, the app expects the file to be regularly updated by a sidecar container. Only for SASL plain text towards kafka.
+
+- SSLPATH : Path to mounted cert and key for secure kafka communication. Only for the SSL interface towards kafka.
+
+- GZIP : If set (any value) the payload from kafka is expected to be in gzip format.
+
+- LOG_PAYLOAD : If set (any value) the received payload is printed to standard out.
+
+The following are optional and used only if the app fetches the token instead of a configured sidecar. Only for SASL plain text towards kafka.
+
+- CREDS_GRANT_TYPE : Grant type (keycloak)
+- CREDS_CLIENT_SECRET : Client secret (keycloak)
+- CREDS_CLIENT_ID : Client id (keycloak)
+- AUTH_SERVICE_URL : Url to keycloak for requesting a token.
+
+
+
+The subscription json is expected on the path "/config/jobDefinition.json".
+The rapp sets topic and bootstrap server from the above env vars before subscribing to the data.
+
+
+
+## License
+
+Copyright (C) 2023 Nordix Foundation. All rights reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
+++ /dev/null
-
-pm-rapp
-=======
-Dockerfile
-- Remove ref to ubuntu image (if distroless image work ok)
-
-
-
-install/helm
-============
#
# Build image from Dockerfile with/without custom image tag
-# Optionally push to external docker hub repo
+# Optionally push to external image repo
print_usage() {
echo "Usage: build.sh no-push|<docker-hub-repo-name> [<image-tag>]"
echo "Attempt to push built image to: "$REPO
fi
-if [ "$2" != "" ]; then
- IMAGE_TAG=$2
-fi
- echo "Setting image tag to: "$IMAGE_TAG
+shift
+while [ $# -ne 0 ]; do
+ if [ $1 == "--tag" ]; then
+ shift
+ if [ -z "$1" ]; then
+ print_usage
+ fi
+ IMAGE_TAG=$1
+ echo "Setting image tag to: "$IMAGE_TAG
+ shift
+ else
+ echo "Unknown parameter: $1"
+ print_usage
+ fi
+done
IMAGE=$IMAGE_NAME:$IMAGE_TAG
+
+export DOCKER_DEFAULT_PLATFORM=linux/amd64
+CURRENT_PLATFORM=$(docker system info --format '{{.OSType}}/{{.Architecture}}')
+if [ $CURRENT_PLATFORM != $DOCKER_DEFAULT_PLATFORM ]; then
+ echo "Image may not work on the current platform: $CURRENT_PLATFORM, only platform $DOCKER_DEFAULT_PLATFORM supported"
+fi
+
echo "Building image $IMAGE"
docker build -t $IMAGE_NAME:$IMAGE_TAG .
if [ $? -ne 0 ]; then
# The Jenkins job requires a tag to build the Docker image.
# By default this file is in the docker build directory,
-# but the location can configured in the JJB template.
+# but the location can be configured in the JJB template.
---
tag: 1.0.0
\ No newline at end of file
var log_payload = os.Getenv("LOG_PAYLOAD")
-// This are optional - if rapp is fethcing the token instead of the side car
+// These are optional - if the rapp is fetching the token instead of the sidecar
var creds_grant_type = os.Getenv("CREDS_GRANT_TYPE")
var creds_client_secret = os.Getenv("CREDS_CLIENT_SECRET")
var creds_client_id = os.Getenv("CREDS_CLIENT_ID")
}
if jwt_file != "" || creds_service_url != "" {
if accessToken != "" {
- req.Header.Add("authorization", accessToken)
+ req.Header.Set("Authorization", "Bearer "+accessToken)
} else {
log.Error("Cannot create http request for url: ", url, " - token missing")
return false, nil
}
}
} else {
- log.Error("Bad response, method: ", method, " url: ", url, " resp: ", resp.StatusCode)
+ log.Error("Bad response, method: ", method, " url: ", url, " resp: ", resp.StatusCode, " resp: ", resp)
}
}
}