Merge "Add schemas for input events of ODU slice assurance usecase"
author Henrik Andersson <henrik.b.andersson@est.tech>
Fri, 19 Nov 2021 09:55:12 +0000 (09:55 +0000)
committer Gerrit Code Review <gerrit@o-ran-sc.org>
Fri, 19 Nov 2021 09:55:12 +0000 (09:55 +0000)
123 files changed:
dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/BeanFactory.java
dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/clients/AsyncRestClient.java
dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/controllers/ProducerCallbacksController.java
dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/repository/Job.java
dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/repository/Jobs.java
dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/DmaapTopicConsumer.java
dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/DmaapTopicConsumers.java [new file with mode: 0644]
dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaJobDataConsumer.java [new file with mode: 0644]
dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaTopicConsumer.java [deleted file]
dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaTopicConsumers.java
dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaTopicListener.java [new file with mode: 0644]
dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/ProducerRegstrationTask.java
dmaap-adaptor-java/src/main/resources/typeSchemaKafka.json
dmaap-adaptor-java/src/test/java/org/oran/dmaapadapter/ApplicationTest.java
dmaap-adaptor-java/src/test/java/org/oran/dmaapadapter/ConsumerController.java
dmaap-adaptor-java/src/test/java/org/oran/dmaapadapter/EcsSimulatorController.java
dmaap-adaptor-java/src/test/java/org/oran/dmaapadapter/IntegrationWithKafka.java
dmaap-mediator-producer/Dockerfile
dmaap-mediator-producer/README.md
dmaap-mediator-producer/internal/jobs/jobs.go
dmaap-mediator-producer/internal/jobs/jobs_test.go
dmaap-mediator-producer/internal/server/server.go
dmaap-mediator-producer/internal/server/server_test.go
dmaap-mediator-producer/main.go
dmaap-mediator-producer/mocks/jobhandler/JobHandler.go
dmaap-mediator-producer/stub/consumer/consumerstub.go
dmaap-mediator-producer/stub/dmaap/mrstub.go
docker-compose/.env [new file with mode: 0644]
docker-compose/a1-sim/docker-compose.yaml
docker-compose/dmaap-mediator-go/docker-compose.yaml
docker-compose/dmaap-mediator-java/docker-compose.yaml
docker-compose/ecs/docker-compose.yaml
docker-compose/policy-service/docker-compose.yaml
docker-compose/rapp/docker-compose.yaml
enrichment-coordinator-service/src/main/java/org/oransc/enrichment/clients/AsyncRestClient.java
enrichment-coordinator-service/src/main/java/org/oransc/enrichment/controllers/a1e/A1eController.java
enrichment-coordinator-service/src/main/java/org/oransc/enrichment/controllers/r1consumer/ConsumerController.java
enrichment-coordinator-service/src/main/java/org/oransc/enrichment/controllers/r1producer/ProducerCallbacks.java
enrichment-coordinator-service/src/main/java/org/oransc/enrichment/repository/InfoTypeSubscriptions.java
enrichment-coordinator-service/src/main/java/org/oransc/enrichment/tasks/ProducerSupervision.java
enrichment-coordinator-service/src/test/java/org/oransc/enrichment/ApplicationTest.java
onap/oran
test/auto-test/FTC1.sh
test/auto-test/FTC3000.sh
test/auto-test/ONAP_UC.sh
test/auto-test/startMR.sh
test/auto-test/testdata/dmaap-adapter/job-schema-1-kafka [new file with mode: 0644]
test/auto-test/testdata/dmaap-adapter/job-template-1-kafka.json [new file with mode: 0644]
test/common/README.md
test/common/agent_api_functions.sh
test/common/api_curl.sh
test/common/consul_cbs_functions.sh
test/common/control_panel_api_functions.sh
test/common/controller_api_functions.sh
test/common/cr_api_functions.sh
test/common/dmaapadp_api_functions.sh
test/common/dmaapmed_api_functions.sh
test/common/ecs_api_functions.sh
test/common/gateway_api_functions.sh
test/common/genstat.sh [new file with mode: 0755]
test/common/http_proxy_api_functions.sh
test/common/kube_proxy_api_functions.sh
test/common/mr_api_functions.sh
test/common/prodstub_api_functions.sh
test/common/pvccleaner_api_functions.sh
test/common/rapp_catalogue_api_functions.sh
test/common/ricsimulator_api_functions.sh
test/common/test_env-onap-guilin.sh
test/common/test_env-onap-honolulu.sh
test/common/test_env-onap-istanbul.sh
test/common/test_env-oran-cherry.sh
test/common/test_env-oran-d-release.sh
test/common/test_env-oran-e-release.sh
test/common/testcase_common.sh
test/cr/app/cr.py
test/cr/app/nginx.conf
test/mrstub/app/main.py
test/mrstub/app/nginx.conf
test/simulator-group/dmaapadp/application.yaml
test/simulator-group/dmaapadp/application_configuration.json
test/simulator-group/dmaapadp/mnt/.gitignore [new file with mode: 0644]
test/simulator-group/dmaapmed/app.yaml
test/simulator-group/dmaapmed/docker-compose.yml
test/simulator-group/dmaapmed/mnt/.gitignore [new file with mode: 0644]
test/simulator-group/dmaapmed/type_config.json
test/simulator-group/dmaapmr/app.yaml
test/simulator-group/dmaapmr/configs/kafka/zk_client_jaas.conf [moved from test/simulator-group/dmaapmr/mnt2/kafka/zk_client_jaas.conf with 100% similarity]
test/simulator-group/dmaapmr/configs/mr/MsgRtrApi.properties [moved from test/simulator-group/dmaapmr/mnt2/mr/MsgRtrApi.properties with 96% similarity]
test/simulator-group/dmaapmr/configs/mr/cadi.properties [new file with mode: 0644]
test/simulator-group/dmaapmr/configs/mr/logback.xml [moved from test/simulator-group/dmaapmr/mnt2/mr/logback.xml with 99% similarity]
test/simulator-group/dmaapmr/configs/zk/zk_server_jaas.conf [moved from test/simulator-group/dmaapmr/mnt2/zk/zk_server_jaas.conf with 100% similarity]
test/simulator-group/dmaapmr/docker-compose.yml
test/simulator-group/dmaapmr/mnt/.gitignore [new file with mode: 0644]
test/simulator-group/dmaapmr/mnt/kafka/zk_client_jaas.conf
test/simulator-group/dmaapmr/mnt/mr/KUBE-MsgRtrApi.properties [deleted file]
test/simulator-group/dmaapmr/mnt/mr/MsgRtrApi.properties
test/simulator-group/dmaapmr/mnt/mr/cadi.properties
test/simulator-group/dmaapmr/mnt/mr/logback.xml
test/simulator-group/dmaapmr/mnt/zk/zk_server_jaas.conf
test/simulator-group/dmaapmr/mnt2/mr/KUBE-MsgRtrApi.properties [deleted file]
test/simulator-group/dmaapmr/mnt2/mr/cadi.properties [deleted file]
test/simulator-group/dmaapmr/svc.yaml
test/simulator-group/mrstub/app.yaml
test/simulator-group/mrstub/docker-compose.yml
test/simulator-group/sdnc/app.yaml
test/simulator-group/sdnc/app2.yaml
test/simulator-group/sdnc/svc.yaml
test/usecases/oruclosedlooprecovery/goversion/.gitignore
test/usecases/oruclosedlooprecovery/goversion/README.md
test/usecases/oruclosedlooprecovery/goversion/go.mod
test/usecases/oruclosedlooprecovery/goversion/go.sum
test/usecases/oruclosedlooprecovery/goversion/internal/config/config.go
test/usecases/oruclosedlooprecovery/goversion/internal/config/config_test.go
test/usecases/oruclosedlooprecovery/goversion/internal/linkfailure/linkfailurehandler.go
test/usecases/oruclosedlooprecovery/goversion/internal/restclient/client.go
test/usecases/oruclosedlooprecovery/goversion/internal/restclient/client_test.go
test/usecases/oruclosedlooprecovery/goversion/main.go
test/usecases/oruclosedlooprecovery/goversion/main_test.go
test/usecases/oruclosedlooprecovery/goversion/mocks/Server.go [deleted file]
test/usecases/oruclosedlooprecovery/goversion/security/consumer.crt [new file with mode: 0644]
test/usecases/oruclosedlooprecovery/goversion/security/consumer.key [new file with mode: 0644]
test/usecases/oruclosedlooprecovery/goversion/stub/producer/producerstub.go [moved from test/usecases/oruclosedlooprecovery/goversion/simulator/producer.go with 100% similarity]
test/usecases/oruclosedlooprecovery/goversion/stub/sdnr/sdnrstub.go [new file with mode: 0644]

index faf5742..d98a8c3 100644
@@ -26,9 +26,6 @@ import org.apache.catalina.connector.Connector;
 import org.oran.dmaapadapter.configuration.ApplicationConfig;
 import org.oran.dmaapadapter.repository.InfoType;
 import org.oran.dmaapadapter.repository.InfoTypes;
-import org.oran.dmaapadapter.repository.Jobs;
-import org.oran.dmaapadapter.tasks.DmaapTopicConsumer;
-import org.oran.dmaapadapter.tasks.KafkaTopicConsumers;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.beans.factory.annotation.Value;
 import org.springframework.boot.web.embedded.tomcat.TomcatServletWebServerFactory;
@@ -38,7 +35,6 @@ import org.springframework.context.annotation.Configuration;
 
 @Configuration
 public class BeanFactory {
-    private InfoTypes infoTypes;
 
     @Value("${server.http-port}")
     private int httpPort = 0;
@@ -49,24 +45,9 @@ public class BeanFactory {
     }
 
     @Bean
-    public InfoTypes types(@Autowired ApplicationConfig appConfig, @Autowired Jobs jobs,
-            @Autowired KafkaTopicConsumers kafkaConsumers) {
-        if (infoTypes != null) {
-            return infoTypes;
-        }
-
+    public InfoTypes types(@Autowired ApplicationConfig appConfig) {
         Collection<InfoType> types = appConfig.getTypes();
-
-        // Start a consumer for each type
-        for (InfoType type : types) {
-            if (type.isDmaapTopicDefined()) {
-                DmaapTopicConsumer topicConsumer = new DmaapTopicConsumer(appConfig, type, jobs);
-                topicConsumer.start();
-            }
-        }
-        infoTypes = new InfoTypes(types);
-        kafkaConsumers.start(infoTypes);
-        return infoTypes;
+        return new InfoTypes(types);
     }
 
     @Bean
index ec1541c..8b3efed 100644
@@ -62,101 +62,92 @@ public class AsyncRestClient {
         this.httpProxyConfig = httpProxyConfig;
     }
 
-    public Mono<ResponseEntity<String>> postForEntity(String uri, @Nullable String body) {
+    public Mono<ResponseEntity<String>> postForEntity(String uri, @Nullable String body,
+            @Nullable MediaType contentType) {
         Object traceTag = createTraceTag();
         logger.debug("{} POST uri = '{}{}''", traceTag, baseUrl, uri);
         logger.trace("{} POST body: {}", traceTag, body);
         Mono<String> bodyProducer = body != null ? Mono.just(body) : Mono.empty();
-        return getWebClient() //
-                .flatMap(client -> {
-                    RequestHeadersSpec<?> request = client.post() //
-                            .uri(uri) //
-                            .contentType(MediaType.APPLICATION_JSON) //
-                            .body(bodyProducer, String.class);
-                    return retrieve(traceTag, request);
-                });
+
+        RequestHeadersSpec<?> request = getWebClient() //
+                .post() //
+                .uri(uri) //
+                .contentType(contentType) //
+                .body(bodyProducer, String.class);
+        return retrieve(traceTag, request);
     }
 
-    public Mono<String> post(String uri, @Nullable String body) {
-        return postForEntity(uri, body) //
-                .flatMap(this::toBody);
+    public Mono<String> post(String uri, @Nullable String body, @Nullable MediaType contentType) {
+        return postForEntity(uri, body, contentType) //
+                .map(this::toBody);
     }
 
-    public Mono<String> postWithAuthHeader(String uri, String body, String username, String password) {
+    public Mono<String> postWithAuthHeader(String uri, String body, String username, String password,
+            MediaType mediaType) {
         Object traceTag = createTraceTag();
         logger.debug("{} POST (auth) uri = '{}{}''", traceTag, baseUrl, uri);
         logger.trace("{} POST body: {}", traceTag, body);
-        return getWebClient() //
-                .flatMap(client -> {
-                    RequestHeadersSpec<?> request = client.post() //
-                            .uri(uri) //
-                            .headers(headers -> headers.setBasicAuth(username, password)) //
-                            .contentType(MediaType.APPLICATION_JSON) //
-                            .bodyValue(body);
-                    return retrieve(traceTag, request) //
-                            .flatMap(this::toBody);
-                });
+
+        RequestHeadersSpec<?> request = getWebClient() //
+                .post() //
+                .uri(uri) //
+                .headers(headers -> headers.setBasicAuth(username, password)) //
+                .contentType(mediaType) //
+                .bodyValue(body);
+        return retrieve(traceTag, request) //
+                .map(this::toBody);
     }
 
     public Mono<ResponseEntity<String>> putForEntity(String uri, String body) {
         Object traceTag = createTraceTag();
         logger.debug("{} PUT uri = '{}{}''", traceTag, baseUrl, uri);
         logger.trace("{} PUT body: {}", traceTag, body);
-        return getWebClient() //
-                .flatMap(client -> {
-                    RequestHeadersSpec<?> request = client.put() //
-                            .uri(uri) //
-                            .contentType(MediaType.APPLICATION_JSON) //
-                            .bodyValue(body);
-                    return retrieve(traceTag, request);
-                });
+
+        RequestHeadersSpec<?> request = getWebClient() //
+                .put() //
+                .uri(uri) //
+                .contentType(MediaType.APPLICATION_JSON) //
+                .bodyValue(body);
+        return retrieve(traceTag, request);
     }
 
     public Mono<ResponseEntity<String>> putForEntity(String uri) {
         Object traceTag = createTraceTag();
         logger.debug("{} PUT uri = '{}{}''", traceTag, baseUrl, uri);
         logger.trace("{} PUT body: <empty>", traceTag);
-        return getWebClient() //
-                .flatMap(client -> {
-                    RequestHeadersSpec<?> request = client.put() //
-                            .uri(uri);
-                    return retrieve(traceTag, request);
-                });
+        RequestHeadersSpec<?> request = getWebClient() //
+                .put() //
+                .uri(uri);
+        return retrieve(traceTag, request);
     }
 
     public Mono<String> put(String uri, String body) {
         return putForEntity(uri, body) //
-                .flatMap(this::toBody);
+                .map(this::toBody);
     }
 
     public Mono<ResponseEntity<String>> getForEntity(String uri) {
         Object traceTag = createTraceTag();
         logger.debug("{} GET uri = '{}{}''", traceTag, baseUrl, uri);
-        return getWebClient() //
-                .flatMap(client -> {
-                    RequestHeadersSpec<?> request = client.get().uri(uri);
-                    return retrieve(traceTag, request);
-                });
+        RequestHeadersSpec<?> request = getWebClient().get().uri(uri);
+        return retrieve(traceTag, request);
     }
 
     public Mono<String> get(String uri) {
         return getForEntity(uri) //
-                .flatMap(this::toBody);
+                .map(this::toBody);
     }
 
     public Mono<ResponseEntity<String>> deleteForEntity(String uri) {
         Object traceTag = createTraceTag();
         logger.debug("{} DELETE uri = '{}{}''", traceTag, baseUrl, uri);
-        return getWebClient() //
-                .flatMap(client -> {
-                    RequestHeadersSpec<?> request = client.delete().uri(uri);
-                    return retrieve(traceTag, request);
-                });
+        RequestHeadersSpec<?> request = getWebClient().delete().uri(uri);
+        return retrieve(traceTag, request);
     }
 
     public Mono<String> delete(String uri) {
         return deleteForEntity(uri) //
-                .flatMap(this::toBody);
+                .map(this::toBody);
     }
 
     private Mono<ResponseEntity<String>> retrieve(Object traceTag, RequestHeadersSpec<?> request) {
@@ -185,11 +176,11 @@ public class AsyncRestClient {
         }
     }
 
-    private Mono<String> toBody(ResponseEntity<String> entity) {
+    private String toBody(ResponseEntity<String> entity) {
         if (entity.getBody() == null) {
-            return Mono.just("");
+            return "";
         } else {
-            return Mono.just(entity.getBody());
+            return entity.getBody();
         }
     }
 
@@ -229,11 +220,11 @@ public class AsyncRestClient {
                 .build();
     }
 
-    private Mono<WebClient> getWebClient() {
+    private WebClient getWebClient() {
         if (this.webClient == null) {
             this.webClient = buildWebClient(baseUrl);
         }
-        return Mono.just(buildWebClient(baseUrl));
+        return this.webClient;
     }
 
 }
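
With getWebClient() now returning a cached WebClient instead of a Mono (and actually reusing it, fixing the old rebuild-per-call behavior), the request builders compose directly and response bodies are unwrapped with map(this::toBody). A minimal usage sketch of the revised API; the factory call and URL here are illustrative assumptions, not part of this patch:

    AsyncRestClient client =
            new AsyncRestClientFactory(applicationConfig.getWebClientConfig())
                    .createRestClientNoHttpProxy("https://consumer:8443");

    // POST now takes an explicit (nullable) content type
    client.post("/events", "{\"msg\":\"hello\"}", MediaType.APPLICATION_JSON)
            .subscribe(
                    body -> logger.debug("consumer replied: {}", body),
                    t -> logger.warn("POST failed: {}", t.getMessage()));
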
index e4dca5b..07f5aa7 100644
@@ -82,11 +82,9 @@ public class ProducerCallbacksController {
             @RequestBody String body) {
         try {
             ProducerJobInfo request = gson.fromJson(body, ProducerJobInfo.class);
-
-            logger.info("Job started callback {}", request.id);
-            Job job = new Job(request.id, request.targetUri, types.getType(request.typeId), request.owner,
+            logger.debug("Job started callback {}", request.id);
+            this.jobs.addJob(request.id, request.targetUri, types.getType(request.typeId), request.owner,
                     request.lastUpdated, toJobParameters(request.jobData));
-            this.jobs.put(job);
             return new ResponseEntity<>(HttpStatus.OK);
         } catch (Exception e) {
             return ErrorResponse.create(e, HttpStatus.NOT_FOUND);
@@ -123,7 +121,7 @@ public class ProducerCallbacksController {
     public ResponseEntity<Object> jobDeletedCallback( //
             @PathVariable("infoJobId") String infoJobId) {
 
-        logger.info("Job deleted callback {}", infoJobId);
+        logger.debug("Job deleted callback {}", infoJobId);
         this.jobs.remove(infoJobId);
         return new ResponseEntity<>(HttpStatus.OK);
     }
index d1697e9..5f7521c 100644
 
 package org.oran.dmaapadapter.repository;
 
+import java.time.Duration;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import lombok.Getter;
 
 import org.immutables.gson.Gson;
+import org.oran.dmaapadapter.clients.AsyncRestClient;
 
 public class Job {
 
     @Gson.TypeAdapters
     public static class Parameters {
-        public String filter;
-        public BufferTimeout bufferTimeout;
+        @Getter
+        private String filter;
+        @Getter
+        private BufferTimeout bufferTimeout;
 
-        public Parameters() {
-        }
+        private int maxConcurrency;
+
+        public Parameters() {}
 
-        public Parameters(String filter, BufferTimeout bufferTimeout) {
+        public Parameters(String filter, BufferTimeout bufferTimeout, int maxConcurrency) {
             this.filter = filter;
             this.bufferTimeout = bufferTimeout;
+            this.maxConcurrency = maxConcurrency;
         }
 
-        public static class BufferTimeout {
-            public BufferTimeout(int maxSize, int maxTimeMiliseconds) {
-                this.maxSize = maxSize;
-                this.maxTimeMiliseconds = maxTimeMiliseconds;
-            }
+        public int getMaxConcurrency() {
+            return maxConcurrency == 0 ? 1 : maxConcurrency;
+        }
+    }
 
-            public BufferTimeout() {
-            }
+    @Gson.TypeAdapters
+    public static class BufferTimeout {
+        public BufferTimeout(int maxSize, long maxTimeMiliseconds) {
+            this.maxSize = maxSize;
+            this.maxTimeMiliseconds = maxTimeMiliseconds;
+        }
 
-            public int maxSize;
-            public int maxTimeMiliseconds;
+        public BufferTimeout() {}
+
+        @Getter
+        private int maxSize;
+
+        private long maxTimeMiliseconds;
+
+        public Duration getMaxTime() {
+            return Duration.ofMillis(maxTimeMiliseconds);
         }
     }
 
@@ -76,7 +92,11 @@ public class Job {
 
     private final Pattern jobDataFilter;
 
-    public Job(String id, String callbackUrl, InfoType type, String owner, String lastUpdated, Parameters parameters) {
+    @Getter
+    private final AsyncRestClient consumerRestClient;
+
+    public Job(String id, String callbackUrl, InfoType type, String owner, String lastUpdated, Parameters parameters,
+            AsyncRestClient consumerRestClient) {
         this.id = id;
         this.callbackUrl = callbackUrl;
         this.type = type;
@@ -88,6 +108,7 @@ public class Job {
         } else {
             jobDataFilter = null;
         }
+        this.consumerRestClient = consumerRestClient;
     }
 
     public boolean isFilterMatch(String data) {
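
For reference, the reshaped parameter classes can be constructed directly; a small sketch with arbitrary values (BufferTimeout now lives at the Job level, and the maxTimeMiliseconds field keeps its original spelling):

    Job.BufferTimeout bt = new Job.BufferTimeout(1000, 400L); // maxSize, maxTimeMiliseconds
    Job.Parameters params = new Job.Parameters(".*", bt, 20); // filter, bufferTimeout, maxConcurrency

    Duration maxTime = bt.getMaxTime();           // Duration.ofMillis(400)
    int concurrency = params.getMaxConcurrency(); // 20; a stored 0 falls back to 1
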
index 8a38824..0e7743d 100644
 
 package org.oran.dmaapadapter.repository;
 
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Vector;
 
+import org.oran.dmaapadapter.clients.AsyncRestClient;
+import org.oran.dmaapadapter.clients.AsyncRestClientFactory;
+import org.oran.dmaapadapter.configuration.ApplicationConfig;
 import org.oran.dmaapadapter.exceptions.ServiceException;
-import org.oran.dmaapadapter.tasks.KafkaTopicConsumers;
+import org.oran.dmaapadapter.repository.Job.Parameters;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.beans.factory.annotation.Autowired;
@@ -34,14 +39,21 @@ import org.springframework.stereotype.Component;
 
 @Component
 public class Jobs {
+    public interface Observer {
+        void onJobbAdded(Job job);
+
+        void onJobRemoved(Job job);
+    }
+
     private static final Logger logger = LoggerFactory.getLogger(Jobs.class);
 
     private Map<String, Job> allJobs = new HashMap<>();
     private MultiMap<Job> jobsByType = new MultiMap<>();
-    private final KafkaTopicConsumers kafkaConsumers;
+    private final AsyncRestClientFactory restclientFactory;
+    private final List<Observer> observers = new ArrayList<>();
 
-    public Jobs(@Autowired KafkaTopicConsumers kafkaConsumers) {
-        this.kafkaConsumers = kafkaConsumers;
+    public Jobs(@Autowired ApplicationConfig applicationConfig) {
+        restclientFactory = new AsyncRestClientFactory(applicationConfig.getWebClientConfig());
     }
 
     public synchronized Job getJob(String id) throws ServiceException {
@@ -56,11 +68,28 @@ public class Jobs {
         return allJobs.get(id);
     }
 
-    public synchronized void put(Job job) {
+    public void addJob(String id, String callbackUrl, InfoType type, String owner, String lastUpdated,
+            Parameters parameters) {
+        AsyncRestClient consumerRestClient = type.isUseHttpProxy() //
+                ? restclientFactory.createRestClientUseHttpProxy(callbackUrl) //
+                : restclientFactory.createRestClientNoHttpProxy(callbackUrl);
+        Job job = new Job(id, callbackUrl, type, owner, lastUpdated, parameters, consumerRestClient);
+        this.put(job);
+        synchronized (observers) {
+            this.observers.forEach(obs -> obs.onJobbAdded(job));
+        }
+    }
+
+    public void addObserver(Observer obs) {
+        synchronized (observers) {
+            this.observers.add(obs);
+        }
+    }
+
+    private synchronized void put(Job job) {
         logger.debug("Put job: {}", job.getId());
         allJobs.put(job.getId(), job);
         jobsByType.put(job.getType().getId(), job.getId(), job);
-        kafkaConsumers.addJob(job);
     }
 
     public synchronized Iterable<Job> getAll() {
@@ -75,10 +104,14 @@ public class Jobs {
         return job;
     }
 
-    public synchronized void remove(Job job) {
-        this.allJobs.remove(job.getId());
-        jobsByType.remove(job.getType().getId(), job.getId());
-        kafkaConsumers.removeJob(job);
+    public void remove(Job job) {
+        synchronized (this) {
+            this.allJobs.remove(job.getId());
+            jobsByType.remove(job.getType().getId(), job.getId());
+        }
+        synchronized (observers) {
+            this.observers.forEach(obs -> obs.onJobRemoved(job));
+        }
     }
 
     public synchronized int size() {
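
Jobs no longer calls into the Kafka layer; instead, interested components register through the new observer hook, as KafkaTopicConsumers does further down. An inline registration sketch (the logging bodies are hypothetical; note the method name is spelled onJobbAdded in this patch):

    jobs.addObserver(new Jobs.Observer() {
        @Override
        public void onJobbAdded(Job job) {
            logger.debug("job added: {}", job.getId());
        }

        @Override
        public void onJobRemoved(Job job) {
            logger.debug("job removed: {}", job.getId());
        }
    });
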
index 7d55758..217a072 100644
@@ -29,6 +29,7 @@ import org.oran.dmaapadapter.repository.InfoType;
 import org.oran.dmaapadapter.repository.Jobs;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.springframework.http.MediaType;
 
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.FluxSink;
@@ -38,14 +39,12 @@ import reactor.core.publisher.Mono;
  * The class fetches incoming requests from DMAAP and sends them further to the
 * consumers that have a job for this InformationType.
  */
-
 public class DmaapTopicConsumer {
     private static final Duration TIME_BETWEEN_DMAAP_RETRIES = Duration.ofSeconds(10);
     private static final Logger logger = LoggerFactory.getLogger(DmaapTopicConsumer.class);
 
     private final AsyncRestClient dmaapRestClient;
     private final InfiniteFlux infiniteSubmitter = new InfiniteFlux();
-    private final AsyncRestClient consumerRestClient;
     protected final ApplicationConfig applicationConfig;
     protected final InfoType type;
     protected final Jobs jobs;
@@ -85,8 +84,6 @@ public class DmaapTopicConsumer {
         AsyncRestClientFactory restclientFactory = new AsyncRestClientFactory(applicationConfig.getWebClientConfig());
         this.dmaapRestClient = restclientFactory.createRestClientNoHttpProxy("");
         this.applicationConfig = applicationConfig;
-        this.consumerRestClient = type.isUseHttpProxy() ? restclientFactory.createRestClientUseHttpProxy("")
-                : restclientFactory.createRestClientNoHttpProxy("");
         this.type = type;
         this.jobs = jobs;
     }
@@ -108,7 +105,8 @@ public class DmaapTopicConsumer {
 
     private Mono<String> handleDmaapErrorResponse(Throwable t) {
         logger.debug("error from DMAAP {} {}", t.getMessage(), type.getDmaapTopicUrl());
-        return Mono.delay(TIME_BETWEEN_DMAAP_RETRIES).flatMap(notUsed -> Mono.empty());
+        return Mono.delay(TIME_BETWEEN_DMAAP_RETRIES) //
+                .flatMap(notUsed -> Mono.empty());
     }
 
     private Mono<String> getFromMessageRouter(String topicUrl) {
@@ -130,8 +128,8 @@ public class DmaapTopicConsumer {
 
         // Distribute the body to all jobs for this type
         return Flux.fromIterable(this.jobs.getJobsForType(this.type)) //
-                .doOnNext(job -> logger.debug("Sending to consumer {}", job.getCallbackUrl()))
-                .flatMap(job -> consumerRestClient.post(job.getCallbackUrl(), body), CONCURRENCY) //
+                .doOnNext(job -> logger.debug("Sending to consumer {}", job.getCallbackUrl())) //
+                .flatMap(job -> job.getConsumerRestClient().post("", body, MediaType.APPLICATION_JSON), CONCURRENCY) //
                 .onErrorResume(this::handleConsumerErrorResponse);
     }
 }
diff --git a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/DmaapTopicConsumers.java b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/DmaapTopicConsumers.java
new file mode 100644
index 0000000..9447c3a
--- /dev/null
@@ -0,0 +1,43 @@
+/*-
+ * ========================LICENSE_START=================================
+ * O-RAN-SC
+ * %%
+ * Copyright (C) 2021 Nordix Foundation
+ * %%
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ========================LICENSE_END===================================
+ */
+
+package org.oran.dmaapadapter.tasks;
+
+import org.oran.dmaapadapter.configuration.ApplicationConfig;
+import org.oran.dmaapadapter.repository.InfoType;
+import org.oran.dmaapadapter.repository.InfoTypes;
+import org.oran.dmaapadapter.repository.Jobs;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@Component
+public class DmaapTopicConsumers {
+
+    DmaapTopicConsumers(@Autowired ApplicationConfig appConfig, @Autowired InfoTypes types, @Autowired Jobs jobs) {
+        // Start a consumer for each type
+        for (InfoType type : types.getAll()) {
+            if (type.isDmaapTopicDefined()) {
+                DmaapTopicConsumer topicConsumer = new DmaapTopicConsumer(appConfig, type, jobs);
+                topicConsumer.start();
+            }
+        }
+    }
+
+}
diff --git a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaJobDataConsumer.java b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaJobDataConsumer.java
new file mode 100644
index 0000000..5550ce0
--- /dev/null
@@ -0,0 +1,139 @@
+/*-
+ * ========================LICENSE_START=================================
+ * O-RAN-SC
+ * %%
+ * Copyright (C) 2021 Nordix Foundation
+ * %%
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ========================LICENSE_END===================================
+ */
+
+package org.oran.dmaapadapter.tasks;
+
+import lombok.Getter;
+
+import org.oran.dmaapadapter.repository.Job;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.http.MediaType;
+import org.springframework.web.reactive.function.client.WebClientResponseException;
+
+import reactor.core.Disposable;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+import reactor.core.publisher.Sinks.Many;
+
+/**
+ * The class streams data from a multicast sink and sends the data to the Job
+ * owner via REST calls.
+ */
+@SuppressWarnings("squid:S2629") // Invoke method(s) only conditionally
+public class KafkaJobDataConsumer {
+    private static final Logger logger = LoggerFactory.getLogger(KafkaJobDataConsumer.class);
+    @Getter
+    private final Job job;
+    private Disposable subscription;
+    private final ErrorStats errorStats = new ErrorStats();
+
+    private class ErrorStats {
+        private int consumerFaultCounter = 0;
+        private boolean kafkaError = false; // e.g. overflow
+
+        public void handleOkFromConsumer() {
+            this.consumerFaultCounter = 0;
+        }
+
+        public void handleException(Throwable t) {
+            if (t instanceof WebClientResponseException) {
+                ++this.consumerFaultCounter;
+            } else {
+                kafkaError = true;
+            }
+        }
+
+        public boolean isItHopeless() {
+            final int STOP_AFTER_ERRORS = 5;
+            return kafkaError || consumerFaultCounter > STOP_AFTER_ERRORS;
+        }
+
+        public void resetKafkaErrors() {
+            kafkaError = false;
+        }
+    }
+
+    public KafkaJobDataConsumer(Job job) {
+        this.job = job;
+    }
+
+    public synchronized void start(Many<String> input) {
+        stop();
+        this.errorStats.resetKafkaErrors();
+        this.subscription = getMessagesFromKafka(input, job) //
+                .flatMap(this::postToClient, job.getParameters().getMaxConcurrency()) //
+                .onErrorResume(this::handleError) //
+                .subscribe(this::handleConsumerSentOk, //
+                        t -> stop(), //
+                        () -> logger.warn("KafkaMessageConsumer stopped jobId: {}", job.getId()));
+    }
+
+    private Mono<String> postToClient(String body) {
+        logger.debug("Sending to consumer {} {} {}", job.getId(), job.getCallbackUrl(), body);
+        MediaType contentType = this.job.isBuffered() ? MediaType.APPLICATION_JSON : null;
+        return job.getConsumerRestClient().post("", body, contentType);
+    }
+
+    public synchronized void stop() {
+        if (this.subscription != null) {
+            subscription.dispose();
+            subscription = null;
+        }
+    }
+
+    public synchronized boolean isRunning() {
+        return this.subscription != null;
+    }
+
+    private Flux<String> getMessagesFromKafka(Many<String> input, Job job) {
+        Flux<String> result = input.asFlux() //
+                .filter(job::isFilterMatch);
+
+        if (job.isBuffered()) {
+            result = result.map(this::quote) //
+                    .bufferTimeout( //
+                            job.getParameters().getBufferTimeout().getMaxSize(), //
+                            job.getParameters().getBufferTimeout().getMaxTime()) //
+                    .map(Object::toString);
+        }
+        return result;
+    }
+
+    private String quote(String str) {
+        final String q = "\"";
+        return q + str.replace(q, "\\\"") + q;
+    }
+
+    private Mono<String> handleError(Throwable t) {
+        logger.warn("exception: {} job: {}", t.getMessage(), job.getId());
+        this.errorStats.handleException(t);
+        if (this.errorStats.isItHopeless()) {
+            return Mono.error(t);
+        } else {
+            return Mono.empty(); // Ignore
+        }
+    }
+
+    private void handleConsumerSentOk(String data) {
+        this.errorStats.handleOkFromConsumer();
+    }
+
+}
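
When a job is buffered, each message is JSON-quoted and the emitted batch is a List whose toString() happens to be valid JSON for quoted strings. A standalone Reactor sketch of that path, assuming only reactor-core on the classpath (sizes and timeout mirror the integration test below):

    import java.time.Duration;
    import reactor.core.publisher.Flux;

    public class BufferDemo {
        // Same quoting rule as KafkaJobDataConsumer.quote(): wrap in quotes, escape embedded ones
        private static String quote(String str) {
            final String q = "\"";
            return q + str.replace(q, "\\\"") + q;
        }

        public static void main(String[] args) {
            Flux.just("Message_1", "Message_2", "Message_3")
                    .map(BufferDemo::quote)
                    .bufferTimeout(1000, Duration.ofMillis(400)) // maxSize, maxTime
                    .map(Object::toString) // List.toString() yields [elem, elem, ...]
                    .subscribe(System.out::println);
            // Prints: ["Message_1", "Message_2", "Message_3"]
        }
    }
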
diff --git a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaTopicConsumer.java b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaTopicConsumer.java
deleted file mode 100644
index 6079edf..0000000
+++ /dev/null
@@ -1,130 +0,0 @@
-/*-
- * ========================LICENSE_START=================================
- * O-RAN-SC
- * %%
- * Copyright (C) 2021 Nordix Foundation
- * %%
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ========================LICENSE_END===================================
- */
-
-package org.oran.dmaapadapter.tasks;
-
-import java.time.Duration;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.kafka.clients.consumer.ConsumerConfig;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.common.serialization.IntegerDeserializer;
-import org.apache.kafka.common.serialization.StringDeserializer;
-import org.oran.dmaapadapter.clients.AsyncRestClient;
-import org.oran.dmaapadapter.clients.AsyncRestClientFactory;
-import org.oran.dmaapadapter.configuration.ApplicationConfig;
-import org.oran.dmaapadapter.repository.InfoType;
-import org.oran.dmaapadapter.repository.Job;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import reactor.core.Disposable;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-import reactor.core.publisher.Sinks;
-import reactor.core.publisher.Sinks.Many;
-import reactor.kafka.receiver.KafkaReceiver;
-import reactor.kafka.receiver.ReceiverOptions;
-
-/**
- * The class fetches incoming requests from DMAAP and sends them further to the
- * consumers that has a job for this InformationType.
- */
-@SuppressWarnings("squid:S2629") // Invoke method(s) only conditionally
-public class KafkaTopicConsumer {
-    private static final Logger logger = LoggerFactory.getLogger(KafkaTopicConsumer.class);
-    private final AsyncRestClient consumerRestClient;
-    private final ApplicationConfig applicationConfig;
-    private final InfoType type;
-    private final Many<String> consumerDistributor;
-
-    public KafkaTopicConsumer(ApplicationConfig applicationConfig, InfoType type) {
-        this.applicationConfig = applicationConfig;
-
-        final int CONSUMER_BACKPRESSURE_BUFFER_SIZE = 10;
-        this.consumerDistributor = Sinks.many().multicast().onBackpressureBuffer(CONSUMER_BACKPRESSURE_BUFFER_SIZE);
-
-        AsyncRestClientFactory restclientFactory = new AsyncRestClientFactory(applicationConfig.getWebClientConfig());
-        this.consumerRestClient = type.isUseHttpProxy() ? restclientFactory.createRestClientUseHttpProxy("")
-                : restclientFactory.createRestClientNoHttpProxy("");
-        this.type = type;
-        startKafkaTopicReceiver();
-    }
-
-    private Disposable startKafkaTopicReceiver() {
-        return KafkaReceiver.create(kafkaInputProperties()) //
-                .receive() //
-                .flatMap(this::onReceivedData) //
-                .subscribe(null, //
-                        throwable -> logger.error("KafkaMessageConsumer error: {}", throwable.getMessage()), //
-                        () -> logger.warn("KafkaMessageConsumer stopped"));
-    }
-
-    private Flux<String> onReceivedData(ConsumerRecord<Integer, String> input) {
-        logger.debug("Received from kafka topic: {} :{}", this.type.getKafkaInputTopic(), input.value());
-        consumerDistributor.emitNext(input.value(), Sinks.EmitFailureHandler.FAIL_FAST);
-        return consumerDistributor.asFlux();
-    }
-
-    public Disposable startDistributeToConsumer(Job job) {
-        return getMessagesFromKafka(job) //
-                .doOnNext(data -> logger.debug("Sending to consumer {} {} {}", job.getId(), job.getCallbackUrl(), data))
-                .flatMap(body -> consumerRestClient.post(job.getCallbackUrl(), body)) //
-                .onErrorResume(this::handleConsumerErrorResponse) //
-                .subscribe(null, //
-                        throwable -> logger.error("KafkaMessageConsumer error: {}", throwable.getMessage()), //
-                        () -> logger.warn("KafkaMessageConsumer stopped {}", job.getType().getId()));
-    }
-
-    private Flux<String> getMessagesFromKafka(Job job) {
-        if (job.isBuffered()) {
-            return consumerDistributor.asFlux() //
-                    .filter(job::isFilterMatch) //
-                    .bufferTimeout(job.getParameters().bufferTimeout.maxSize,
-                            Duration.ofMillis(job.getParameters().bufferTimeout.maxTimeMiliseconds)) //
-                    .flatMap(o -> Flux.just(o.toString()));
-        } else {
-            return consumerDistributor.asFlux() //
-                    .filter(job::isFilterMatch);
-        }
-    }
-
-    private Mono<String> handleConsumerErrorResponse(Throwable t) {
-        logger.warn("error from CONSUMER {}", t.getMessage());
-        return Mono.empty();
-    }
-
-    private ReceiverOptions<Integer, String> kafkaInputProperties() {
-        Map<String, Object> consumerProps = new HashMap<>();
-        if (this.applicationConfig.getKafkaBootStrapServers().isEmpty()) {
-            logger.error("No kafka boostrap server is setup");
-        }
-        consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.applicationConfig.getKafkaBootStrapServers());
-        consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "osc-dmaap-adaptor");
-        consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
-        consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
-
-        return ReceiverOptions.<Integer, String>create(consumerProps)
-                .subscription(Collections.singleton(this.type.getKafkaInputTopic()));
-    }
-
-}
index 23d9da2..0ed85c6 100644
@@ -23,57 +23,97 @@ package org.oran.dmaapadapter.tasks;
 import java.util.HashMap;
 import java.util.Map;
 
+import lombok.Getter;
+
 import org.oran.dmaapadapter.configuration.ApplicationConfig;
 import org.oran.dmaapadapter.repository.InfoType;
 import org.oran.dmaapadapter.repository.InfoTypes;
 import org.oran.dmaapadapter.repository.Job;
+import org.oran.dmaapadapter.repository.Jobs;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.scheduling.annotation.EnableScheduling;
+import org.springframework.scheduling.annotation.Scheduled;
 import org.springframework.stereotype.Component;
-import reactor.core.Disposable;
 
-/**
- * The class fetches incoming requests from DMAAP and sends them further to the
- * consumers that has a job for this InformationType.
- */
 @SuppressWarnings("squid:S2629") // Invoke method(s) only conditionally
 @Component
+@EnableScheduling
 public class KafkaTopicConsumers {
     private static final Logger logger = LoggerFactory.getLogger(KafkaTopicConsumers.class);
 
-    private final Map<String, KafkaTopicConsumer> topicConsumers = new HashMap<>();
-    private final Map<String, Disposable> activeSubscriptions = new HashMap<>();
-    private final ApplicationConfig appConfig;
+    private final Map<String, KafkaTopicListener> topicListeners = new HashMap<>(); // Key is typeId
 
-    public KafkaTopicConsumers(@Autowired ApplicationConfig appConfig) {
-        this.appConfig = appConfig;
-    }
+    @Getter
+    private final Map<String, KafkaJobDataConsumer> consumers = new HashMap<>(); // Key is jobId
+
+    private static final int CONSUMER_SUPERVISION_INTERVAL_MS = 1000 * 60 * 3;
+
+    public KafkaTopicConsumers(@Autowired ApplicationConfig appConfig, @Autowired InfoTypes types,
+            @Autowired Jobs jobs) {
 
-    public void start(InfoTypes types) {
         for (InfoType type : types.getAll()) {
             if (type.isKafkaTopicDefined()) {
-                KafkaTopicConsumer topicConsumer = new KafkaTopicConsumer(appConfig, type);
-                topicConsumers.put(type.getId(), topicConsumer);
+                KafkaTopicListener topicConsumer = new KafkaTopicListener(appConfig, type);
+                topicListeners.put(type.getId(), topicConsumer);
             }
         }
+
+        jobs.addObserver(new Jobs.Observer() {
+            @Override
+            public void onJobbAdded(Job job) {
+                addJob(job);
+            }
+
+            @Override
+            public void onJobRemoved(Job job) {
+                removeJob(job);
+            }
+
+        });
     }
 
     public synchronized void addJob(Job job) {
-        if (this.activeSubscriptions.get(job.getId()) == null && job.getType().isKafkaTopicDefined()) {
+        if (this.consumers.get(job.getId()) == null && job.getType().isKafkaTopicDefined()) {
             logger.debug("Kafka job added {}", job.getId());
-            KafkaTopicConsumer topicConsumer = topicConsumers.get(job.getType().getId());
-            Disposable subscription = topicConsumer.startDistributeToConsumer(job);
-            activeSubscriptions.put(job.getId(), subscription);
+            KafkaTopicListener topicConsumer = topicListeners.get(job.getType().getId());
+            KafkaJobDataConsumer subscription = new KafkaJobDataConsumer(job);
+            subscription.start(topicConsumer.getOutput());
+            consumers.put(job.getId(), subscription);
         }
     }
 
     public synchronized void removeJob(Job job) {
-        Disposable d = activeSubscriptions.remove(job.getId());
+        KafkaJobDataConsumer d = consumers.remove(job.getId());
         if (d != null) {
             logger.debug("Kafka job removed {}", job.getId());
-            d.dispose();
+            d.stop();
         }
     }
 
+    @Scheduled(fixedRate = CONSUMER_SUPERVISION_INTERVAL_MS)
+    public synchronized void restartNonRunningTasks() {
+
+        for (KafkaJobDataConsumer consumer : consumers.values()) {
+            if (!consumer.isRunning()) {
+                restartTopic(consumer);
+            }
+        }
+    }
+
+    private void restartTopic(KafkaJobDataConsumer consumer) {
+        InfoType type = consumer.getJob().getType();
+        KafkaTopicListener topic = this.topicListeners.get(type.getId());
+        topic.start();
+        restartConsumersOfType(topic, type);
+    }
+
+    private void restartConsumersOfType(KafkaTopicListener topic, InfoType type) {
+        this.consumers.forEach((jobId, consumer) -> {
+            if (consumer.getJob().getType().getId().equals(type.getId())) {
+                consumer.start(topic.getOutput());
+            }
+        });
+    }
 }
diff --git a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaTopicListener.java b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaTopicListener.java
new file mode 100644
index 0000000..d1045ee
--- /dev/null
@@ -0,0 +1,106 @@
+/*-
+ * ========================LICENSE_START=================================
+ * O-RAN-SC
+ * %%
+ * Copyright (C) 2021 Nordix Foundation
+ * %%
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ========================LICENSE_END===================================
+ */
+
+package org.oran.dmaapadapter.tasks;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.oran.dmaapadapter.configuration.ApplicationConfig;
+import org.oran.dmaapadapter.repository.InfoType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import reactor.core.Disposable;
+import reactor.core.publisher.Sinks;
+import reactor.core.publisher.Sinks.Many;
+import reactor.kafka.receiver.KafkaReceiver;
+import reactor.kafka.receiver.ReceiverOptions;
+
+/**
+ * The class streams incoming messages from a Kafka topic and forwards them
+ * to a multicast sink, which several other streams can connect to.
+ */
+@SuppressWarnings("squid:S2629") // Invoke method(s) only conditionally
+public class KafkaTopicListener {
+    private static final Logger logger = LoggerFactory.getLogger(KafkaTopicListener.class);
+    private final ApplicationConfig applicationConfig;
+    private final InfoType type;
+    private Many<String> output;
+    private Disposable topicReceiverTask;
+
+    public KafkaTopicListener(ApplicationConfig applicationConfig, InfoType type) {
+        this.applicationConfig = applicationConfig;
+        this.type = type;
+        start();
+    }
+
+    public Many<String> getOutput() {
+        return this.output;
+    }
+
+    public void start() {
+        stop();
+        final int CONSUMER_BACKPRESSURE_BUFFER_SIZE = 1024 * 10;
+        this.output = Sinks.many().multicast().onBackpressureBuffer(CONSUMER_BACKPRESSURE_BUFFER_SIZE);
+        logger.debug("Listening to Kafka topic: {} type: {}", this.type.getKafkaInputTopic(), type.getId());
+        topicReceiverTask = KafkaReceiver.create(kafkaInputProperties()) //
+                .receive() //
+                .doOnNext(this::onReceivedData) //
+                .subscribe(null, //
+                        this::onReceivedError, //
+                        () -> logger.warn("KafkaTopicReceiver stopped"));
+    }
+
+    private void stop() {
+        if (topicReceiverTask != null) {
+            topicReceiverTask.dispose();
+            topicReceiverTask = null;
+        }
+    }
+
+    private void onReceivedData(ConsumerRecord<String, String> input) {
+        logger.debug("Received from kafka topic: {} :{}", this.type.getKafkaInputTopic(), input.value());
+        output.emitNext(input.value(), Sinks.EmitFailureHandler.FAIL_FAST);
+    }
+
+    private void onReceivedError(Throwable t) {
+        logger.error("KafkaTopicReceiver error: {}", t.getMessage());
+    }
+
+    private ReceiverOptions<String, String> kafkaInputProperties() {
+        Map<String, Object> consumerProps = new HashMap<>();
+        if (this.applicationConfig.getKafkaBootStrapServers().isEmpty()) {
+            logger.error("No Kafka bootstrap server is set up");
+        }
+        consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.applicationConfig.getKafkaBootStrapServers());
+        consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "osc-dmaap-adaptor");
+        consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+        consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+
+        return ReceiverOptions.<String, String>create(consumerProps)
+                .subscription(Collections.singleton(this.type.getKafkaInputTopic()));
+    }
+
+}
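
Each listener owns one multicast sink per type, exposed through getOutput(), so any number of KafkaJobDataConsumer instances share a single Kafka subscription. Tapping the stream with an extra subscriber is a one-liner (illustrative only):

    KafkaTopicListener listener = new KafkaTopicListener(applicationConfig, type);
    listener.getOutput().asFlux()
            .subscribe(msg -> logger.trace("seen on {}: {}", type.getKafkaInputTopic(), msg));
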
index e8b236c..8b5b6cf 100644
@@ -87,7 +87,6 @@ public class ProducerRegstrationTask {
     }
 
     private void handleRegistrationCompleted() {
-        logger.debug("Registering types and producer completed");
         isRegisteredInEcs = true;
     }
 
@@ -95,6 +94,7 @@ public class ProducerRegstrationTask {
         logger.warn("Registration of producer failed {}", t.getMessage());
     }
 
+    // Returns TRUE if registration is correct
     private Mono<Boolean> checkRegistration() {
         final String url = applicationConfig.getEcsBaseUrl() + "/data-producer/v1/info-producers/" + PRODUCER_ID;
         return restClient.get(url) //
@@ -105,7 +105,7 @@ public class ProducerRegstrationTask {
     private Mono<Boolean> isRegisterredInfoCorrect(String registerredInfoStr) {
         ProducerRegistrationInfo registerredInfo = gson.fromJson(registerredInfoStr, ProducerRegistrationInfo.class);
         if (isEqual(producerRegistrationInfo(), registerredInfo)) {
-            logger.trace("Already registered");
+            logger.trace("Already registered in ECS");
             return Mono.just(Boolean.TRUE);
         } else {
             return Mono.just(Boolean.FALSE);
@@ -118,8 +118,8 @@ public class ProducerRegstrationTask {
 
     private Mono<String> registerTypesAndProducer() {
         final int CONCURRENCY = 20;
-        final String producerUrl = applicationConfig.getEcsBaseUrl() + "/data-producer/v1/info-producers/"
-                + PRODUCER_ID;
+        final String producerUrl =
+                applicationConfig.getEcsBaseUrl() + "/data-producer/v1/info-producers/" + PRODUCER_ID;
 
         return Flux.fromIterable(this.types.getAll()) //
                 .doOnNext(type -> logger.info("Registering type {}", type.getId())) //
index 0ff7c80..290b70a 100644
@@ -5,6 +5,9 @@
     "filter": {
       "type": "string"
     },
+    "maxConcurrency": {
+      "type": "integer"
+    },
     "bufferTimeout": {
       "type": "object",
       "properties": {
@@ -21,6 +24,5 @@
       ]
     }
   },
-  "required": [
-  ]
-}
+  "required": []
+}
\ No newline at end of file
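
A job definition that should validate against the extended schema, shown here with illustrative values (field names, including the maxTimeMiliseconds spelling, follow the Java Parameters classes above):

    {
      "filter": "^Message_1$",
      "maxConcurrency": 20,
      "bufferTimeout": {
        "maxSize": 1000,
        "maxTimeMiliseconds": 400
      }
    }
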
index 1ca4fac..287c95e 100644
@@ -227,7 +227,8 @@ class ApplicationTest {
 
         ProducerJobInfo info = new ProducerJobInfo(null, "id", "typeId", "targetUri", "owner", "lastUpdated");
         String body = gson.toJson(info);
-        testErrorCode(restClient().post(jobUrl, body), HttpStatus.NOT_FOUND, "Could not find type");
+        testErrorCode(restClient().post(jobUrl, body, MediaType.APPLICATION_JSON), HttpStatus.NOT_FOUND,
+                "Could not find type");
     }
 
     @Test
index 4b6d901..70e89d6 100644
@@ -56,6 +56,15 @@ public class ConsumerController {
 
         public TestResults() {}
 
+        public boolean hasReceived(String str) {
+            for (String received : receivedBodies) {
+                if (received.equals(str)) {
+                    return true;
+                }
+            }
+            return false;
+        }
+
         public void reset() {
             receivedBodies.clear();
         }
index 8d1dda6..1cf8903 100644
@@ -105,7 +105,7 @@ public class EcsSimulatorController {
                 new ProducerJobInfo(job.jobDefinition, jobId, job.infoTypeId, job.jobResultUri, job.owner, "TIMESTAMP");
         String body = gson.toJson(request);
         logger.info("ECS Simulator PUT job: {}", body);
-        restClient.post(url, body).block();
+        restClient.post(url, body, MediaType.APPLICATION_JSON).block();
     }
 
     public void deleteJob(String jobId, AsyncRestClient restClient) {
index 31ef970..470e114 100644
@@ -22,9 +22,11 @@ package org.oran.dmaapadapter;
 
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.awaitility.Awaitility.await;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import com.google.gson.JsonParser;
 
+import java.time.Duration;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -47,6 +49,8 @@ import org.oran.dmaapadapter.repository.InfoType;
 import org.oran.dmaapadapter.repository.InfoTypes;
 import org.oran.dmaapadapter.repository.Job;
 import org.oran.dmaapadapter.repository.Jobs;
+import org.oran.dmaapadapter.tasks.KafkaJobDataConsumer;
+import org.oran.dmaapadapter.tasks.KafkaTopicConsumers;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.beans.factory.annotation.Autowired;
@@ -90,6 +94,9 @@ class IntegrationWithKafka {
     @Autowired
     private EcsSimulatorController ecsSimulatorController;
 
+    @Autowired
+    private KafkaTopicConsumers kafkaTopicConsumers;
+
     private com.google.gson.Gson gson = new com.google.gson.GsonBuilder().create();
 
     private static final Logger logger = LoggerFactory.getLogger(IntegrationWithKafka.class);
@@ -174,9 +181,9 @@ class IntegrationWithKafka {
         return "https://localhost:" + this.applicationConfig.getLocalServerHttpPort();
     }
 
-    private Object jobParametersAsJsonObject(String filter, int maxTimeMiliseconds, int maxSize) {
-        Job.Parameters param = new Job.Parameters(filter,
-                new Job.Parameters.BufferTimeout(maxSize, maxTimeMiliseconds));
+    private Object jobParametersAsJsonObject(String filter, long maxTimeMiliseconds, int maxSize, int maxConcurrency) {
+        Job.Parameters param =
+                new Job.Parameters(filter, new Job.BufferTimeout(maxSize, maxTimeMiliseconds), maxConcurrency);
         String str = gson.toJson(param);
         return jsonObject(str);
     }
@@ -189,13 +196,14 @@ class IntegrationWithKafka {
         }
     }
 
-    private ConsumerJobInfo consumerJobInfo(String filter, int maxTimeMiliseconds, int maxSize) {
+    private ConsumerJobInfo consumerJobInfo(String filter, Duration maxTime, int maxSize, int maxConcurrency) {
         try {
             InfoType type = this.types.getAll().iterator().next();
             String typeId = type.getId();
             String targetUri = baseUrl() + ConsumerController.CONSUMER_TARGET_URL;
-            return new ConsumerJobInfo(typeId, jobParametersAsJsonObject(filter, maxTimeMiliseconds, maxSize), "owner",
-                    targetUri, "");
+            return new ConsumerJobInfo(typeId,
+                    jobParametersAsJsonObject(filter, maxTime.toMillis(), maxSize, maxConcurrency), "owner", targetUri,
+                    "");
         } catch (Exception e) {
             return null;
         }
@@ -218,6 +226,23 @@ class IntegrationWithKafka {
         return SenderRecord.create(new ProducerRecord<>(infoType.getKafkaInputTopic(), i, data + i), i);
     }
 
+    private void sendDataToStream(Flux<SenderRecord<Integer, String, Integer>> dataToSend) {
+        final KafkaSender<Integer, String> sender = KafkaSender.create(senderOptions());
+
+        sender.send(dataToSend) //
+                .doOnError(e -> logger.error("Send failed", e)) //
+                .blockLast();
+
+    }
+
+    private void verifiedReceivedByConsumer(String... strings) {
+        ConsumerController.TestResults consumer = this.consumerController.testResults;
+        await().untilAsserted(() -> assertThat(consumer.receivedBodies.size()).isEqualTo(strings.length));
+        for (String s : strings) {
+            assertTrue(consumer.hasReceived(s));
+        }
+    }
+
     @Test
     void kafkaIntegrationTest() throws InterruptedException {
         final String JOB_ID1 = "ID1";
@@ -227,31 +252,62 @@ class IntegrationWithKafka {
         await().untilAsserted(() -> assertThat(ecsSimulatorController.testResults.registrationInfo).isNotNull());
         assertThat(ecsSimulatorController.testResults.registrationInfo.supportedTypeIds).hasSize(1);
 
-        // Create a job
-        this.ecsSimulatorController.addJob(consumerJobInfo(".*", 10, 1000), JOB_ID1, restClient());
-        this.ecsSimulatorController.addJob(consumerJobInfo(".*Message_1.*", 0, 0), JOB_ID2, restClient());
-        await().untilAsserted(() -> assertThat(this.jobs.size()).isEqualTo(2));
+        // Create two jobs: one with buffering and one with a filter
+        this.ecsSimulatorController.addJob(consumerJobInfo(null, Duration.ofMillis(400), 1000, 20), JOB_ID1,
+                restClient());
+        this.ecsSimulatorController.addJob(consumerJobInfo("^Message_1$", Duration.ZERO, 0, 1), JOB_ID2, restClient());
 
-        final KafkaSender<Integer, String> sender = KafkaSender.create(senderOptions());
+        await().untilAsserted(() -> assertThat(this.jobs.size()).isEqualTo(2));
 
         var dataToSend = Flux.range(1, 3).map(i -> senderRecord("Message_", i)); // Message_1, Message_2 etc.
+        sendDataToStream(dataToSend);
 
-        sender.send(dataToSend) //
-                .doOnError(e -> logger.error("Send failed", e)) //
-                .doOnNext(senderResult -> logger.debug("Sent {}", senderResult)) //
-                .doOnError(t -> logger.error("Error {}", t)) //
-                .blockLast();
+        verifiedReceivedByConsumer("Message_1", "[\"Message_1\", \"Message_2\", \"Message_3\"]");
 
-        ConsumerController.TestResults consumer = this.consumerController.testResults;
-        await().untilAsserted(() -> assertThat(consumer.receivedBodies.size()).isEqualTo(2));
-        assertThat(consumer.receivedBodies.get(0)).isEqualTo("Message_1");
-        assertThat(consumer.receivedBodies.get(1)).isEqualTo("[Message_1, Message_2, Message_3]");
+        // Verify that quotes in the message payload are escaped correctly
+        this.consumerController.testResults.reset();
+        dataToSend = Flux.just(senderRecord("Message\"_", 1));
+        sendDataToStream(dataToSend);
+        verifiedReceivedByConsumer("[\"Message\\\"_1\"]");
 
-        // Delete the job
+        // Delete the jobs
         this.ecsSimulatorController.deleteJob(JOB_ID1, restClient());
         this.ecsSimulatorController.deleteJob(JOB_ID2, restClient());
 
         await().untilAsserted(() -> assertThat(this.jobs.size()).isZero());
+        await().untilAsserted(() -> assertThat(this.kafkaTopicConsumers.getConsumers()).isEmpty());
+    }
+
+    @Test
+    void kafkaIOverflow() throws InterruptedException {
+        final String JOB_ID1 = "ID1";
+        final String JOB_ID2 = "ID2";
+
+        // Register producer, register types
+        await().untilAsserted(() -> assertThat(ecsSimulatorController.testResults.registrationInfo).isNotNull());
+        assertThat(ecsSimulatorController.testResults.registrationInfo.supportedTypeIds).hasSize(1);
+
+        // Create two identical jobs, without buffering and with concurrency 1.
+        this.ecsSimulatorController.addJob(consumerJobInfo(null, Duration.ZERO, 0, 1), JOB_ID1, restClient());
+        this.ecsSimulatorController.addJob(consumerJobInfo(null, Duration.ZERO, 0, 1), JOB_ID2, restClient());
+
+        await().untilAsserted(() -> assertThat(this.jobs.size()).isEqualTo(2));
+
+        var dataToSend = Flux.range(1, 1000000).map(i -> senderRecord("Message_", i)); // Message_1, Message_2 etc.
+        sendDataToStream(dataToSend); // this should overflow
+
+        KafkaJobDataConsumer consumer = kafkaTopicConsumers.getConsumers().values().iterator().next();
+        await().untilAsserted(() -> assertThat(consumer.isRunning()).isFalse());
+        this.consumerController.testResults.reset();
+
+        kafkaTopicConsumers.restartNonRunningTasks();
+        this.ecsSimulatorController.deleteJob(JOB_ID2, restClient()); // Delete one job
+        Thread.sleep(1000); // Restarting the input is asynchronous and may take some time
+
+        dataToSend = Flux.range(1, 1).map(i -> senderRecord("Howdy_", i));
+        sendDataToStream(dataToSend);
+
+        verifiedReceivedByConsumer("Howdy_1");
     }
 
 }
index bc09fdc..1c7f45c 100644 (file)
@@ -20,7 +20,7 @@
 ##
 ## Build
 ##
-FROM golang:1.17-bullseye AS build
+FROM nexus3.o-ran-sc.org:10001/golang:1.17-bullseye AS build
 WORKDIR /app
 COPY go.mod .
 COPY go.sum .
index 90f8471..2fd7194 100644 (file)
@@ -36,7 +36,7 @@ The configured public key and certificate shall be PEM-encoded. A self signed cer
 
 At start up the producer will register the configured job types in ICS and also register itself as a producer supporting these types. If ICS is unavailable, the producer will retry to connect indefinitely. The same goes for MR.
 
-Once the initial registration is done, the producer will constantly poll MR for all configured job types. When receiving messages for a type, it will distribute these messages to all jobs registered for the type. If no jobs for that type are registered, the messages will be discarded. If a consumer is unavailable for distribution, the messages will be discarded for that consumer.
+Once the initial registration is done, the producer will continuously poll MR for all configured job types. When it receives messages for a type, it will distribute them to all jobs registered for that type. If no jobs for the type are registered, the messages will be discarded. If a consumer is unavailable for distribution, the messages will be discarded for that consumer until it becomes available again.
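
To make this drop-on-overflow behaviour concrete, here is a minimal, self-contained Go sketch of the same idea. It mirrors the distributeMessages/emptyMessagesBuffer logic added to internal/jobs/jobs.go further down in this change; the names distribute and ch are illustrative only, not part of the producer.

```go
package main

import "fmt"

// distribute mirrors the producer's non-blocking hand-off: buffer the batch
// while there is room, otherwise drain and drop everything (a sketch, not
// the producer's actual code).
func distribute(ch chan []byte, batch []byte) {
	if len(ch) < cap(ch) {
		ch <- batch // consumer keeps up: buffer the batch
		return
	}
	// Consumer is too slow or unavailable: drain the buffered batches.
	// Note that the current batch is dropped as well.
	for {
		select {
		case <-ch:
		default:
			return
		}
	}
}

func main() {
	ch := make(chan []byte, 10) // the per-job buffer capacity used by newJob
	for i := 0; i < 11; i++ {
		distribute(ch, []byte(fmt.Sprintf("batch-%d", i)))
	}
	fmt.Println("batches left in buffer:", len(ch)) // prints 0: the 11th batch triggered a drain
}
```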
 
 ## Development
 
index 1c42942..6dad5fd 100644 (file)
@@ -34,7 +34,7 @@ import (
 type TypeData struct {
        TypeId        string `json:"id"`
        DMaaPTopicURL string `json:"dmaapTopicUrl"`
-       jobHandler    *jobHandler
+       jobsHandler   *jobsHandler
 }
 
 type JobInfo struct {
@@ -52,8 +52,8 @@ type JobTypesManager interface {
 }
 
 type JobsManager interface {
-       AddJob(JobInfo) error
-       DeleteJob(jobId string)
+       AddJobFromRESTCall(JobInfo) error
+       DeleteJobFromRESTCall(jobId string)
 }
 
 type JobsManagerImpl struct {
@@ -64,17 +64,6 @@ type JobsManagerImpl struct {
        distributeClient restclient.HTTPClient
 }
 
-type jobHandler struct {
-       mu               sync.Mutex
-       typeId           string
-       topicUrl         string
-       jobs             map[string]JobInfo
-       addJobCh         chan JobInfo
-       deleteJobCh      chan string
-       pollClient       restclient.HTTPClient
-       distributeClient restclient.HTTPClient
-}
-
 func NewJobsManagerImpl(typeConfigFilePath string, pollClient restclient.HTTPClient, mrAddr string, distributeClient restclient.HTTPClient) *JobsManagerImpl {
        return &JobsManagerImpl{
                configFile:       typeConfigFilePath,
@@ -85,10 +74,10 @@ func NewJobsManagerImpl(typeConfigFilePath string, pollClient restclient.HTTPCli
        }
 }
 
-func (jm *JobsManagerImpl) AddJob(ji JobInfo) error {
+func (jm *JobsManagerImpl) AddJobFromRESTCall(ji JobInfo) error {
        if err := jm.validateJobInfo(ji); err == nil {
                typeData := jm.allTypes[ji.InfoTypeIdentity]
-               typeData.jobHandler.addJobCh <- ji
+               typeData.jobsHandler.addJobCh <- ji
                log.Debug("Added job: ", ji)
                return nil
        } else {
@@ -96,10 +85,10 @@ func (jm *JobsManagerImpl) AddJob(ji JobInfo) error {
        }
 }
 
-func (jm *JobsManagerImpl) DeleteJob(jobId string) {
+func (jm *JobsManagerImpl) DeleteJobFromRESTCall(jobId string) {
        for _, typeData := range jm.allTypes {
                log.Debugf("Deleting job %v from type %v", jobId, typeData.TypeId)
-               typeData.jobHandler.deleteJobCh <- jobId
+               typeData.jobsHandler.deleteJobCh <- jobId
        }
        log.Debug("Deleted job: ", jobId)
 }
@@ -131,21 +120,10 @@ func (jm *JobsManagerImpl) LoadTypesFromConfiguration() ([]config.TypeDefinition
                return nil, err
        }
        for _, typeDef := range typeDefs.Types {
-               addCh := make(chan JobInfo)
-               deleteCh := make(chan string)
-               jh := jobHandler{
-                       typeId:           typeDef.Id,
-                       topicUrl:         typeDef.DmaapTopicURL,
-                       jobs:             make(map[string]JobInfo),
-                       addJobCh:         addCh,
-                       deleteJobCh:      deleteCh,
-                       pollClient:       jm.pollClient,
-                       distributeClient: jm.distributeClient,
-               }
                jm.allTypes[typeDef.Id] = TypeData{
                        TypeId:        typeDef.Id,
                        DMaaPTopicURL: typeDef.DmaapTopicURL,
-                       jobHandler:    &jh,
+                       jobsHandler:   newJobsHandler(typeDef.Id, typeDef.DmaapTopicURL, jm.pollClient, jm.distributeClient),
                }
        }
        return typeDefs.Types, nil
@@ -159,15 +137,38 @@ func (jm *JobsManagerImpl) GetSupportedTypes() []string {
        return supportedTypes
 }
 
-func (jm *JobsManagerImpl) StartJobs() {
+func (jm *JobsManagerImpl) StartJobsForAllTypes() {
        for _, jobType := range jm.allTypes {
 
-               go jobType.jobHandler.start(jm.mrAddress)
+               go jobType.jobsHandler.startPollingAndDistribution(jm.mrAddress)
+
+       }
+}
+
+type jobsHandler struct {
+       mu               sync.Mutex
+       typeId           string
+       topicUrl         string
+       jobs             map[string]job
+       addJobCh         chan JobInfo
+       deleteJobCh      chan string
+       pollClient       restclient.HTTPClient
+       distributeClient restclient.HTTPClient
+}
 
+func newJobsHandler(typeId string, topicURL string, pollClient restclient.HTTPClient, distributeClient restclient.HTTPClient) *jobsHandler {
+       return &jobsHandler{
+               typeId:           typeId,
+               topicUrl:         topicURL,
+               jobs:             make(map[string]job),
+               addJobCh:         make(chan JobInfo),
+               deleteJobCh:      make(chan string),
+               pollClient:       pollClient,
+               distributeClient: distributeClient,
        }
 }
 
-func (jh *jobHandler) start(mRAddress string) {
+func (jh *jobsHandler) startPollingAndDistribution(mRAddress string) {
        go func() {
                for {
                        jh.pollAndDistributeMessages(mRAddress)
@@ -181,45 +182,104 @@ func (jh *jobHandler) start(mRAddress string) {
        }()
 }
 
-func (jh *jobHandler) pollAndDistributeMessages(mRAddress string) {
+func (jh *jobsHandler) pollAndDistributeMessages(mRAddress string) {
        log.Debugf("Processing jobs for type: %v", jh.typeId)
        messagesBody, error := restclient.Get(mRAddress+jh.topicUrl, jh.pollClient)
        if error != nil {
-               log.Warnf("Error getting data from MR. Cause: %v", error)
+               log.Warn("Error getting data from MR. Cause: ", error)
        }
-       log.Debugf("Received messages: %v", string(messagesBody))
+       log.Debug("Received messages: ", string(messagesBody))
        jh.distributeMessages(messagesBody)
 }
 
-func (jh *jobHandler) distributeMessages(messages []byte) {
+func (jh *jobsHandler) distributeMessages(messages []byte) {
        if len(messages) > 2 {
                jh.mu.Lock()
                defer jh.mu.Unlock()
-               for _, jobInfo := range jh.jobs {
-                       go jh.sendMessagesToConsumer(messages, jobInfo)
+               for _, job := range jh.jobs {
+                       if len(job.messagesChannel) < cap(job.messagesChannel) {
+                               job.messagesChannel <- messages
+                       } else {
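+                               // Channel is full: drop the buffered batches (and this one) rather than block the poll loop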
+                               jh.emptyMessagesBuffer(job)
+                       }
                }
        }
 }
 
-func (jh *jobHandler) sendMessagesToConsumer(messages []byte, jobInfo JobInfo) {
-       log.Debugf("Processing job: %v", jobInfo.InfoJobIdentity)
-       if postErr := restclient.Post(jobInfo.TargetUri, messages, jh.distributeClient); postErr != nil {
-               log.Warnf("Error posting data for job: %v. Cause: %v", jobInfo, postErr)
+func (jh *jobsHandler) emptyMessagesBuffer(job job) {
+       log.Debug("Emptying message queue for job: ", job.jobInfo.InfoJobIdentity)
+out:
+       for {
+               select {
+               case <-job.messagesChannel:
+               default:
+                       break out
+               }
        }
-       log.Debugf("Messages distributed to consumer: %v.", jobInfo.Owner)
 }
 
-func (jh *jobHandler) monitorManagementChannels() {
+func (jh *jobsHandler) monitorManagementChannels() {
        select {
        case addedJob := <-jh.addJobCh:
-               jh.mu.Lock()
-               log.Debugf("received %v from addJobCh\n", addedJob)
-               jh.jobs[addedJob.InfoJobIdentity] = addedJob
-               jh.mu.Unlock()
+               jh.addJob(addedJob)
        case deletedJob := <-jh.deleteJobCh:
-               jh.mu.Lock()
-               log.Debugf("received %v from deleteJobCh\n", deletedJob)
+               jh.deleteJob(deletedJob)
+       }
+}
+
+func (jh *jobsHandler) addJob(addedJob JobInfo) {
+       jh.mu.Lock()
+       log.Debug("Add job: ", addedJob)
+       newJob := newJob(addedJob, jh.distributeClient)
+       go newJob.start()
+       jh.jobs[addedJob.InfoJobIdentity] = newJob
+       jh.mu.Unlock()
+}
+
+func (jh *jobsHandler) deleteJob(deletedJob string) {
+       jh.mu.Lock()
+       log.Debug("Delete job: ", deletedJob)
+       j, exist := jh.jobs[deletedJob]
+       if exist {
+               j.controlChannel <- struct{}{}
                delete(jh.jobs, deletedJob)
-               jh.mu.Unlock()
        }
+       jh.mu.Unlock()
+}
+
+type job struct {
+       jobInfo         JobInfo
+       client          restclient.HTTPClient
+       messagesChannel chan []byte
+       controlChannel  chan struct{}
+}
+
+func newJob(j JobInfo, c restclient.HTTPClient) job {
+       return job{
+               jobInfo:         j,
+               client:          c,
+               messagesChannel: make(chan []byte, 10),
+               controlChannel:  make(chan struct{}),
+       }
+}
+
+func (j *job) start() {
+out:
+       for {
+               select {
+               case <-j.controlChannel:
+                       log.Debug("Stop distribution for job: ", j.jobInfo.InfoJobIdentity)
+                       break out
+               case msg := <-j.messagesChannel:
+                       j.sendMessagesToConsumer(msg)
+               }
+       }
+}
+
+func (j *job) sendMessagesToConsumer(messages []byte) {
+       log.Debug("Processing job: ", j.jobInfo.InfoJobIdentity)
+       if postErr := restclient.Post(j.jobInfo.TargetUri, messages, j.client); postErr != nil {
+               log.Warnf("Error posting data for job: %v. Cause: %v", j.jobInfo, postErr)
+       }
+       log.Debugf("Messages for job: %v distributed to consumer: %v", j.jobInfo.InfoJobIdentity, j.jobInfo.Owner)
 }
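
For reference, a minimal standalone sketch of the job/control-channel lifecycle introduced above. The HTTP POST to the consumer is replaced by a print and the done channel is added only for this example, so treat it as illustrative rather than the producer's code.

```go
package main

import (
	"fmt"
	"time"
)

type job struct {
	messagesChannel chan []byte
	controlChannel  chan struct{}
}

// start loops until something is sent on the control channel, distributing
// each buffered batch as it arrives (the print stands in for the consumer POST).
func (j *job) start(done chan struct{}) {
	defer close(done)
	for {
		select {
		case <-j.controlChannel:
			fmt.Println("stop distribution")
			return
		case msg := <-j.messagesChannel:
			fmt.Println("distribute:", string(msg))
		}
	}
}

func main() {
	j := job{
		messagesChannel: make(chan []byte, 10), // capacities mirror newJob
		controlChannel:  make(chan struct{}),
	}
	done := make(chan struct{})
	go j.start(done)

	j.messagesChannel <- []byte(`[{"message": {"data": "data"}}]`)
	time.Sleep(10 * time.Millisecond) // let the batch be distributed first
	j.controlChannel <- struct{}{}    // what jobsHandler.deleteJob sends to stop the job
	<-done
}
```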
index 3651a13..552b5fa 100644 (file)
@@ -36,7 +36,7 @@ import (
 
 const typeDefinition = `{"types": [{"id": "type1", "dmaapTopicUrl": "events/unauthenticated.SEC_FAULT_OUTPUT/dmaapmediatorproducer/type1"}]}`
 
-func TestGetTypes_filesOkShouldReturnSliceOfTypesAndProvideSupportedTypes(t *testing.T) {
+func TestJobsManagerGetTypes_filesOkShouldReturnSliceOfTypesAndProvideSupportedTypes(t *testing.T) {
        assertions := require.New(t)
        typesDir, err := os.MkdirTemp("", "configs")
        if err != nil {
@@ -63,7 +63,7 @@ func TestGetTypes_filesOkShouldReturnSliceOfTypesAndProvideSupportedTypes(t *tes
        assertions.EqualValues([]string{"type1"}, supportedTypes)
 }
 
-func TestManagerAddJobWhenTypeIsSupported_shouldAddJobToChannel(t *testing.T) {
+func TestJobsManagerAddJobWhenTypeIsSupported_shouldAddJobToChannel(t *testing.T) {
        assertions := require.New(t)
        managerUnderTest := NewJobsManagerImpl("", nil, "", nil)
        wantedJob := JobInfo{
@@ -74,36 +74,36 @@ func TestManagerAddJobWhenTypeIsSupported_shouldAddJobToChannel(t *testing.T) {
                InfoJobData:      "{}",
                InfoTypeIdentity: "type1",
        }
-       jobHandler := jobHandler{
+       jobsHandler := jobsHandler{
                addJobCh: make(chan JobInfo)}
        managerUnderTest.allTypes["type1"] = TypeData{
-               TypeId:     "type1",
-               jobHandler: &jobHandler,
+               TypeId:      "type1",
+               jobsHandler: &jobsHandler,
        }
 
        var err error
        go func() {
-               err = managerUnderTest.AddJob(wantedJob)
+               err = managerUnderTest.AddJobFromRESTCall(wantedJob)
        }()
 
        assertions.Nil(err)
-       addedJob := <-jobHandler.addJobCh
+       addedJob := <-jobsHandler.addJobCh
        assertions.Equal(wantedJob, addedJob)
 }
 
-func TestManagerAddJobWhenTypeIsNotSupported_shouldReturnError(t *testing.T) {
+func TestJobsManagerAddJobWhenTypeIsNotSupported_shouldReturnError(t *testing.T) {
        assertions := require.New(t)
        managerUnderTest := NewJobsManagerImpl("", nil, "", nil)
        jobInfo := JobInfo{
                InfoTypeIdentity: "type1",
        }
 
-       err := managerUnderTest.AddJob(jobInfo)
+       err := managerUnderTest.AddJobFromRESTCall(jobInfo)
        assertions.NotNil(err)
        assertions.Equal("type not supported: type1", err.Error())
 }
 
-func TestManagerAddJobWhenJobIdMissing_shouldReturnError(t *testing.T) {
+func TestJobsManagerAddJobWhenJobIdMissing_shouldReturnError(t *testing.T) {
        assertions := require.New(t)
        managerUnderTest := NewJobsManagerImpl("", nil, "", nil)
        managerUnderTest.allTypes["type1"] = TypeData{
@@ -113,12 +113,12 @@ func TestManagerAddJobWhenJobIdMissing_shouldReturnError(t *testing.T) {
        jobInfo := JobInfo{
                InfoTypeIdentity: "type1",
        }
-       err := managerUnderTest.AddJob(jobInfo)
+       err := managerUnderTest.AddJobFromRESTCall(jobInfo)
        assertions.NotNil(err)
        assertions.Equal("missing required job identity: {    <nil> type1}", err.Error())
 }
 
-func TestManagerAddJobWhenTargetUriMissing_shouldReturnError(t *testing.T) {
+func TestJobsManagerAddJobWhenTargetUriMissing_shouldReturnError(t *testing.T) {
        assertions := require.New(t)
        managerUnderTest := NewJobsManagerImpl("", nil, "", nil)
        managerUnderTest.allTypes["type1"] = TypeData{
@@ -129,38 +129,42 @@ func TestManagerAddJobWhenTargetUriMissing_shouldReturnError(t *testing.T) {
                InfoTypeIdentity: "type1",
                InfoJobIdentity:  "job1",
        }
-       err := managerUnderTest.AddJob(jobInfo)
+       err := managerUnderTest.AddJobFromRESTCall(jobInfo)
        assertions.NotNil(err)
        assertions.Equal("missing required target URI: {  job1  <nil> type1}", err.Error())
 }
 
-func TestManagerDeleteJob(t *testing.T) {
+func TestJobsManagerDeleteJob_shouldSendDeleteToChannel(t *testing.T) {
        assertions := require.New(t)
        managerUnderTest := NewJobsManagerImpl("", nil, "", nil)
-       jobHandler := jobHandler{
+       jobsHandler := jobsHandler{
                deleteJobCh: make(chan string)}
        managerUnderTest.allTypes["type1"] = TypeData{
-               TypeId:     "type1",
-               jobHandler: &jobHandler,
+               TypeId:      "type1",
+               jobsHandler: &jobsHandler,
        }
 
-       go managerUnderTest.DeleteJob("job2")
+       go managerUnderTest.DeleteJobFromRESTCall("job2")
 
-       assertions.Equal("job2", <-jobHandler.deleteJobCh)
+       assertions.Equal("job2", <-jobsHandler.deleteJobCh)
 }
 
-func TestHandlerPollAndDistributeMessages(t *testing.T) {
+func TestAddJobToJobsManager_shouldStartPollAndDistributeMessages(t *testing.T) {
        assertions := require.New(t)
 
-       wg := sync.WaitGroup{}
+       called := false
        messages := `[{"message": {"data": "data"}}]`
        pollClientMock := NewTestClient(func(req *http.Request) *http.Response {
                if req.URL.String() == "http://mrAddr/topicUrl" {
                        assertions.Equal(req.Method, "GET")
-                       wg.Done() // Signal that the poll call has been made
+                       body := "[]"
+                       if !called {
+                               called = true
+                               body = messages
+                       }
                        return &http.Response{
                                StatusCode: 200,
-                               Body:       ioutil.NopCloser(bytes.NewReader([]byte(messages))),
+                               Body:       ioutil.NopCloser(bytes.NewReader([]byte(body))),
                                Header:     make(http.Header), // Must be set to non-nil value or it panics
                        }
                }
@@ -168,12 +172,14 @@ func TestHandlerPollAndDistributeMessages(t *testing.T) {
                t.Fail()
                return nil
        })
+
+       wg := sync.WaitGroup{}
        distributeClientMock := NewTestClient(func(req *http.Request) *http.Response {
                if req.URL.String() == "http://consumerHost/target" {
                        assertions.Equal(req.Method, "POST")
-                       assertions.Equal(messages, getBodyAsString(req))
+                       assertions.Equal(messages, getBodyAsString(req, t))
                        assertions.Equal("application/json", req.Header.Get("Content-Type"))
-                       wg.Done() // Signal that the distribution call has been made
+                       wg.Done()
                        return &http.Response{
                                StatusCode: 200,
                                Body:       ioutil.NopCloser(bytes.NewBufferString(`OK`)),
@@ -184,73 +190,73 @@ func TestHandlerPollAndDistributeMessages(t *testing.T) {
                t.Fail()
                return nil
        })
+       jobsHandler := newJobsHandler("type1", "/topicUrl", pollClientMock, distributeClientMock)
+
+       jobsManager := NewJobsManagerImpl("", pollClientMock, "http://mrAddr", distributeClientMock)
+       jobsManager.allTypes["type1"] = TypeData{
+               DMaaPTopicURL: "/topicUrl",
+               TypeId:        "type1",
+               jobsHandler:   jobsHandler,
+       }
+
+       jobsManager.StartJobsForAllTypes()
 
        jobInfo := JobInfo{
                InfoTypeIdentity: "type1",
                InfoJobIdentity:  "job1",
                TargetUri:        "http://consumerHost/target",
        }
-       handlerUnderTest := jobHandler{
-               topicUrl:         "/topicUrl",
-               jobs:             map[string]JobInfo{jobInfo.InfoJobIdentity: jobInfo},
-               pollClient:       pollClientMock,
-               distributeClient: distributeClientMock,
-       }
 
-       wg.Add(2) // Two calls should be made to the server, one to poll and one to distribute
-       handlerUnderTest.pollAndDistributeMessages("http://mrAddr")
+       wg.Add(1) // Wait till the distribution has happened
+       err := jobsManager.AddJobFromRESTCall(jobInfo)
+       assertions.Nil(err)
 
-       if waitTimeout(&wg, 100*time.Millisecond) {
+       if waitTimeout(&wg, 2*time.Second) {
                t.Error("Not all calls to server were made")
                t.Fail()
        }
 }
 
-func TestHandlerAddJob_shouldAddJobToJobsMap(t *testing.T) {
-       assertions := require.New(t)
+func TestJobsHandlerDeleteJob_shouldDeleteJobFromJobsMap(t *testing.T) {
+       jobToDelete := newJob(JobInfo{}, nil)
+       go jobToDelete.start()
+       jobsHandler := newJobsHandler("type1", "/topicUrl", nil, nil)
+       jobsHandler.jobs["job1"] = jobToDelete
 
-       jobInfo := JobInfo{
-               InfoTypeIdentity: "type1",
-               InfoJobIdentity:  "job1",
-               TargetUri:        "http://consumerHost/target",
-       }
+       go jobsHandler.monitorManagementChannels()
 
-       addCh := make(chan JobInfo)
-       handlerUnderTest := jobHandler{
-               mu:       sync.Mutex{},
-               jobs:     map[string]JobInfo{},
-               addJobCh: addCh,
-       }
+       jobsHandler.deleteJobCh <- "job1"
 
-       go func() {
-               addCh <- jobInfo
-       }()
-
-       handlerUnderTest.monitorManagementChannels()
-
-       assertions.Len(handlerUnderTest.jobs, 1)
-       assertions.Equal(jobInfo, handlerUnderTest.jobs["job1"])
+       deleted := false
+       for i := 0; i < 100; i++ {
+               if len(jobsHandler.jobs) == 0 {
+                       deleted = true
+                       break
+               }
+                       time.Sleep(time.Microsecond) // Yield so the handler's goroutine can process the delete
+       }
+       require.New(t).True(deleted, "Job not deleted")
 }
 
-func TestHandlerDeleteJob_shouldDeleteJobFromJobsMap(t *testing.T) {
-       assertions := require.New(t)
+func TestJobsHandlerEmptyJobMessageBufferWhenItIsFull(t *testing.T) {
+       job := newJob(JobInfo{
+               InfoJobIdentity: "job",
+       }, nil)
 
-       deleteCh := make(chan string)
-       handlerUnderTest := jobHandler{
-               mu: sync.Mutex{},
-               jobs: map[string]JobInfo{"job1": {
-                       InfoJobIdentity: "job1",
-               }},
-               deleteJobCh: deleteCh,
-       }
+       jobsHandler := newJobsHandler("type1", "/topicUrl", nil, nil)
+       jobsHandler.jobs["job1"] = job
 
-       go func() {
-               deleteCh <- "job1"
-       }()
+       fillMessagesBuffer(job.messagesChannel)
 
-       handlerUnderTest.monitorManagementChannels()
+       jobsHandler.distributeMessages([]byte("sent msg"))
 
-       assertions.Len(handlerUnderTest.jobs, 0)
+       require.New(t).Len(job.messagesChannel, 0)
+}
+
+func fillMessagesBuffer(mc chan []byte) {
+       for i := 0; i < cap(mc); i++ {
+               mc <- []byte("msg")
+       }
 }
 
 type RoundTripFunc func(req *http.Request) *http.Response
@@ -282,8 +288,10 @@ func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
        }
 }
 
-func getBodyAsString(req *http.Request) string {
+func getBodyAsString(req *http.Request, t *testing.T) string {
        buf := new(bytes.Buffer)
-       buf.ReadFrom(req.Body)
+       if _, err := buf.ReadFrom(req.Body); err != nil {
+               t.Fail()
+       }
        return buf.String()
 }
index 8bed1f9..79646c2 100644 (file)
@@ -71,7 +71,7 @@ func (h *ProducerCallbackHandler) addInfoJobHandler(w http.ResponseWriter, r *ht
                http.Error(w, fmt.Sprintf("Invalid json body. Cause: %v", unmarshalErr), http.StatusBadRequest)
                return
        }
-       if err := h.jobsManager.AddJob(jobInfo); err != nil {
+       if err := h.jobsManager.AddJobFromRESTCall(jobInfo); err != nil {
                http.Error(w, fmt.Sprintf("Invalid job info. Cause: %v", err), http.StatusBadRequest)
        }
 }
@@ -84,7 +84,7 @@ func (h *ProducerCallbackHandler) deleteInfoJobHandler(w http.ResponseWriter, r
                return
        }
 
-       h.jobsManager.DeleteJob(id)
+       h.jobsManager.DeleteJobFromRESTCall(id)
 }
 
 type notFoundHandler struct{}
index 5c2027a..1d458c9 100644 (file)
@@ -136,7 +136,7 @@ func TestAddInfoJobHandler(t *testing.T) {
        for _, tt := range tests {
                t.Run(tt.name, func(t *testing.T) {
                        jobHandlerMock := jobhandler.JobHandler{}
-                       jobHandlerMock.On("AddJob", tt.args.job).Return(tt.args.mockReturn)
+                       jobHandlerMock.On("AddJobFromRESTCall", tt.args.job).Return(tt.args.mockReturn)
 
                        callbackHandlerUnderTest := NewProducerCallbackHandler(&jobHandlerMock)
 
@@ -148,7 +148,7 @@ func TestAddInfoJobHandler(t *testing.T) {
 
                        assertions.Equal(tt.wantedStatus, responseRecorder.Code, tt.name)
                        assertions.Contains(responseRecorder.Body.String(), tt.wantedBody, tt.name)
-                       jobHandlerMock.AssertCalled(t, "AddJob", tt.args.job)
+                       jobHandlerMock.AssertCalled(t, "AddJobFromRESTCall", tt.args.job)
                })
        }
 }
@@ -156,7 +156,7 @@ func TestAddInfoJobHandler(t *testing.T) {
 func TestDeleteJob(t *testing.T) {
        assertions := require.New(t)
        jobHandlerMock := jobhandler.JobHandler{}
-       jobHandlerMock.On("DeleteJob", mock.Anything).Return(nil)
+       jobHandlerMock.On("DeleteJobFromRESTCall", mock.Anything).Return(nil)
 
        callbackHandlerUnderTest := NewProducerCallbackHandler(&jobHandlerMock)
 
@@ -168,7 +168,7 @@ func TestDeleteJob(t *testing.T) {
 
        assertions.Equal("", responseRecorder.Body.String())
 
-       jobHandlerMock.AssertCalled(t, "DeleteJob", "job1")
+       jobHandlerMock.AssertCalled(t, "DeleteJobFromRESTCall", "job1")
 }
 
 func newRequest(method string, url string, jobInfo *jobs.JobInfo, t *testing.T) *http.Request {
index 74f4edf..194ed75 100644 (file)
@@ -60,7 +60,7 @@ func main() {
        if err := registerTypesAndProducer(jobsManager, configuration.InfoCoordinatorAddress, callbackAddress, retryClient); err != nil {
                log.Fatalf("Stopping producer due to: %v", err)
        }
-       jobsManager.StartJobs()
+       jobsManager.StartJobsForAllTypes()
 
        log.Debug("Starting DMaaP Mediator Producer")
        go func() {
index 8e30b1c..ad20752 100644 (file)
@@ -13,7 +13,7 @@ type JobHandler struct {
 }
 
 // AddJobFromRESTCall provides a mock function with given fields: _a0
-func (_m *JobHandler) AddJob(_a0 jobs.JobInfo) error {
+func (_m *JobHandler) AddJobFromRESTCall(_a0 jobs.JobInfo) error {
        ret := _m.Called(_a0)
 
        var r0 error
@@ -27,6 +27,6 @@ func (_m *JobHandler) AddJob(_a0 jobs.JobInfo) error {
 }
 
 // DeleteJobFromRESTCall provides a mock function with given fields: jobId
-func (_m *JobHandler) DeleteJob(jobId string) {
+func (_m *JobHandler) DeleteJobFromRESTCall(jobId string) {
        _m.Called(jobId)
 }
index 03e67c0..5cbcaea 100644 (file)
@@ -44,7 +44,7 @@ func main() {
        registerJob(*port)
 
        fmt.Print("Starting consumer on port: ", *port)
-       http.ListenAndServe(fmt.Sprintf(":%v", *port), nil)
+       fmt.Println(http.ListenAndServe(fmt.Sprintf(":%v", *port), nil))
 }
 
 func registerJob(port int) {
index 82ae08d..36ffa39 100644 (file)
@@ -57,7 +57,7 @@ func main() {
        http.HandleFunc("/events/unauthenticated.SEC_FAULT_OUTPUT/dmaapmediatorproducer/STD_Fault_Messages", handleData)
 
        fmt.Print("Starting mr on port: ", *port)
-       http.ListenAndServeTLS(fmt.Sprintf(":%v", *port), "../../security/producer.crt", "../../security/producer.key", nil)
+       fmt.Println(http.ListenAndServeTLS(fmt.Sprintf(":%v", *port), "../../security/producer.crt", "../../security/producer.key", nil))
 
 }
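
The two stub changes above stop ignoring the error returned by ListenAndServe/ListenAndServeTLS, which only return on failure (for example when the port is already taken). A common equivalent idiom, shown here as an illustrative sketch with an assumed port, is to hand the error straight to log.Fatal:

```go
package main

import (
	"log"
	"net/http"
)

func main() {
	// ListenAndServe blocks until the server fails, so always consume its error.
	log.Fatal(http.ListenAndServe(":8088", nil))
}
```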
 
diff --git a/docker-compose/.env b/docker-compose/.env
new file mode 100644 (file)
index 0000000..6fc3528
--- /dev/null
@@ -0,0 +1,64 @@
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2021 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+#PMS
+PMS_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-policy-agent"
+PMS_IMAGE_TAG="2.2.0"
+
+#A1_SIM
+A1_SIM_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/a1-simulator"
+A1_SIM_IMAGE_TAG="2.1.0"
+
+#RAPP
+RAPP_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-r-app-catalogue"
+RAPP_IMAGE_TAG="1.0.0"
+
+#CONTROL_PANEL
+CONTROL_PANEL_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-controlpanel"
+CONTROL_PANEL_IMAGE_TAG="2.2.0"
+
+#GATEWAY
+NONRTRIC_GATEWAY_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-gateway"
+NONRTRIC_GATEWAY_IMAGE_TAG="1.0.0"
+
+#ECS
+ECS_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-enrichment-coordinator-service"
+ECS_IMAGE_TAG="1.1.0"
+
+#CONSUMER
+CONSUMER_IMAGE_BASE="eexit/mirror-http-server"
+CONSUMER_IMAGE_TAG="latest"
+
+#ORU
+ORU_APP_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-o-ru-closed-loop-recovery"
+ORU_APP_IMAGE_TAG="1.0.0"
+
+#DB
+DB_IMAGE_BASE="mysql/mysql-server"
+DB_IMAGE_TAG="5.6"
+
+#A1CONTROLLER
+A1CONTROLLER_IMAGE_BASE="nexus3.onap.org:10002/onap/sdnc-image"
+A1CONTROLLER_IMAGE_TAG="2.1.2"
+
+#DMAAP_MEDIATOR_GO
+DMAAP_MEDIATOR_GO_BASE="nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-dmaap-mediator-producer"
+DMAAP_MEDIATOR_GO_TAG="1.0.0"
+
+#DMAAP_MEDIATOR_JAVA
+DMAAP_MEDIATOR_JAVA_BASE="nexus3.o-ran-sc.org:10003/o-ran-sc/nonrtric-dmaap-adaptor"
+DMAAP_MEDIATOR_JAVA_TAG="1.0.0-SNAPSHOT"
\ No newline at end of file
index 9366ff1..8467946 100644 (file)
@@ -22,7 +22,7 @@ networks:
 
 services:
   a1-sim-OSC:
-    image: nexus3.o-ran-sc.org:10002/o-ran-sc/a1-simulator:2.1.0
+    image: "${A1_SIM_IMAGE_BASE}:${A1_SIM_IMAGE_TAG}"
     container_name: a1-sim-OSC
     networks:
       - default
@@ -35,7 +35,7 @@ services:
       - ALLOW_HTTP=true
 
   a1-sim-STD:
-    image: nexus3.o-ran-sc.org:10002/o-ran-sc/a1-simulator:2.1.0
+    image: "${A1_SIM_IMAGE_BASE}:${A1_SIM_IMAGE_TAG}"
     container_name: a1-sim-STD
     networks:
       - default
@@ -48,7 +48,7 @@ services:
       - ALLOW_HTTP=true
 
   a1-sim-STD-v2:
-    image: nexus3.o-ran-sc.org:10002/o-ran-sc/a1-simulator:2.1.0
+    image: "${A1_SIM_IMAGE_BASE}:${A1_SIM_IMAGE_TAG}"
     container_name: a1-sim-STD-v2
     networks:
       - default
index 340d158..4efdf57 100644 (file)
@@ -22,18 +22,15 @@ networks:
 
 services:
   dmaap-mediator-go:
-    image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-dmaap-mediator-producer:1.0.0
+    image: "${DMAAP_MEDIATOR_GO_BASE}:${DMAAP_MEDIATOR_GO_TAG}"
     container_name: dmaap-mediator-go
     environment:
       - INFO_PRODUCER_HOST=http://consumer
-      - LOG_LEVEL=Debug
       - INFO_PRODUCER_PORT=8088
       - INFO_COORD_ADDR=http://ecs:8083
-      - MR_HOST=http://dmaap-mr
-      - MR_PORT=3904
-      - INFO_PRODUCER_SUPERVISION_CALLBACK_HOST=http://consumer
-      - INFO_PRODUCER_SUPERVISION_CALLBACK_PORT=8088
-      - INFO_JOB_CALLBACK_HOST=http://consumer
-      - INFO_JOB_CALLBACK_PORT=8088
+      - DMAAP_MR_ADDR=http://dmaap-mr:3904
+      - PRODUCER_CERT_PATH=security/producer.crt
+      - PRODUCER_KEY_PATH=security/producer.key
+      - LOG_LEVEL=Debug
     networks:
       - default
\ No newline at end of file
index 1d53de4..5cfe809 100644 (file)
@@ -22,7 +22,7 @@ networks:
 
 services:
   dmaap-mediator-java:
-    image: nexus3.o-ran-sc.org:10003/o-ran-sc/nonrtric-dmaap-adaptor:1.0.0-SNAPSHOT
+    image: "${DMAAP_MEDIATOR_JAVA_BASE}:${DMAAP_MEDIATOR_JAVA_TAG}"
     container_name: dmaap-mediator-java
     networks:
       - default
index 376f734..6de293f 100644 (file)
@@ -22,7 +22,7 @@ networks:
 
 services:
   ecs:
-    image: nexus3.o-ran-sc.org:10003/o-ran-sc/nonrtric-enrichment-coordinator-service:1.2.0-SNAPSHOT
+    image: "${ECS_IMAGE_BASE}:${ECS_IMAGE_TAG}"
     container_name: ecs
     networks:
       default:
@@ -32,7 +32,7 @@ services:
       - 8083:8083
       - 8434:8434
   consumer:
-    image: eexit/mirror-http-server
+    image: "${CONSUMER_IMAGE_BASE}:${CONSUMER_IMAGE_TAG}"
     container_name: consumer
     networks:
       - default
index a593e2e..2dfc38c 100644 (file)
@@ -22,7 +22,7 @@ networks:
 
 services:
   policy-agent:
-    image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-policy-agent:2.3.0
+    image: "${PMS_IMAGE_BASE}:${PMS_IMAGE_TAG}"
     container_name: policy-agent
     networks:
       default:
index ade37f7..5477588 100644 (file)
@@ -22,7 +22,7 @@ networks:
 
 services:
   r-app:
-    image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-r-app-catalogue:1.1.0
+    image: "${RAPP_IMAGE_BASE}:${RAPP_IMAGE_TAG}"
     container_name: r-app
     networks:
       default:
index 1b8e064..b7f23b1 100644 (file)
@@ -67,96 +67,85 @@ public class AsyncRestClient {
         logger.debug("{} POST uri = '{}{}''", traceTag, baseUrl, uri);
         logger.trace("{} POST body: {}", traceTag, body);
         Mono<String> bodyProducer = body != null ? Mono.just(body) : Mono.empty();
-        return getWebClient() //
-            .flatMap(client -> {
-                RequestHeadersSpec<?> request = client.post() //
-                    .uri(uri) //
-                    .contentType(MediaType.APPLICATION_JSON) //
-                    .body(bodyProducer, String.class);
-                return retrieve(traceTag, request);
-            });
+
+        RequestHeadersSpec<?> request = getWebClient() //
+            .post() //
+            .uri(uri) //
+            .contentType(MediaType.APPLICATION_JSON) //
+            .body(bodyProducer, String.class);
+        return retrieve(traceTag, request);
     }
 
     public Mono<String> post(String uri, @Nullable String body) {
         return postForEntity(uri, body) //
-            .flatMap(this::toBody);
+            .map(this::toBody);
     }
 
     public Mono<String> postWithAuthHeader(String uri, String body, String username, String password) {
         Object traceTag = createTraceTag();
         logger.debug("{} POST (auth) uri = '{}{}''", traceTag, baseUrl, uri);
         logger.trace("{} POST body: {}", traceTag, body);
-        return getWebClient() //
-            .flatMap(client -> {
-                RequestHeadersSpec<?> request = client.post() //
-                    .uri(uri) //
-                    .headers(headers -> headers.setBasicAuth(username, password)) //
-                    .contentType(MediaType.APPLICATION_JSON) //
-                    .bodyValue(body);
-                return retrieve(traceTag, request) //
-                    .flatMap(this::toBody);
-            });
+
+        RequestHeadersSpec<?> request = getWebClient() //
+            .post() //
+            .uri(uri) //
+            .headers(headers -> headers.setBasicAuth(username, password)) //
+            .contentType(MediaType.APPLICATION_JSON) //
+            .bodyValue(body);
+        return retrieve(traceTag, request) //
+            .map(this::toBody);
     }
 
     public Mono<ResponseEntity<String>> putForEntity(String uri, String body) {
         Object traceTag = createTraceTag();
         logger.debug("{} PUT uri = '{}{}''", traceTag, baseUrl, uri);
         logger.trace("{} PUT body: {}", traceTag, body);
-        return getWebClient() //
-            .flatMap(client -> {
-                RequestHeadersSpec<?> request = client.put() //
-                    .uri(uri) //
-                    .contentType(MediaType.APPLICATION_JSON) //
-                    .bodyValue(body);
-                return retrieve(traceTag, request);
-            });
+
+        RequestHeadersSpec<?> request = getWebClient() //
+            .put() //
+            .uri(uri) //
+            .contentType(MediaType.APPLICATION_JSON) //
+            .bodyValue(body);
+        return retrieve(traceTag, request);
     }
 
     public Mono<ResponseEntity<String>> putForEntity(String uri) {
         Object traceTag = createTraceTag();
         logger.debug("{} PUT uri = '{}{}''", traceTag, baseUrl, uri);
         logger.trace("{} PUT body: <empty>", traceTag);
-        return getWebClient() //
-            .flatMap(client -> {
-                RequestHeadersSpec<?> request = client.put() //
-                    .uri(uri);
-                return retrieve(traceTag, request);
-            });
+        RequestHeadersSpec<?> request = getWebClient() //
+            .put() //
+            .uri(uri);
+        return retrieve(traceTag, request);
     }
 
     public Mono<String> put(String uri, String body) {
         return putForEntity(uri, body) //
-            .flatMap(this::toBody);
+            .map(this::toBody);
     }
 
     public Mono<ResponseEntity<String>> getForEntity(String uri) {
         Object traceTag = createTraceTag();
         logger.debug("{} GET uri = '{}{}''", traceTag, baseUrl, uri);
-        return getWebClient() //
-            .flatMap(client -> {
-                RequestHeadersSpec<?> request = client.get().uri(uri);
-                return retrieve(traceTag, request);
-            });
+        RequestHeadersSpec<?> request = getWebClient().get().uri(uri);
+        return retrieve(traceTag, request);
     }
 
     public Mono<String> get(String uri) {
         return getForEntity(uri) //
-            .flatMap(this::toBody);
+            .map(this::toBody);
     }
 
     public Mono<ResponseEntity<String>> deleteForEntity(String uri) {
         Object traceTag = createTraceTag();
         logger.debug("{} DELETE uri = '{}{}''", traceTag, baseUrl, uri);
-        return getWebClient() //
-            .flatMap(client -> {
-                RequestHeadersSpec<?> request = client.delete().uri(uri);
-                return retrieve(traceTag, request);
-            });
+        RequestHeadersSpec<?> request = getWebClient().delete().uri(uri);
+        return retrieve(traceTag, request);
     }
 
     public Mono<String> delete(String uri) {
         return deleteForEntity(uri) //
-            .flatMap(this::toBody);
+            .map(this::toBody);
     }
 
     private Mono<ResponseEntity<String>> retrieve(Object traceTag, RequestHeadersSpec<?> request) {
@@ -185,11 +174,11 @@ public class AsyncRestClient {
         }
     }
 
-    private Mono<String> toBody(ResponseEntity<String> entity) {
+    private String toBody(ResponseEntity<String> entity) {
         if (entity.getBody() == null) {
-            return Mono.just("");
+            return "";
         } else {
-            return Mono.just(entity.getBody());
+            return entity.getBody();
         }
     }
 
@@ -229,11 +218,10 @@ public class AsyncRestClient {
             .build();
     }
 
-    private Mono<WebClient> getWebClient() {
+    private WebClient getWebClient() {
         if (this.webClient == null) {
             this.webClient = buildWebClient(baseUrl);
         }
-        return Mono.just(buildWebClient(baseUrl));
+        return this.webClient;
     }
-
 }
index 9609e27..8c056fc 100644 (file)
@@ -298,7 +298,7 @@ public class A1eController {
         return validatePutEiJob(eiJobId, eiJobObject) //
             .flatMap(this::startEiJob) //
             .doOnNext(newEiJob -> this.eiJobs.put(newEiJob)) //
-            .flatMap(newEiJob -> Mono.just(new ResponseEntity<>(isNewJob ? HttpStatus.CREATED : HttpStatus.OK)))
+            .map(newEiJob -> new ResponseEntity<>(isNewJob ? HttpStatus.CREATED : HttpStatus.OK)) //
             .onErrorResume(throwable -> Mono.just(ErrorResponse.create(throwable, HttpStatus.INTERNAL_SERVER_ERROR)));
     }
 
@@ -306,7 +306,7 @@ public class A1eController {
         return this.producerCallbacks.startInfoSubscriptionJob(newEiJob, infoProducers) //
             .doOnNext(noOfAcceptingProducers -> this.logger.debug(
                 "Started EI job {}, number of activated producers: {}", newEiJob.getId(), noOfAcceptingProducers)) //
-            .flatMap(noOfAcceptingProducers -> Mono.just(newEiJob));
+            .map(noOfAcceptingProducers -> newEiJob);
     }
 
     private Mono<InfoJob> validatePutEiJob(String eiJobId, A1eEiJobInfo eiJobInfo) {
index 47a4a2e..b108380 100644 (file)
@@ -308,7 +308,7 @@ public class ConsumerController {
         return validatePutInfoJob(jobId, informationJobObject, performTypeCheck) //
             .flatMap(this::startInfoSubscriptionJob) //
             .doOnNext(this.infoJobs::put) //
-            .flatMap(newEiJob -> Mono.just(new ResponseEntity<>(isNewJob ? HttpStatus.CREATED : HttpStatus.OK)))
+            .map(newEiJob -> new ResponseEntity<>(isNewJob ? HttpStatus.CREATED : HttpStatus.OK)) //
             .onErrorResume(throwable -> Mono.just(ErrorResponse.create(throwable, HttpStatus.NOT_FOUND)));
     }
 
@@ -441,7 +441,7 @@ public class ConsumerController {
         return this.producerCallbacks.startInfoSubscriptionJob(newInfoJob, infoProducers) //
             .doOnNext(noOfAcceptingProducers -> this.logger.debug("Started job {}, number of activated producers: {}",
                 newInfoJob.getId(), noOfAcceptingProducers)) //
-            .flatMap(noOfAcceptingProducers -> Mono.just(newInfoJob));
+            .map(noOfAcceptingProducers -> newInfoJob);
     }
 
     private Mono<InfoJob> validatePutInfoJob(String jobId, ConsumerJobInfo jobInfo, boolean performTypeCheck) {
index a97bdf6..558ae79 100644 (file)
@@ -84,7 +84,7 @@ public class ProducerCallbacks {
         return Flux.fromIterable(getProducersForJob(infoJob, infoProducers)) //
             .flatMap(infoProducer -> startInfoJob(infoProducer, infoJob, retrySpec)) //
             .collectList() //
-            .flatMap(okResponses -> Mono.just(Integer.valueOf(okResponses.size()))); //
+            .map(okResponses -> Integer.valueOf(okResponses.size())); //
     }
 
     /**
index 65978e1..533199f 100644 (file)
@@ -222,8 +222,7 @@ public class InfoTypeSubscriptions {
     private Mono<String> notifySubscriber(Function<? super SubscriptionInfo, Mono<String>> notifyFunc,
         SubscriptionInfo subscriptionInfo) {
         Retry retrySpec = Retry.backoff(3, Duration.ofSeconds(1));
-        return Mono.just(1) //
-            .flatMap(notUsed -> notifyFunc.apply(subscriptionInfo)) //
+        return notifyFunc.apply(subscriptionInfo) //
             .retryWhen(retrySpec) //
             .onErrorResume(throwable -> {
                 logger.warn("Consumer callback failed {}, removing subscription {}", throwable.getMessage(),
index db7c29b..08c5fc8 100644 (file)
@@ -80,7 +80,7 @@ public class ProducerSupervision {
             })//
             .doOnNext(response -> handleRespondingProducer(response, producer))
             .flatMap(response -> checkProducerJobs(producer)) //
-            .flatMap(responses -> Mono.just(producer));
+            .map(responses -> producer);
     }
 
     private Mono<?> checkProducerJobs(InfoProducer producer) {
index 4418429..8c8ce5f 100644 (file)
@@ -1028,7 +1028,7 @@ class ApplicationTest {
         // Test that subscriptions are removed for an unresponsive consumer
 
         // PUT a subscription with a junk callback
-        final ConsumerTypeSubscriptionInfo info = new ConsumerTypeSubscriptionInfo(baseUrl() + "JUNK", "owner");
+        final ConsumerTypeSubscriptionInfo info = new ConsumerTypeSubscriptionInfo(baseUrl() + "/JUNK", "owner");
         String body = gson.toJson(info);
         restClient().putForEntity(typeSubscriptionUrl() + "/subscriptionId", body).block();
         assertThat(this.infoTypeSubscriptions.size()).isEqualTo(1);
index 3b916e4..558d6d2 160000 (submodule)
--- a/onap/oran
+++ b/onap/oran
@@ -1 +1 @@
-Subproject commit 3b916e4dc5777863cb4ee873b41ee460fb9aec27
+Subproject commit 558d6d2de33bb8cf4b16df980a0cdf3b1747a8e2
index 5d718b0..e4ffe75 100755 (executable)
@@ -24,7 +24,7 @@ TC_ONELINE_DESCR="Sanity test, create service and then create, update and delete
 DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR DMAAPMR PA RICSIM SDNC NGW KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
-KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
+KUBE_INCLUDED_IMAGES="CP CR MR DMAAPMR PA RICSIM SDNC NGW KUBEPROXY "
 #Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
 KUBE_PRESTARTED_IMAGES=""
 
@@ -119,7 +119,8 @@ for __httpx in $TESTED_PROTOCOLS ; do
             start_ric_simulators ricsim_g3 1  STD_2.0.0
         fi
 
-        start_mr
+        start_mr    "$MR_READ_TOPIC"  "/events" "users/policy-agent" \
+                    "$MR_WRITE_TOPIC" "/events" "users/mr-stub"
 
         start_cr
 
index da4bf1e..4c261b4 100755 (executable)
 TC_ONELINE_DESCR="App test DMAAP Meditor and DMAAP Adapter"
 
 #App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="ECS DMAAPMED DMAAPADP KUBEPROXY MR CR"
+DOCKER_INCLUDED_IMAGES="ECS DMAAPMED DMAAPADP KUBEPROXY MR DMAAPMR CR"
 
 #App names to include in the test when running kubernetes, space separated list
-KUBE_INCLUDED_IMAGES=" ECS DMAAPMED DMAAPADP KUBEPROXY MR CR"
+KUBE_INCLUDED_IMAGES=" ECS DMAAPMED DMAAPADP KUBEPROXY MR DMAAPMR CR"
 
 #Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
 KUBE_PRESTARTED_IMAGES=""
@@ -81,7 +81,9 @@ start_ecs NOPROXY $SIM_GROUP/$ECS_COMPOSE_DIR/$ECS_CONFIG_FILE
 
 set_ecs_trace
 
-start_mr
+start_mr    "unauthenticated.dmaapmed.json" "/events" "dmaapmediatorproducer/STD_Fault_Messages" \
+            "unauthenticated.dmaapadp.json" "/events" "dmaapadapterproducer/msgs" \
+            "unauthenticated.dmaapadp_kafka.text" "/events" "dmaapadapterproducer/msgs"
 
 start_dmaapadp NOPROXY $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_CONFIG_FILE $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_DATA_FILE
 
@@ -93,23 +95,33 @@ ecs_equal json:data-producer/v1/info-producers 2 60
 
 # Check producers
 ecs_api_idc_get_job_ids 200 NOTYPE NOWNER EMPTY
-ecs_api_idc_get_type_ids 200 ExampleInformationType STD_Fault_Messages
+ecs_api_idc_get_type_ids 200 ExampleInformationType STD_Fault_Messages ExampleInformationTypeKafka
 ecs_api_edp_get_producer_ids_2 200 NOTYPE DmaapGenericInfoProducer DMaaP_Mediator_Producer
 
 
-# Create jobs for adapter
+# Create jobs for adapter - CR stores data as MD5 hash
 start_timer "Create adapter jobs: $NUM_JOBS"
 for ((i=1; i<=$NUM_JOBS; i++))
 do
-    ecs_api_idc_put_job 201 job-adp-$i ExampleInformationType $CR_SERVICE_MR_PATH/job-adp-data$i info-owner-adp-$i $CR_SERVICE_MR_PATH/job_status_info-owner-adp-$i testdata/dmaap-adapter/job-template.json
+    ecs_api_idc_put_job 201 job-adp-$i ExampleInformationType $CR_SERVICE_MR_PATH/job-adp-data$i"?storeas=md5" info-owner-adp-$i $CR_SERVICE_APP_PATH/job_status_info-owner-adp-$i testdata/dmaap-adapter/job-template.json
+
 done
 print_timer "Create adapter jobs: $NUM_JOBS"
 
-# Create jobs for mediator
+# Create jobs for adapter kafka - CR stores data as MD5 hash
+start_timer "Create adapter (kafka) jobs: $NUM_JOBS"
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    ecs_api_idc_put_job 201 job-adp-kafka-$i ExampleInformationTypeKafka $CR_SERVICE_TEXT_PATH/job-adp-kafka-data$i"?storeas=md5" info-owner-adp-kafka-$i $CR_SERVICE_APP_PATH/job_status_info-owner-adp-kafka-$i testdata/dmaap-adapter/job-template-1-kafka.json
+
+done
+print_timer "Create adapter (kafka) jobs: $NUM_JOBS"
+
+# Create jobs for mediator - CR stores data as MD5 hash
 start_timer "Create mediator jobs: $NUM_JOBS"
 for ((i=1; i<=$NUM_JOBS; i++))
 do
-    ecs_api_idc_put_job 201 job-med-$i STD_Fault_Messages $CR_SERVICE_MR_PATH/job-med-data$i info-owner-med-$i $CR_SERVICE_MR_PATH/job_status_info-owner-med-$i testdata/dmaap-adapter/job-template.json
+    ecs_api_idc_put_job 201 job-med-$i STD_Fault_Messages $CR_SERVICE_MR_PATH/job-med-data$i"?storeas=md5" info-owner-med-$i $CR_SERVICE_APP_PATH/job_status_info-owner-med-$i testdata/dmaap-adapter/job-template.json
 done
 print_timer "Create mediator jobs: $NUM_JOBS"
 
@@ -118,11 +130,117 @@ for ((i=1; i<=$NUM_JOBS; i++))
 do
     ecs_api_a1_get_job_status 200 job-med-$i ENABLED 30
     ecs_api_a1_get_job_status 200 job-adp-$i ENABLED 30
+    ecs_api_a1_get_job_status 200 job-adp-kafka-$i ENABLED 30
 done
 
+
 EXPECTED_DATA_DELIV=0
 
-# Send data to adapter via mr
+mr_api_generate_json_payload_file 1 ./tmp/data_for_dmaap_test.json
+mr_api_generate_text_payload_file 1 ./tmp/data_for_dmaap_test.txt
+
+## Send json file via message-router to adapter
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+
+mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+# Check received data callbacks from adapter
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json
+    cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json
+    cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json
+    cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json
+    cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json
+done
+
+
+## Send text file via message-router to adapter kafka
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+
+mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+# Check received data callbacks from adapter kafka
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt
+    cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt
+    cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt
+    cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt
+    cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt
+done
+
+## Send json file via message-router to mediator
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+
+mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+# Check received data callbacks from mediator
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+    cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json
+    cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json
+    cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json
+    cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json
+    cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json
+done
+
+
+# Send small json via message-router to adapter
 mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-1"}'
 mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-3"}'
 
@@ -131,9 +249,18 @@ EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
 start_timer "Data delivery adapter, 2 json per job"
 cr_equal received_callbacks $EXPECTED_DATA_DELIV 100
 print_timer "Data delivery adapter, 2 json per job"
-EXPECTED_DATA_DELIV=$(cr_read received_callbacks)
 
-# Send data to mediator
+# Send small text via message-router to adapter kafka
+mr_api_send_text "/events/unauthenticated.dmaapadp_kafka.text" 'Message-------1'
+mr_api_send_text "/events/unauthenticated.dmaapadp_kafka.text" 'Message-------3'
+
+# Wait for data reception, adapter kafka
+EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
+start_timer "Data delivery adapte kafkar, 2 strings per job"
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 100
+print_timer "Data delivery adapte kafkar, 2 strings per job"
+
+# Send small json via message-router to mediator
 mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-0"}'
 mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-2"}'
 
@@ -142,73 +269,85 @@ EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
 start_timer "Data delivery mediator, 2 json per job"
 cr_equal received_callbacks $EXPECTED_DATA_DELIV 100
 print_timer "Data delivery mediator, 2 json per job"
-EXPECTED_DATA_DELIV=$(cr_read received_callbacks)
 
 # Check received number of messages for mediator and adapter callbacks
 for ((i=1; i<=$NUM_JOBS; i++))
 do
-    cr_equal received_callbacks?id=job-med-data$i 2
-    cr_equal received_callbacks?id=job-adp-data$i 2
+    cr_equal received_callbacks?id=job-med-data$i 7
+    cr_equal received_callbacks?id=job-adp-data$i 7
+    cr_equal received_callbacks?id=job-adp-kafka-data$i 7
 done
 
 # Check received data and order for mediator and adapter callbacks
 for ((i=1; i<=$NUM_JOBS; i++))
 do
-    cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-0"}'
-    cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-2"}'
-    cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-1"}'
-    cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-3"}'
+    cr_api_check_single_genric_event_md5 200 job-med-data$i '{"msg":"msg-0"}'
+    cr_api_check_single_genric_event_md5 200 job-med-data$i '{"msg":"msg-2"}'
+    cr_api_check_single_genric_event_md5 200 job-adp-data$i '{"msg":"msg-1"}'
+    cr_api_check_single_genric_event_md5 200 job-adp-data$i '{"msg":"msg-3"}'
+    cr_api_check_single_genric_event_md5 200 job-adp-kafka-data$i 'Message-------1'
+    cr_api_check_single_genric_event_md5 200 job-adp-kafka-data$i 'Message-------3'
 done
 
 # Set delay in the callback receiver to slow down callbacks
-SEC_DELAY=5
+SEC_DELAY=2
 cr_delay_callback 200 $SEC_DELAY
 
-# Send data to adapter via mr
+# Send small json via message-router to adapter
 mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-5"}'
 mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-7"}'
 
 # Wait for data reception, adapter
 EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
-start_timer "Data delivery adapter with $SEC_DELAY seconds delay, 2 json per job"
+start_timer "Data delivery adapter with $SEC_DELAY seconds delay in consumer, 2 json per job"
 cr_equal received_callbacks $EXPECTED_DATA_DELIV $(($NUM_JOBS+300))
-print_timer "Data delivery adapter with $SEC_DELAY seconds delay, 2 json per job"
-EXPECTED_DATA_DELIV=$(cr_read received_callbacks)
+print_timer "Data delivery adapter with $SEC_DELAY seconds delay in consumer, 2 json per job"
+
 
+# Send small text via message-router to adapter kafka
+mr_api_send_text "/events/unauthenticated.dmaapadp_kafka.text" 'Message-------5'
+mr_api_send_text "/events/unauthenticated.dmaapadp_kafka.text" 'Message-------7'
 
-# Send data to mediator
+# Wait for data reception, adapter kafka
+EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
+start_timer "Data delivery adapter kafka with $SEC_DELAY seconds delay in consumer, 2 strings per job"
+cr_equal received_callbacks $EXPECTED_DATA_DELIV $(($NUM_JOBS+300))
+print_timer "Data delivery adapter with kafka $SEC_DELAY seconds delay in consumer, 2 strings per job"
+
+
+# Send small json via message-router to mediator
 mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-4"}'
 mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-6"}'
 
 # Wait for data reception, mediator
 EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
-start_timer "Data delivery mediator with $SEC_DELAY seconds delay, 2 json per job"
+start_timer "Data delivery mediator with $SEC_DELAY seconds delay in consumer, 2 json per job"
 cr_equal received_callbacks $EXPECTED_DATA_DELIV 1000
-print_timer "Data delivery mediator with $SEC_DELAY seconds delay, 2 json per job"
-EXPECTED_DATA_DELIV=$(cr_read received_callbacks)
+print_timer "Data delivery mediator with $SEC_DELAY seconds delay in consumer, 2 json per job"
 
 # Check received number of messages for mediator and adapter callbacks
 for ((i=1; i<=$NUM_JOBS; i++))
 do
-    cr_equal received_callbacks?id=job-med-data$i 4
-    cr_equal received_callbacks?id=job-adp-data$i 4
+    cr_equal received_callbacks?id=job-med-data$i 9
+    cr_equal received_callbacks?id=job-adp-data$i 9
+    cr_equal received_callbacks?id=job-adp-kafka-data$i 9
 done
 
 # Check received data and order for mediator and adapter callbacks
 for ((i=1; i<=$NUM_JOBS; i++))
 do
-    cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-4"}'
-    cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-6"}'
-    cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-5"}'
-    cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-7"}'
+    cr_api_check_single_genric_event_md5 200 job-med-data$i '{"msg":"msg-4"}'
+    cr_api_check_single_genric_event_md5 200 job-med-data$i '{"msg":"msg-6"}'
+    cr_api_check_single_genric_event_md5 200 job-adp-data$i '{"msg":"msg-5"}'
+    cr_api_check_single_genric_event_md5 200 job-adp-data$i '{"msg":"msg-7"}'
+    cr_api_check_single_genric_event_md5 200 job-adp-kafka-data$i 'Message-------5'
+    cr_api_check_single_genric_event_md5 200 job-adp-kafka-data$i 'Message-------7'
 done
 
-
-
 #### TEST COMPLETE ####
 
 store_logs          END
 
 print_result
 
-auto_clean_environment
\ No newline at end of file
+auto_clean_environment
index 15b5c5b..03697bc 100755 (executable)
@@ -23,7 +23,7 @@ TC_ONELINE_DESCR="ONAP Use case REQ-626"
 DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR DMAAPMR PA RICSIM SDNC NGW KUBEPROXY"
 
 #App names to include in the test when running kubernetes, space separated list
-KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
+KUBE_INCLUDED_IMAGES="CP CR MR DMAAPMR PA RICSIM SDNC KUBEPROXY NGW"
 #Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
 KUBE_PRESTARTED_IMAGES=""
 
@@ -99,7 +99,8 @@ for interface in $TESTED_VARIANTS ; do
 
     start_ric_simulators $RIC_SIM_PREFIX"_g3" $STD_NUM_RICS STD_2.0.0
 
-    start_mr
+    start_mr    "$MR_READ_TOPIC"  "/events" "users/policy-agent" \
+                "$MR_WRITE_TOPIC" "/events" "users/mr-stub"
 
     start_control_panel $SIM_GROUP/$CONTROL_PANEL_COMPOSE_DIR/$CONTROL_PANEL_CONFIG_FILE
 
index 47b4514..27bdb4e 100755 (executable)
@@ -56,7 +56,11 @@ setup_testenvironment
 
 clean_environment
 start_kube_proxy
-start_mr
+start_mr    "$MR_READ_TOPIC"  "/events" "users/policy-agent" \
+            "$MR_WRITE_TOPIC" "/events" "users/mr-stub" \
+            "unauthenticated.dmaapadp.json" "/events" "dmaapadapterproducer/msgs" \
+            "unauthenticated.dmaapmed.json" "/events" "dmaapmediatorproducer/STD_Fault_Messages"
+
 if [ $RUNMODE == "KUBE" ]; then
     :
 else
diff --git a/test/auto-test/testdata/dmaap-adapter/job-schema-1-kafka b/test/auto-test/testdata/dmaap-adapter/job-schema-1-kafka
new file mode 100644 (file)
index 0000000..290b70a
--- /dev/null
@@ -0,0 +1,28 @@
+{
+  "$schema": "http://json-schema.org/draft-04/schema#",
+  "type": "object",
+  "properties": {
+    "filter": {
+      "type": "string"
+    },
+    "maxConcurrency": {
+      "type": "integer"
+    },
+    "bufferTimeout": {
+      "type": "object",
+      "properties": {
+        "maxSize": {
+          "type": "integer"
+        },
+        "maxTimeMiliseconds": {
+          "type": "integer"
+        }
+      },
+      "required": [
+        "maxSize",
+        "maxTimeMiliseconds"
+      ]
+    }
+  },
+  "required": []
+}
\ No newline at end of file
diff --git a/test/auto-test/testdata/dmaap-adapter/job-template-1-kafka.json b/test/auto-test/testdata/dmaap-adapter/job-template-1-kafka.json
new file mode 100644 (file)
index 0000000..d549397
--- /dev/null
@@ -0,0 +1,7 @@
+{
+  "maxConcurrency": 1,
+  "bufferTimeout": {
+      "maxSize": 1,
+      "maxTimeMiliseconds": 0
+  }
+}
\ No newline at end of file
index 18b9656..3577cfa 100644 (file)
@@ -153,6 +153,7 @@ The script can be started with these arguments
 | `--print-stats` |  Prints the number of tests, failed tests, failed configurations and deviations after each individual test or config |
 | `--override <file>` |  Override setting from the file supplied by --env-file |
+| `--pre-clean` |  Clean kube resources when running docker and vice versa |
+| `--gen-stats`  | Collect container/pod runtime statistics |
 | `help` | Print this info along with the test script description and the list of app short names supported |
 
 ## Function: setup_testenvironment ##
index a1fd657..4cedad1 100644 (file)
@@ -91,6 +91,19 @@ __PA_initial_setup() {
        use_agent_rest_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__PA_statisics_setup() {
+       if [ $RUNMODE == "KUBE" ]; then
+               echo "PA $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+       else
+               echo "PA $POLICY_AGENT_APP_NAME"
+       fi
+}
+
+
 #######################################################
 
 ###########################
index 17f80a5..f2777eb 100644 (file)
@@ -23,7 +23,8 @@
 # one for sending the requests and one for receiving the response
 # but only when using the DMAAP interface
 # REST or DMAAP is controlled by the base url of $XX_ADAPTER
-# arg: (PA|ECS|CR|RC GET|PUT|POST|DELETE|GET_BATCH|PUT_BATCH|POST_BATCH|DELETE_BATCH <url>|<correlation-id> [<file>]) | (PA|ECS RESPONSE <correlation-id>)
+# arg: (PA|ECS|CR|RC GET|PUT|POST|DELETE|GET_BATCH|PUT_BATCH|POST_BATCH|DELETE_BATCH <url>|<correlation-id> [<file> [mime-type]]) | (PA|ECS RESPONSE <correlation-id>)
+# Default mime type for file is application/json unless specified in parameter mime-type
 # (Not for test scripts)
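+# Example (illustrative, not an actual call from the scripts): POST a text file
+# with an explicit mime type, assuming the DMAAPMR target and the topic exist:
+#   __do_curl_to_api DMAAPMR POST "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt "text/plain"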
 __do_curl_to_api() {
        TIMESTAMP=$(date "+%Y-%m-%d %H:%M:%S")
@@ -39,6 +40,7 @@ __do_curl_to_api() {
 
        paramError=0
        input_url=$3
+       fname=$4
     if [ $# -gt 0 ]; then
         if [ $1 == "PA" ]; then
                        __ADAPTER=$PA_ADAPTER
@@ -75,17 +77,21 @@ __do_curl_to_api() {
                        __ADAPTER=$MR_STUB_ADAPTER
                        __ADAPTER_TYPE=$MR_STUB_ADAPTER_TYPE
             __RETRY_CODES=""
-        else
+        elif [ $1 == "DMAAPMR" ]; then
+                       __ADAPTER=$MR_DMAAP_ADAPTER_HTTP
+                       __ADAPTER_TYPE=$MR_DMAAP_ADAPTER_TYPE
+            __RETRY_CODES=""
+               else
             paramError=1
         fi
-               if [ $__ADAPTER_TYPE == "MR-HTTP" ]; then
+               if [ "$__ADAPTER_TYPE" == "MR-HTTP" ]; then
                        __ADAPTER=$MR_ADAPTER_HTTP
                fi
-               if [ $__ADAPTER_TYPE == "MR-HTTPS" ]; then
+               if [ "$__ADAPTER_TYPE" == "MR-HTTPS" ]; then
                        __ADAPTER=$MR_ADAPTER_HTTPS
                fi
     fi
-    if [ $# -lt 3 ] || [ $# -gt 4 ]; then
+    if [ $# -lt 3 ] || [ $# -gt 5 ]; then
                paramError=1
     else
                timeout=""
@@ -100,6 +106,10 @@ __do_curl_to_api() {
                fi
                if [ $# -gt 3 ]; then
                        content=" -H Content-Type:application/json"
+                       fname=$4
+                       if [ $# -gt 4 ]; then
+                               content=" -H Content-Type:"$5
+                       fi
                fi
                if [ $2 == "GET" ] || [ $2 == "GET_BATCH" ]; then
                        oper="GET"
@@ -108,15 +118,15 @@ __do_curl_to_api() {
                        fi
                elif [ $2 == "PUT" ] || [ $2 == "PUT_BATCH" ]; then
                        oper="PUT"
-                       if [ $# -eq 4 ]; then
-                               file=" --data-binary @$4"
+                       if [ $# -gt 3 ]; then
+                               file=" --data-binary @$fname"
                        fi
                        accept=" -H accept:application/json"
                elif [ $2 == "POST" ] || [ $2 == "POST_BATCH" ]; then
                        oper="POST"
                        accept=" -H accept:*/*"
-                       if [ $# -eq 4 ]; then
-                               file=" --data-binary @$4"
+                       if [ $# -gt 3 ]; then
+                               file=" --data-binary @$fname"
                                accept=" -H accept:application/json"
                        fi
                elif [ $2 == "DELETE" ] || [ $2 == "DELETE_BATCH" ]; then
@@ -153,8 +163,8 @@ __do_curl_to_api() {
         oper=" -X "$oper
         curlString="curl -k $proxyflag "${oper}${timeout}${httpcode}${accept}${content}${url}${file}
         echo " CMD: "$curlString >> $HTTPLOG
-               if [ $# -eq 4 ]; then
-                       echo " FILE: $(<$4)" >> $HTTPLOG
+               if [ $# -gt 3 ]; then
+                       echo " FILE: $(<$fname)" >> $HTTPLOG
                fi
 
                # Do retry for configured response codes, otherwise only one attempt
@@ -190,12 +200,12 @@ __do_curl_to_api() {
     else
                if [ $oper != "RESPONSE" ]; then
                        requestUrl=$input_url
-                       if [ $2 == "PUT" ] && [ $# -eq 4 ]; then
-                               payload="$(cat $4 | tr -d '\n' | tr -d ' ' )"
+                       if [ $2 == "PUT" ] && [ $# -gt 3 ]; then
+                               payload="$(cat $fname | tr -d '\n' | tr -d ' ' )"
                                echo "payload: "$payload >> $HTTPLOG
                                file=" --data-binary "$payload
-                       elif [ $# -eq 4 ]; then
-                               echo " FILE: $(cat $4)" >> $HTTPLOG
+                       elif [ $# -gt 3 ]; then
+                               echo " FILE: $(cat $fname)" >> $HTTPLOG
                        fi
                        #urlencode the request url since it will be carried by send-request url
                        requestUrl=$(python3 -c "from __future__ import print_function; import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1]))"  "$input_url")
index 747eaab..cd1b16c 100644 (file)
@@ -165,6 +165,21 @@ __CBS_initial_setup() {
        CBS_SERVICE_PATH="http://"$CBS_APP_NAME":"$CBS_INTERNAL_PORT
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__CONSUL_statisics_setup() {
+       echo ""
+}
+
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__CBS_statisics_setup() {
+       echo ""
+}
 #######################################################
 
 
index eda6fe3..295e16a 100644 (file)
@@ -91,6 +91,19 @@ __CP_store_docker_logs() {
 __CP_initial_setup() {
        use_control_panel_http
 }
+
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__CP_statisics_setup() {
+       if [ $RUNMODE == "KUBE" ]; then
+               echo "CP $CONTROL_PANEL_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+       else
+               echo "CP $CONTROL_PANEL_APP_NAME"
+       fi
+}
+
 #######################################################
 
 
index 4027f30..b3ef07b 100644 (file)
@@ -73,7 +73,7 @@ __SDNC_image_data() {
 # All resources shall be ordered to be scaled to 0, if relevant. If not relevant to scale, then take no action.
 # This function is called for apps fully managed by the test script
 __SDNC_kube_scale_zero() {
-       __kube_scale_all_resources $KUBE_SNDC_NAMESPACE autotest SDNC
+       __kube_scale_all_resources $KUBE_SDNC_NAMESPACE autotest SDNC
 }
 
 # Scale kubernetes resources to zero and wait until this has been accomplished, if relevant. If not relevant to scale, then do no action.
@@ -85,7 +85,7 @@ __SDNC_kube_scale_zero_and_wait() {
 # Delete all kube resources for the app
 # This function is called for apps managed by the test script.
 __SDNC_kube_delete_all() {
-       __kube_delete_all_resources $KUBE_SNDC_NAMESPACE autotest SDNC
+       __kube_delete_all_resources $KUBE_SDNC_NAMESPACE autotest SDNC
 }
 
 # Store docker logs
@@ -93,9 +93,9 @@ __SDNC_kube_delete_all() {
 # args: <log-dir> <file-prexix>
 __SDNC_store_docker_logs() {
        if [ $RUNMODE == "KUBE" ]; then
-               kubectl  logs -l "autotest=SDNC" -n $KUBE_SNDC_NAMESPACE --tail=-1 > $1$2_SDNC.log 2>&1
-               podname=$(kubectl get pods -n $KUBE_SNDC_NAMESPACE -l "autotest=SDNC" -o custom-columns=":metadata.name")
-               kubectl exec -t -n $KUBE_SNDC_NAMESPACE $podname -- cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
+               kubectl  logs -l "autotest=SDNC" -n $KUBE_SDNC_NAMESPACE --tail=-1 > $1$2_SDNC.log 2>&1
+               podname=$(kubectl get pods -n $KUBE_SDNC_NAMESPACE -l "autotest=SDNC" -o custom-columns=":metadata.name")
+               kubectl exec -t -n $KUBE_SDNC_NAMESPACE $podname -- cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
        else
                docker exec -t $SDNC_APP_NAME cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
        fi
@@ -108,6 +108,18 @@ __SDNC_initial_setup() {
        use_sdnc_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__SDNC_statisics_setup() {
+       if [ $RUNMODE == "KUBE" ]; then
+               echo "SDNC $SDNC_APP_NAME $KUBE_SDNC_NAMESPACE"
+       else
+               echo "SDNC $SDNC_APP_NAME"
+       fi
+}
+
 #######################################################
 
 # Set http as the protocol to use for all communication to SDNC
@@ -135,8 +147,8 @@ __sdnc_set_protocoll() {
        SDNC_SERVICE_PATH=$1"://"$SDNC_APP_NAME":"$2  # docker access, container->container and script->container via proxy
        SDNC_SERVICE_API_PATH=$1"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_APP_NAME":"$1$SDNC_API_URL
        if [ $RUNMODE == "KUBE" ]; then
-               SDNC_SERVICE_PATH=$1"://"$SDNC_APP_NAME.$KUBE_SNDC_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
-               SDNC_SERVICE_API_PATH=$1"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_APP_NAME.KUBE_SNDC_NAMESPACE":"$1$SDNC_API_URL
+               SDNC_SERVICE_PATH=$1"://"$SDNC_APP_NAME.$KUBE_SDNC_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
+		SDNC_SERVICE_API_PATH=$1"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_APP_NAME.$KUBE_SDNC_NAMESPACE":"$1$SDNC_API_URL
        fi
        echo ""
 
@@ -145,7 +157,7 @@ __sdnc_set_protocoll() {
 # Export env vars for config files, docker compose and kube resources
 # args:
 __sdnc_export_vars() {
-       export KUBE_SNDC_NAMESPACE
+       export KUBE_SDNC_NAMESPACE
        export DOCKER_SIM_NWNAME
 
        export SDNC_APP_NAME
@@ -199,7 +211,7 @@ start_sdnc() {
                if [ $retcode_p -eq 0 ]; then
                        echo -e " Using existing $SDNC_APP_NAME deployment and service"
                        echo " Setting SDNC replicas=1"
-                       __kube_scale deployment $SDNC_APP_NAME $KUBE_SNDC_NAMESPACE 1
+                       __kube_scale deployment $SDNC_APP_NAME $KUBE_SDNC_NAMESPACE 1
                fi
 
                                # Check if app shall be fully managed by the test script
@@ -208,7 +220,7 @@ start_sdnc() {
                        echo -e " Creating $SDNC_APP_NAME app and expose service"
 
                        #Check if namespace exists, if not create it
-                       __kube_create_namespace $KUBE_SNDC_NAMESPACE
+                       __kube_create_namespace $KUBE_SDNC_NAMESPACE
 
                        __sdnc_export_vars
 
index ba46510..a537bc8 100644 (file)
@@ -107,6 +107,18 @@ __CR_initial_setup() {
        use_cr_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__CR_statisics_setup() {
+       if [ $RUNMODE == "KUBE" ]; then
+               echo "CR $CR_APP_NAME $KUBE_SIM_NAMESPACE"
+       else
+               echo "CR $CR_APP_NAME"
+       fi
+}
+
 #######################################################
 
 ################
@@ -142,6 +154,7 @@ __cr_set_protocoll() {
        fi
 	# Service paths are used in test script to provide callback urls to app
        CR_SERVICE_MR_PATH=$CR_SERVICE_PATH$CR_APP_CALLBACK_MR  #Only for messages from dmaap adapter/mediator
+       CR_SERVICE_TEXT_PATH=$CR_SERVICE_PATH$CR_APP_CALLBACK_TEXT  #Callbacks for text payload
        CR_SERVICE_APP_PATH=$CR_SERVICE_PATH$CR_APP_CALLBACK    #For general callbacks from apps
 
        # CR_ADAPTER used for switching between REST and DMAAP (only REST supported currently)
@@ -573,6 +586,10 @@ cr_api_check_single_genric_json_event() {
        body=${res:0:${#res}-3}
        targetJson=$3
 
+	if [ "$targetJson" == "EMPTY" ] && [ ${#body} -ne 0 ]; then
+               __log_test_fail_body
+               return 1
+       fi
        echo " TARGET JSON: $targetJson" >> $HTTPLOG
        res=$(python3 ../common/compare_json.py "$targetJson" "$body")
 
@@ -581,6 +598,126 @@ cr_api_check_single_genric_json_event() {
                return 1
        fi
 
+       __log_test_pass
+       return 0
+}
+
+# CR API: Check a single (oldest) json in md5 format (or none if empty) for path.
+# Note that if a json message is given, it shall be compact - no whitespace except inside strings.
+# The MD5 hash will differ between otherwise equivalent json messages if the whitespace differs.
+# arg: <response-code> <topic-url> (EMPTY | <data-msg> )
+# (Function for test scripts)
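+# Example (illustrative, mirroring the calls in the auto-test scripts):
+#   cr_api_check_single_genric_event_md5 200 job-adp-data1 '{"msg":"msg-1"}'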
+cr_api_check_single_genric_event_md5() {
+       __log_test_start $@
+
+       if [ $# -ne 3 ]; then
+               __print_err "<response-code> <topic-url> (EMPTY | <data-msg> )" $@
+               return 1
+       fi
+
+       query="/get-event/"$2
+       res="$(__do_curl_to_api CR GET $query)"
+       status=${res:${#res}-3}
+
+       if [ $status -ne $1 ]; then
+               __log_test_fail_status_code $1 $status
+               return 1
+       fi
+       body=${res:0:${#res}-3}
+	if [ "$3" == "EMPTY" ]; then
+               if [ ${#body} -ne 0 ]; then
+                       __log_test_fail_body
+                       return 1
+               else
+                       __log_test_pass
+                       return 0
+               fi
+       fi
+       command -v md5 > /dev/null # Mac
+       if [ $? -eq 0 ]; then
+               targetMd5=$(echo -n "$3" | md5)
+       else
+               command -v md5sum > /dev/null # Linux
+               if [ $? -eq 0 ]; then
+                       targetMd5=$(echo -n "$3" | md5sum | cut -d' ' -f 1)  # Need to cut additional info printed by cmd
+               else
+			__log_test_fail_general "Neither md5 nor md5sum command is available"
+                       return 1
+               fi
+       fi
+       targetMd5="\""$targetMd5"\"" #Quotes needed
+
+       echo " TARGET MD5 hash: $targetMd5" >> $HTTPLOG
+
+       if [ "$body" != "$targetMd5" ]; then
+               __log_test_fail_body
+               return 1
+       fi
+
+       __log_test_pass
+       return 0
+}
+
+# CR API: Check a single (oldest) event in md5 format (or none if empty) for path.
+# Note that if a file with a json message is given, the json shall be compact - no newlines and no whitespace except inside strings.
+# The MD5 hash will differ between otherwise equivalent json messages if the whitespace or newlines differ.
+# arg: <response-code> <topic-url> (EMPTY | <data-file> )
+# (Function for test scripts)
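+# Example (illustrative, mirroring the calls in the auto-test scripts):
+#   cr_api_check_single_genric_event_md5_file 200 job-med-data1 ./tmp/data_for_dmaap_test.json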
+cr_api_check_single_genric_event_md5_file() {
+       __log_test_start $@
+
+       if [ $# -ne 3 ]; then
+               __print_err "<response-code> <topic-url> (EMPTY | <data-file> )" $@
+               return 1
+       fi
+
+       query="/get-event/"$2
+       res="$(__do_curl_to_api CR GET $query)"
+       status=${res:${#res}-3}
+
+       if [ $status -ne $1 ]; then
+               __log_test_fail_status_code $1 $status
+               return 1
+       fi
+       body=${res:0:${#res}-3}
+	if [ "$3" == "EMPTY" ]; then
+               if [ ${#body} -ne 0 ]; then
+                       __log_test_fail_body
+                       return 1
+               else
+                       __log_test_pass
+                       return 0
+               fi
+       fi
+
+	if [ ! -f "$3" ]; then
+		__log_test_fail_general "File $3 does not exist"
+		return 1
+	fi
+
+	filedata=$(cat "$3")
+
+       command -v md5 > /dev/null # Mac
+       if [ $? -eq 0 ]; then
+               targetMd5=$(echo -n "$filedata" | md5)
+       else
+               command -v md5sum > /dev/null # Linux
+               if [ $? -eq 0 ]; then
+                       targetMd5=$(echo -n "$filedata" | md5sum | cut -d' ' -f 1)  # Need to cut additional info printed by cmd
+               else
+			__log_test_fail_general "Neither md5 nor md5sum command is available"
+                       return 1
+               fi
+       fi
+       targetMd5="\""$targetMd5"\""   #Quotes needed
+
+       echo " TARGET MD5 hash: $targetMd5" >> $HTTPLOG
+
+       if [ "$body" != "$targetMd5" ]; then
+               __log_test_fail_body
+               return 1
+       fi
+
        __log_test_pass
        return 0
 }
\ No newline at end of file
index 26da2d0..9b7571f 100644 (file)
@@ -92,6 +92,18 @@ __DMAAPADP_initial_setup() {
        use_dmaapadp_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__DMAAPADP_statisics_setup() {
+       if [ $RUNMODE == "KUBE" ]; then
+               echo "DMAAPADP $DMAAP_ADP_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+       else
+               echo "DMAAPADP $DMAAP_ADP_APP_NAME"
+       fi
+}
+
 #######################################################
 
 # Set http as the protocol to use for all communication to the Dmaap adapter
index 16e1ad7..5188a45 100644 (file)
@@ -92,6 +92,18 @@ __DMAAPMED_initial_setup() {
        use_dmaapmed_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__DMAAPMED_statisics_setup() {
+       if [ $RUNMODE == "KUBE" ]; then
+               echo "DMAAPMED $DMAAP_MED_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+       else
+               echo "DMAAPMED $DMAAP_MED_APP_NAME"
+       fi
+}
+
 #######################################################
 
 # Set http as the protocol to use for all communication to the Dmaap mediator
index 2b434f1..b28c061 100644 (file)
@@ -91,6 +91,18 @@ __ECS_initial_setup() {
        use_ecs_rest_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__ECS_statisics_setup() {
+       if [ $RUNMODE == "KUBE" ]; then
+               echo "ECS $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+       else
+               echo "ECS $ECS_APP_NAME"
+       fi
+}
+
 #######################################################
 
 
index ee617ef..d8f1707 100644 (file)
@@ -92,6 +92,18 @@ __NGW_initial_setup() {
        use_gateway_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__NGW_statisics_setup() {
+       if [ $RUNMODE == "KUBE" ]; then
+               echo "NGW $NRT_GATEWAY_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+       else
+               echo "NGW $NRT_GATEWAY_APP_NAME"
+       fi
+}
+
 #######################################################
 
 
diff --git a/test/common/genstat.sh b/test/common/genstat.sh
new file mode 100755 (executable)
index 0000000..3c329d9
--- /dev/null
@@ -0,0 +1,135 @@
+#!/bin/bash
+
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2020 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+# This script collects container statistics to a file. Data fields are separated with semicolons.
+# Works for both docker containers and kubernetes pods.
+# Relies on 'docker stats' so will not work for other container runtimes.
+# Used by the test env.
+
+# args: docker <start-time-seconds> <log-file> <app-short-name> <app-name> [ <app-short-name> <app-name> ]*
+# or
+# args: kube <start-time-seconds> <log-file> <app-short-name> <app-name> <namespace> [ <app-short-name> <app-name> <namespace> ]*
+
+print_usage() {
+  echo "Usage: genstat.sh DOCKER <start-time-seconds> <log-file> <app-short-name> <app-name> [ <app-short-name> <app-name> ]*"
+  echo "or"
+  echo "Usage: genstat.sh KUBE <start-time-seconds> <log-file> <app-short-name> <app-name> <namespace> [ <app-short-name> <app-name> <namespace> ]*"
+}
+
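+# Example (illustrative, hypothetical container names): sample two docker containers
+# once per second, time axis starting at 0, writing semicolon-separated rows to the log:
+#   ./genstat.sh DOCKER 0 ./tmp/stats.log PA policy-agent-container CR cr-container
+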
+STARTTIME=-1
+
+if [ $# -lt 4 ]; then
+  print_usage
+  exit 1
+fi
+if [ $1 == "DOCKER" ]; then
+  STAT_TYPE=$1
+  shift
+  STARTTIME=$1
+  shift
+  LOGFILE=$1
+  shift
+  if [ $(($#%2)) -ne 0 ]; then
+    print_usage
+    exit 1
+  fi
+elif [ $1 == "KUBE" ]; then
+  STAT_TYPE=$1
+  shift
+  STARTTIME=$1
+  shift
+  LOGFILE=$1
+  shift
+  if [ $(($#%3)) -ne 0 ]; then
+    print_usage
+    exit 1
+  fi
+else
+  print_usage
+  exit 1
+fi
+
+
+echo "Time;Name;PIDS;CPU perc;Mem perc" > $LOGFILE
+
+if [ "$STARTTIME" -ne -1 ]; then
+    STARTTIME=$(($SECONDS-$STARTTIME))
+fi
+
+while [ true ]; do
+  docker stats --no-stream --format "table {{.Name}};{{.PIDs}};{{.CPUPerc}};{{.MemPerc}}" > tmp/.tmp_stat_out.txt
+  if [ "$STARTTIME" -eq -1 ]; then
+    STARTTIME=$SECONDS
+  fi
+  CTIME=$(($SECONDS-$STARTTIME))
+
+  TMP_APPS=""
+
+  while read -r line; do
+    APP_LIST=(${@})
+    if [ $STAT_TYPE == "DOCKER" ]; then
+      for ((i=0; i<$#; i=i+2)); do
+        SAPP=${APP_LIST[$i]}
+        APP=${APP_LIST[$i+1]}
+        d=$(echo "$line" | grep -v "k8s" | grep $APP)
+        if [ ! -z "$d" ]; then
+          d=$(echo "$d" | cut -d';' -f 2- | sed -e 's/%//g' | sed 's/\./,/g')
+          echo "$SAPP;$CTIME;$d" >> $LOGFILE
+          TMP_APPS=$TMP_APPS" $SAPP "
+        fi
+      done
+    else
+      for ((i=0; i<$#; i=i+3)); do
+        SAPP=${APP_LIST[$i]}
+        APP=${APP_LIST[$i+1]}
+        NS=${APP_LIST[$i+2]}
+        d=$(echo "$line" | grep -v "k8s_POD" | grep "k8s" | grep $APP | grep $NS)
+        if [ ! -z "$d" ]; then
+          d=$(echo "$d" | cut -d';' -f 2- | sed -e 's/%//g' | sed 's/\./,/g')
+          data="$SAPP-$NS;$CTIME;$d"
+          echo $data >> $LOGFILE
+          TMP_APPS=$TMP_APPS" $SAPP-$NS "
+        fi
+      done
+    fi
+  done < tmp/.tmp_stat_out.txt
+
+  APP_LIST=(${@})
+  if [ $STAT_TYPE == "DOCKER" ]; then
+    for ((i=0; i<$#; i=i+2)); do
+      SAPP=${APP_LIST[$i]}
+      APP=${APP_LIST[$i+1]}
+      if [[ $TMP_APPS != *" $SAPP "* ]]; then
+        data="$SAPP;$CTIME;0;0,00;0,00"
+        echo $data >> $LOGFILE
+      fi
+    done
+  else
+    for ((i=0; i<$#; i=i+3)); do
+      SAPP=${APP_LIST[$i]}
+      APP=${APP_LIST[$i+1]}
+      NS=${APP_LIST[$i+2]}
+      if [[ $TMP_APPS != *" $SAPP-$NS "* ]]; then
+        data="$SAPP-$NS;$CTIME;0;0,00;0,00"
+        echo $data >> $LOGFILE
+      fi
+    done
+  fi
+  sleep 1
+done
index 56ce6d4..3378a1d 100644 (file)
@@ -106,6 +106,18 @@ __HTTPPROXY_initial_setup() {
        :
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__HTTPPROXY_statisics_setup() {
+       if [ $RUNMODE == "KUBE" ]; then
+               echo "HTTPPROXY $HTTP_PROXY_APP_NAME $KUBE_SIM_NAMESPACE"
+       else
+               echo "HTTPPROXY $HTTP_PROXY_APP_NAME"
+       fi
+}
+
 #######################################################
 
 
index dcaaf80..eb4600c 100644 (file)
@@ -107,6 +107,18 @@ __KUBEPROXY_initial_setup() {
        use_kube_proxy_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__KUBEPROXY_statisics_setup() {
+       if [ $RUNMODE == "KUBE" ]; then
+		echo "KUBEPROXY $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE"
+	else
+		echo "KUBEPROXY $KUBE_PROXY_APP_NAME"
+       fi
+}
+
 #######################################################
 
 ## Access to Kube http proxy
index c6a5a2c..da3e34d 100755 (executable)
@@ -193,19 +193,84 @@ __DMAAPMR_initial_setup() {
        :  # handle by __MR_initial_setup
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__MR_statisics_setup() {
+       if [ $RUNMODE == "KUBE" ]; then
+               echo "MR $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE"
+       else
+               echo "MR $MR_STUB_APP_NAME"
+       fi
+}
+
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__DMAAPMR_statisics_setup() {
+       if [ $RUNMODE == "KUBE" ]; then
+               echo ""
+       else
+               echo ""
+       fi
+}
 
 #######################################################
 
+# Description of port mappings when running MR-STUB only or MR-STUB + MESSAGE-ROUTER
+#
+# 'MR-STUB only' is started when only 'MR' is included in the test script. Both the test script and the apps will then use MR-STUB as a message-router simulator.
+#
+# 'MR-STUB + MESSAGE-ROUTER' is started when both 'MR' and 'DMAAPMR' are included in the test script. DMAAPMR is the real message-router, including kafka and zookeeper.
+# In this configuration, MR-STUB is used by the test script as a frontend to the message-router while the apps use the real message-router.
+#
+# DOCKER                                                                      KUBE
+# ---------------------------------------------------------------------------------------------------------------------------------------------------
+
+#                             MR-STUB                                                             MR-STUB
+#                             +++++++                                                             +++++++
+# localhost                               container                           service                                 pod
+# ==============================================================================================================================================
+# 10 MR_STUB_LOCALHOST_PORT          ->   13 MR_INTERNAL_PORT                 15 MR_EXTERNAL_PORT                ->   17 MR_INTERNAL_PORT
+# 12 MR_STUB_LOCALHOST_SECURE_PORT   ->   14 MR_INTERNAL_SECURE_PORT          16 MR_EXTERNAL_SECURE_PORT         ->   18 MR_INTERNAL_SECURE_PORT
+
+
+
+#                             MESSAGE-ROUTER                                                      MESSAGE-ROUTER
+#                             ++++++++++++++                                                      ++++++++++++++
+# localhost                               container                           service                                 pod
+# ===================================================================================================================================================
+# 20 MR_DMAAP_LOCALHOST_PORT         ->   23 MR_INTERNAL_PORT                 25 MR_EXTERNAL_PORT                ->   27 MR_INTERNAL_PORT
+# 22 MR_DMAAP_LOCALHOST_SECURE_PORT  ->   24 MR_INTERNAL_SECURE_PORT          26 MR_EXTERNAL_SECURE_PORT         ->   28 MR_INTERNAL_SECURE_PORT
+
+
+# Running only the MR-STUB - apps using MR-STUB
+# DOCKER                                                                      KUBE
+# localhost:          10 and 12                                                -
+# via proxy (script): 13 and 14                                               via proxy (script): 15 and 16
+# apps:               13 and 14                                               apps:               15 and 16
+
+# Running MR-STUB (as frontend for test script) and MESSAGE-ROUTER - apps using MESSAGE-ROUTER
+# DOCKER                                                                      KUBE
+# localhost:          10 and 12                                                -
+# via proxy (script): 13 and 14                                               via proxy (script): 15 and 16
+# apps:               23 and 24                                               apps:               25 and 26
+#
+
+
+
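+# Example (illustrative) of resulting values in docker mode when MESSAGE-ROUTER is included:
+#   MR_STUB_PATH          = http://<mr-stub-app-name>:<MR_INTERNAL_PORT>     # test script -> mr-stub
+#   MR_SERVICE_PATH       = http://<dmaap-mr-app-name>:<MR_INTERNAL_PORT>    # apps -> message-router
+#   MR_KAFKA_SERVICE_PATH = <kafka-app-name>:<MR_KAFKA_PORT>                 # apps -> kafka
+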
 use_mr_http() {
-       __mr_set_protocoll "http" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXT_SECURE_PORT
+       __mr_set_protocoll "http" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXTERNAL_SECURE_PORT
 }
 
 use_mr_https() {
-       __mr_set_protocoll "https" $MR_INTERNAL_SECURE_PORT $MR_EXTERNAL_SECURE_PORT
+       __mr_set_protocoll "https" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXTERNAL_SECURE_PORT
 }
 
 # Setup paths to svc/container for internal and external access
-# args: <protocol> <internal-port> <external-port> <mr-stub-internal-port> <mr-stub-external-port> <mr-stub-internal-secure-port> <mr-stub-external-secure-port>
+# args: <protocol> <internal-port> <external-port> <internal-secure-port> <external-secure-port>
 __mr_set_protocoll() {
        echo -e $BOLD"$MR_STUB_DISPLAY_NAME and $MR_DMAAP_DISPLAY_NAME protocol setting"$EBOLD
        echo -e " Using $BOLD http $EBOLD towards $MR_STUB_DISPLAY_NAME and $MR_DMAAP_DISPLAY_NAME"
@@ -214,39 +279,60 @@ __mr_set_protocoll() {
 
        MR_HTTPX=$1
 
+       if [ $MR_HTTPX == "http" ]; then
+               INT_PORT=$2
+               EXT_PORT=$3
+       else
+               INT_PORT=$4
+               EXT_PORT=$5
+       fi
+
        # Access via test script
-       MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME":"$2  # access from script via proxy, docker
-       MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME":"$2 # access from script via proxy, docker
+       MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME":"$INT_PORT  # access from script via proxy, docker
+       MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME":"$INT_PORT # access from script via proxy, docker
+	MR_DMAAP_ADAPTER_HTTP="" # Access to dmaap mr via proxy - set only if app is included
 
        MR_SERVICE_PATH=$MR_STUB_PATH # access container->container, docker -  access pod->svc, kube
+       MR_KAFKA_SERVICE_PATH=""
        __check_included_image "DMAAPMR"
        if [ $? -eq 0 ]; then
                MR_SERVICE_PATH=$MR_DMAAP_PATH # access container->container, docker -  access pod->svc, kube
+               MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+
+               MR_KAFKA_SERVICE_PATH=$MR_KAFKA_APP_NAME":"$MR_KAFKA_PORT
        fi
 
 	# For directing calls from script to e.g. PMS via message router
-       # Theses case shall always go though the  mr-stub
-       MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$4
-       MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$6
+	# These cases shall always go through the mr-stub
+       MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$2
+       MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$4
+
+       MR_DMAAP_ADAPTER_TYPE="REST"
+
+
 
        if [ $RUNMODE == "KUBE" ]; then
-               MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 # access from script via proxy, kube
-               MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 # access from script via proxy, kube
+               MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$EXT_PORT # access from script via proxy, kube
+               MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE":"$EXT_PORT # access from script via proxy, kube
 
                MR_SERVICE_PATH=$MR_STUB_PATH
                __check_included_image "DMAAPMR"
                if [ $? -eq 0 ]; then
                        MR_SERVICE_PATH=$MR_DMAAP_PATH
+                       MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+                       MR_KAFKA_SERVICE_PATH=$MR_KAFKA_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_KAFKA_PORT
                fi
                __check_prestarted_image "DMAAPMR"
                if [ $? -eq 0 ]; then
                        MR_SERVICE_PATH=$MR_DMAAP_PATH
+                       MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+                       MR_KAFKA_SERVICE_PATH=$MR_KAFKA_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_KAFKA_PORT
                fi
 
 		# For directing calls from script to e.g. PMS, via message router
 		# These calls shall always go through the mr-stub
-               MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$5
-               MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$7
+               MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3
+               MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$5
        fi
 
        # For calls from script to the mr-stub
@@ -254,8 +340,77 @@ __mr_set_protocoll() {
        MR_STUB_ADAPTER_TYPE="REST"
 
        echo ""
+
 }
 
+
 # Export env vars for config files, docker compose and kube resources
 # args: -
 __dmaapmr_export_vars() {
@@ -272,6 +427,14 @@ __dmaapmr_export_vars() {
        export MR_DMAAP_LOCALHOST_SECURE_PORT
        export MR_INTERNAL_SECURE_PORT
        export MR_DMAAP_HOST_MNT_DIR
+
+       export KUBE_ONAP_NAMESPACE
+       export MR_EXTERNAL_PORT
+       export MR_EXTERNAL_SECURE_PORT
+       export MR_KAFKA_PORT
+       export MR_ZOOKEEPER_PORT
+
+       export MR_KAFKA_SERVICE_PATH
 }
 
 # Export env vars for config files, docker compose and kube resources
@@ -283,10 +446,17 @@ __mr_export_vars() {
        export MRSTUB_IMAGE
        export MR_INTERNAL_PORT
        export MR_INTERNAL_SECURE_PORT
+       export MR_EXTERNAL_PORT
+       export MR_EXTERNAL_SECURE_PORT
        export MR_STUB_LOCALHOST_PORT
        export MR_STUB_LOCALHOST_SECURE_PORT
        export MR_STUB_CERT_MOUNT_DIR
        export MR_STUB_DISPLAY_NAME
+
+       export KUBE_ONAP_NAMESPACE
+       export MR_EXTERNAL_PORT
+
+       export MR_KAFKA_SERVICE_PATH
 }
 
 
@@ -358,53 +528,33 @@ start_mr() {
 
                        __dmaapmr_export_vars
 
-                       #export MR_DMAAP_APP_NAME
-                       export MR_DMAAP_KUBE_APP_NAME=message-router
-                       MR_DMAAP_APP_NAME=$MR_DMAAP_KUBE_APP_NAME
-                       export KUBE_ONAP_NAMESPACE
-                       export MR_EXTERNAL_PORT
-                       export MR_INTERNAL_PORT
-                       export MR_EXTERNAL_SECURE_PORT
-                       export MR_INTERNAL_SECURE_PORT
-                       export ONAP_DMAAPMR_IMAGE
-
-                       export MR_KAFKA_BWDS_NAME=akfak-bwds
-                       export MR_KAFKA_BWDS_NAME=kaka
-                       export KUBE_ONAP_NAMESPACE
-
-                       export MR_ZOOKEEPER_APP_NAME
-                       export ONAP_ZOOKEEPER_IMAGE
-
                        #Check if onap namespace exists, if not create it
                        __kube_create_namespace $KUBE_ONAP_NAMESPACE
 
-                       # TODO - Fix domain name substitution in the prop file
-                       # Create config maps - dmaapmr app
-                       configfile=$PWD/tmp/MsgRtrApi.properties
-                       cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/mr/KUBE-MsgRtrApi.properties $configfile
+                       # copy config files
+                       MR_MNT_CONFIG_BASEPATH=$SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR$MR_DMAAP_HOST_MNT_DIR
+                       cp -r $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR$MR_DMAAP_HOST_CONFIG_DIR/*  $MR_MNT_CONFIG_BASEPATH
 
+                       # Create config maps - dmaapmr app
+                       configfile=$MR_MNT_CONFIG_BASEPATH/mr/MsgRtrApi.properties
                        output_yaml=$PWD/tmp/dmaapmr_msgrtrapi_cfc.yaml
                        __kube_create_configmap dmaapmr-msgrtrapi.properties $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
 
-                       configfile=$PWD/tmp/logback.xml
-                       cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/mr/logback.xml $configfile
+                       configfile=$MR_MNT_CONFIG_BASEPATH/mr/logback.xml
                        output_yaml=$PWD/tmp/dmaapmr_logback_cfc.yaml
                        __kube_create_configmap dmaapmr-logback.xml $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
 
-                       configfile=$PWD/tmp/cadi.properties
-                       cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/mr/cadi.properties $configfile
+                       configfile=$MR_MNT_CONFIG_BASEPATH/mr/cadi.properties
                        output_yaml=$PWD/tmp/dmaapmr_cadi_cfc.yaml
                        __kube_create_configmap dmaapmr-cadi.properties $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
 
                        # Create config maps - kafka app
-                       configfile=$PWD/tmp/zk_client_jaas.conf
-                       cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/kafka/zk_client_jaas.conf $configfile
+                       configfile=$MR_MNT_CONFIG_BASEPATH/kafka/zk_client_jaas.conf
                        output_yaml=$PWD/tmp/dmaapmr_zk_client_cfc.yaml
                        __kube_create_configmap dmaapmr-zk-client-jaas.conf $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
 
                        # Create config maps - zookeeper app
-                       configfile=$PWD/tmp/zk_server_jaas.conf
-                       cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/zk/zk_server_jaas.conf $configfile
+                       configfile=$MR_MNT_CONFIG_BASEPATH/zk/zk_server_jaas.conf
                        output_yaml=$PWD/tmp/dmaapmr_zk_server_cfc.yaml
                        __kube_create_configmap dmaapmr-zk-server-jaas.conf $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
 
@@ -419,42 +569,69 @@ start_mr() {
                        __kube_create_instance app $MR_DMAAP_APP_NAME $input_yaml $output_yaml
 
 
-                       echo " Retrieving host and ports for service..."
-                       MR_DMAAP_HOST_NAME=$(__kube_get_service_host $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE)
+                       echo " Retrieving host and ports for service..."
+                       MR_DMAAP_HOST_NAME=$(__kube_get_service_host $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE)
 
-                       MR_EXT_PORT=$(__kube_get_service_port $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE "http")
-                       MR_EXT_SECURE_PORT=$(__kube_get_service_port $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE "https")
+                       MR_EXT_PORT=$(__kube_get_service_port $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE "http")
+                       MR_EXT_SECURE_PORT=$(__kube_get_service_port $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE "https")
 
-                       echo " Host IP, http port, https port: $MR_DMAAP_APP_NAME $MR_EXT_PORT $MR_EXT_SECURE_PORT"
-                       MR_SERVICE_PATH=""
-                       if [ $MR_HTTPX == "http" ]; then
-                               MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_EXT_PORT
-                               MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_PORT
-                       else
-                               MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_EXT_SECURE_PORT
-                               MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_SECURE_PORT
+
+                       __check_service_start $MR_DMAAP_APP_NAME $MR_DMAAP_PATH$MR_DMAAP_ALIVE_URL
+
+			# Cannot create topics via the API, it returns 400 forever. Topics will instead be created during the pipeclean below
+                       #__create_topic $MR_READ_TOPIC "Topic for reading policy messages"
+
+                       #__create_topic $MR_WRITE_TOPIC "Topic for writing policy messages"
+
+#                      __dmaap_pipeclean $MR_READ_TOPIC "/events/$MR_READ_TOPIC" "/events/$MR_READ_TOPIC/users/policy-agent?timeout=1000&limit=100"
+#
+#                      __dmaap_pipeclean $MR_WRITE_TOPIC "/events/$MR_WRITE_TOPIC" "/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=1000&limit=100"
+
+
+                       #__dmaap_pipeclean "unauthenticated.dmaapmed.json" "/events/unauthenticated.dmaapmed.json" "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages?timeout=1000&limit=100"
+                       #__dmaap_pipeclean "unauthenticated.dmaapadp.json" "/events/unauthenticated.dmaapadp.json" "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs?timeout=1000&limit=100"
+
+                       if [ $# -gt 0 ]; then
+                               if [ $(($#%3)) -eq 0 ]; then
+                                       while [ $# -gt 0 ]; do
+                                               __dmaap_pipeclean "$1" "$2/$1" "$2/$1/$3?timeout=1000&limit=100"
+                                               shift; shift; shift;
+                                       done
+                               else
+                                       echo -e $RED" args: start_mr [<topic-name> <base-url> <group-and-user-url>]*"$ERED
+                                       echo -e $RED" Got: $@"$ERED
+                                       exit 1
+                               fi
                        fi
 
-                               __check_service_start $MR_DMAAP_APP_NAME $MR_DMAAP_PATH$MR_DMAAP_ALIVE_URL
+                       echo " Current topics:"
+                       curlString="$MR_DMAAP_PATH/topics"
+                       result=$(__do_curl "$curlString")
+                       echo $result | indent2
 
                fi
 
                if [ $retcode_included_mr -eq 0 ]; then
-                       #exporting needed var for deployment
-                       export MR_STUB_APP_NAME
-                       export KUBE_ONAP_NAMESPACE
-                       export MRSTUB_IMAGE
-                       export MR_INTERNAL_PORT
-                       export MR_INTERNAL_SECURE_PORT
-                       export MR_EXTERNAL_PORT
-                       export MR_EXTERNAL_SECURE_PORT
+
+                       __mr_export_vars
 
                        if [ $retcode_prestarted_dmaapmr -eq 0 ] || [ $retcode_included_dmaapmr -eq 0 ]; then  # Set topics for dmaap
                                export TOPIC_READ="http://$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE:$MR_INTERNAL_PORT/events/$MR_READ_TOPIC"
                                export TOPIC_WRITE="http://$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE:$MR_INTERNAL_PORT/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=15000&limit=100"
+                               export GENERIC_TOPICS_UPLOAD_BASEURL="http://$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE:$MR_INTERNAL_PORT"
                        else
                                export TOPIC_READ=""
                                export TOPIC_WRITE=""
+                               export GENERIC_TOPICS_UPLOAD_BASEURL=""
                        fi
 
                        #Check if onap namespace exists, if not create it
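For reference, a minimal sketch of how the reworked start_mr can be invoked with topic triples (topic and path names are illustrative, based on the commented-out pipeclean calls above):

    # Hypothetical invocation - each triple is <topic-name> <base-url> <group-and-user-url>
    start_mr "unauthenticated.dmaapadp.json" "/events" "dmaapadapterproducer/msgs" \
             "unauthenticated.dmaapmed.json" "/events" "dmaapmediatorproducer/STD_Fault_Messages"
    # For each triple the script runs:
    #   __dmaap_pipeclean <topic-name> <base-url>/<topic-name> <base-url>/<topic-name>/<group-and-user-url>?timeout=1000&limit=100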
@@ -473,30 +650,29 @@ start_mr() {
 
                fi
 
-
-               echo " Retrieving host and ports for service..."
-               MR_STUB_HOST_NAME=$(__kube_get_service_host $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE)
-
-               MR_EXT_PORT=$(__kube_get_service_port $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE "http")
-               MR_EXT_SECURE_PORT=$(__kube_get_service_port $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE "https")
-
-               echo " Host IP, http port, https port: $MR_STUB_APP_NAME $MR_EXT_PORT $MR_EXT_SECURE_PORT"
-               if [ $MR_HTTPX == "http" ]; then
-                       MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT
-                       if [ -z "$MR_SERVICE_PATH" ]; then
-                               MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_PORT
-                       fi
-               else
-                       MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT
-                       if [ -z "$MR_SERVICE_PATH" ]; then
-                               MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_SECURE_PORT
-                       fi
-               fi
-               MR_ADAPTER_HTTP="http://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT
-               MR_ADAPTER_HTTPS="https://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT
-
-               MR_STUB_ADAPTER=$MR_STUB_PATH
-               MR_STUB_ADAPTER_TYPE="REST"
+               # echo " Retrieving host and ports for service..."
+               # MR_STUB_HOST_NAME=$(__kube_get_service_host $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE)
+
+               # MR_EXT_PORT=$(__kube_get_service_port $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE "http")
+               # MR_EXT_SECURE_PORT=$(__kube_get_service_port $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE "https")
+
+               # echo " Host IP, http port, https port: $MR_STUB_APP_NAME $MR_EXT_PORT $MR_EXT_SECURE_PORT"
+               # if [ $MR_HTTPX == "http" ]; then
+               #       MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT
+               #       if [ -z "$MR_SERVICE_PATH" ]; then
+               #               MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_PORT
+               #       fi
+               # else
+               #       MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT
+               #       if [ -z "$MR_SERVICE_PATH" ]; then
+               #               MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_SECURE_PORT
+               #       fi
+               # fi
+               # MR_ADAPTER_HTTP="http://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT
+               # MR_ADAPTER_HTTPS="https://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT
+
+               # MR_STUB_ADAPTER=$MR_STUB_PATH
+               # MR_STUB_ADAPTER_TYPE="REST"
 
                __check_service_start $MR_STUB_APP_NAME $MR_STUB_PATH$MR_STUB_ALIVE_URL
 
@@ -532,26 +708,55 @@ start_mr() {
 
                export TOPIC_READ=""
         export TOPIC_WRITE=""
+               export GENERIC_TOPICS_UPLOAD_BASEURL=""
                if [ $retcode_dmaapmr -eq 0 ]; then  # Set topics for dmaap
                        export TOPIC_READ="http://$MR_DMAAP_APP_NAME:$MR_INTERNAL_PORT/events/$MR_READ_TOPIC"
                        export TOPIC_WRITE="http://$MR_DMAAP_APP_NAME:$MR_INTERNAL_PORT/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=15000&limit=100"
+                       export GENERIC_TOPICS_UPLOAD_BASEURL="http://$MR_DMAAP_APP_NAME:$MR_INTERNAL_PORT"
                fi
 
                __dmaapmr_export_vars
 
                if [ $retcode_dmaapmr -eq 0 ]; then
+
+                       # copy config files
+                       MR_MNT_CONFIG_BASEPATH=$SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR$MR_DMAAP_HOST_MNT_DIR
+                       cp -r $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR$MR_DMAAP_HOST_CONFIG_DIR/*  $MR_MNT_CONFIG_BASEPATH
+
+                       # substitute vars
+                       configfile=$MR_MNT_CONFIG_BASEPATH/mr/MsgRtrApi.properties
+                       cp $configfile $configfile"_tmp"
+                       envsubst < $configfile"_tmp" > $configfile
+
                        __start_container $MR_DMAAP_COMPOSE_DIR "" NODOCKERARGS 1 $MR_DMAAP_APP_NAME
 
                        __check_service_start $MR_DMAAP_APP_NAME $MR_DMAAP_PATH$MR_DMAAP_ALIVE_URL
 
 
-                       __create_topic $MR_READ_TOPIC "Topic for reading policy messages"
+                       # Topic creation via the API keeps returning 400 - topics are instead created implicitly by the pipeclean below
+                       #__create_topic $MR_READ_TOPIC "Topic for reading policy messages"
+
+                       #__create_topic $MR_WRITE_TOPIC "Topic for writing policy messages"
+
+                       #__dmaap_pipeclean $MR_READ_TOPIC "/events/$MR_READ_TOPIC" "/events/$MR_READ_TOPIC/users/policy-agent?timeout=1000&limit=100"
 
-                       __create_topic $MR_WRITE_TOPIC "Topic for writing policy messages"
+                       #__dmaap_pipeclean $MR_WRITE_TOPIC "/events/$MR_WRITE_TOPIC" "/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=1000&limit=100"
 
-                       __dmaap_pipeclean $MR_READ_TOPIC "/events/$MR_READ_TOPIC" "/events/$MR_READ_TOPIC/users/policy-agent?timeout=1000&limit=100"
+                       if [ $# -gt 0 ]; then
+                               if [ $(($#%3)) -eq 0 ]; then
+                                       while [ $# -gt 0 ]; do
+                                               __dmaap_pipeclean "$1" "$2/$1" "$2/$1/$3?timeout=1000&limit=100"
+                                               shift; shift; shift;
+                                       done
+                               else
+                                       echo -e $RED" args: start_mr [<topic-name> <base-url> <group-and-user-url>]*"$ERED
+                                       echo -e $RED" Got: $@"$ERED
+                                       exit 1
+                               fi
+                       fi
 
-                       __dmaap_pipeclean $MR_WRITE_TOPIC "/events/$MR_WRITE_TOPIC" "/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=1000&limit=100"
+                       #__dmaap_pipeclean "unauthenticated.dmaapmed.json" "/events/unauthenticated.dmaapmed.json" "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages?timeout=1000&limit=100"
+                       #__dmaap_pipeclean "unauthenticated.dmaapadp.json" "/events/unauthenticated.dmaapadp.json" "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs?timeout=1000&limit=100"
 
                        echo " Current topics:"
                        curlString="$MR_DMAAP_PATH/topics"
@@ -575,23 +780,25 @@ start_mr() {
 # Create a dmaap mr topic
 # args: <topic name> <topic-description>
 __create_topic() {
-       echo -ne " Creating read topic: $1"$SAMELINE
+       echo -ne " Creating topic: $1"$SAMELINE
 
        json_topic="{\"topicName\":\"$1\",\"partitionCount\":\"2\", \"replicationCount\":\"3\", \"transactionEnabled\":\"false\",\"topicDescription\":\"$2\"}"
-       echo $json_topic > ./tmp/$1.json
+       fname="./tmp/$1.json"
+       echo $json_topic > $fname
 
-       curlString="$MR_DMAAP_PATH/topics/create -X POST  -H Content-Type:application/json -d@./tmp/$1.json"
-       topic_retries=5
+       query="/topics/create"
+       topic_retries=10
        while [ $topic_retries -gt 0 ]; do
                let topic_retries=topic_retries-1
-               result=$(__do_curl "$curlString")
-               if [ $? -eq 0 ]; then
+               res="$(__do_curl_to_api DMAAPMR POST $query $fname)"
+               status=${res:${#res}-3}
+
+               if [[ $status == "2"* ]]; then
                        topic_retries=0
-                       echo -e " Creating read topic: $1 $GREEN OK $EGREEN"
-               fi
-               if [ $? -ne 0 ]; then
+                       echo -e " Creating topic: $1 $GREEN OK $EGREEN"
+               else
                        if [ $topic_retries -eq 0 ]; then
-                               echo -e " Creating read topic: $1 $RED Failed $ERED"
+                               echo -e " Creating topic: $1 $RED Failed $ERED"
                                ((RES_CONF_FAIL++))
                                return 1
                        else
@@ -599,18 +806,27 @@ __create_topic() {
                        fi
                fi
        done
+       echo
        return 0
 }
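A sketch of a call and the JSON body it posts to the /topics/create endpoint (topic name and description illustrative):

    # Hypothetical usage:
    __create_topic "unauthenticated.dmaapadp.json" "Topic for adapter messages"
    # The function writes roughly this body to ./tmp/<topic>.json and POSTs it via __do_curl_to_api:
    # {"topicName":"unauthenticated.dmaapadp.json","partitionCount":"2", "replicationCount":"3", "transactionEnabled":"false","topicDescription":"Topic for adapter messages"}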
 
 # Do a pipeclean of a topic - to overcome dmaap mr bug...
-# args: <topic> <post-url> <read-url>
+# args: <topic> <post-url> <read-url> [<num-retries>]
 __dmaap_pipeclean() {
        pipeclean_retries=50
+       if [ $# -eq 4 ]; then
+               pipeclean_retries=$4
+       fi
        echo -ne " Doing dmaap-mr pipe cleaning on topic: $1"$SAMELINE
        while [ $pipeclean_retries -gt 0 ]; do
-               echo "{\"pipeclean-$1\":$pipeclean_retries}" > ./tmp/pipeclean.json
+               if [[ $1 == *".text" ]]; then
+                       echo "pipeclean-$1:$pipeclean_retries" > ./tmp/__dmaap_pipeclean.txt
+                       curlString="$MR_DMAAP_PATH$2 -X POST  -H Content-Type:text/plain -d@./tmp/__dmaap_pipeclean.txt"
+               else
+                       echo "{\"pipeclean-$1\":$pipeclean_retries}" > ./tmp/__dmaap_pipeclean.json
+                       curlString="$MR_DMAAP_PATH$2 -X POST  -H Content-Type:application/json -d@./tmp/__dmaap_pipeclean.json"
+               fi
                let pipeclean_retries=pipeclean_retries-1
-               curlString="$MR_DMAAP_PATH$2 -X POST  -H Content-Type:application/json -d@./tmp/pipeclean.json"
                result=$(__do_curl "$curlString")
                if [ $? -ne 0 ]; then
                        sleep 1
@@ -688,7 +904,7 @@ mr_print() {
 # arg: <topic-url> <json-msg>
 # (Function for test scripts)
 mr_api_send_json() {
-       __log_test_start $@
+       __log_conf_start $@
     if [ $# -ne 2 ]; then
         __print_err "<topic-url> <json-msg>" $@
         return 1
@@ -700,10 +916,139 @@ mr_api_send_json() {
 
        status=${res:${#res}-3}
        if [ $status -ne 200 ]; then
-               __log_test_fail_status_code 200 $status
+               __log_conf_fail_status_code 200 $status
+               return 1
+       fi
+
+       __log_conf_ok
+       return 0
+}
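With the function now logged as a configuration step, a usage sketch (topic url and payload illustrative):

    mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"message-1"}'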
+
+# Send text to topic in mr-stub.
+# arg: <topic-url> <text-msg>
+# (Function for test scripts)
+mr_api_send_text() {
+       __log_conf_start $@
+    if [ $# -ne 2 ]; then
+        __print_err "<topic-url> <text-msg>" $@
+        return 1
+    fi
+       query=$1
+       fname=$PWD/tmp/text_payload_to_mr.txt
+       echo $2 > $fname
+       res="$(__do_curl_to_api MRSTUB POST $query $fname text/plain)"
+
+       status=${res:${#res}-3}
+       if [ $status -ne 200 ]; then
+               __log_conf_fail_status_code 200 $status
+               return 1
+       fi
+
+       __log_conf_ok
+       return 0
+}
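Usage sketch (url illustrative; the ".text" suffix matches the naming convention __dmaap_pipeclean uses to select text/plain):

    mr_api_send_text "/events/unauthenticated.dmaapadp.text" 'Hello world'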
+
+# Send json file to topic in mr-stub.
+# arg: <topic-url> <json-file>
+# (Function for test scripts)
+mr_api_send_json_file() {
+       __log_conf_start $@
+    if [ $# -ne 2 ]; then
+        __print_err "<topic-url> <json-file>" $@
+        return 1
+    fi
+       query=$1
+       if [ ! -f $2 ]; then
+               __log_conf_fail_general "File $2 does not exist"
+               return 1
+       fi
+       #Create json array for mr
+       datafile="tmp/mr_api_send_json_file.json"
+       { echo -n "[" ; cat $2 ; echo -n "]" ;} > $datafile
+
+       res="$(__do_curl_to_api MRSTUB POST $query $datafile)"
+
+       status=${res:${#res}-3}
+       if [ $status -ne 200 ]; then
+               __log_conf_fail_status_code 200 $status
+               return 1
+       fi
+
+       __log_conf_ok
+       return 0
+}
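Since the function wraps the file content in brackets, the file should hold a single JSON object; a sketch (file name illustrative):

    echo '{"msg":"from-file"}' > tmp/single_msg.json
    mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" tmp/single_msg.json
    # Body actually posted to MR: [{"msg":"from-file"}]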
+
+# Send text file to topic in mr-stub.
+# arg: <topic-url> <text-file>
+# (Function for test scripts)
+mr_api_send_text_file() {
+       __log_conf_start $@
+    if [ $# -ne 2 ]; then
+        __print_err "<topic-url> <text-file>" $@
+        return 1
+    fi
+       query=$1
+               __log_conf_fail_general "File $2 does not exist"
+               __log_test_fail_general "File $2 does not exist"
+               return 1
+       fi
+
+       res="$(__do_curl_to_api MRSTUB POST $query $2 text/plain)"
+
+       status=${res:${#res}-3}
+       if [ $status -ne 200 ]; then
+               __log_conf_fail_status_code 200 $status
                return 1
        fi
 
-       __log_test_pass
+       __log_conf_ok
+       return 0
+}
+
+# Create json file for payload
+# arg: <size-in-kb> <filename>
+mr_api_generate_json_payload_file() {
+       __log_conf_start $@
+    if [ $# -ne 2 ]; then
+        __print_err "<size-in-kb> <filename>" $@
+        return 1
+    fi
+       if [ $1 -lt 1 ] || [ $1 -gt 10000 ]; then
+               __log_conf_fail_general "Only size between 1k and 10000k supported"
+               return 1
+       fi
+       echo -n "{\"a\":[" > $2
+       LEN=$(($1*150))
+       echo -n "\"a0\"" >> $2
+       for ((idx=1; idx<$LEN; idx++))
+       do
+               echo -n ",\"a$idx\"" >> $2
+       done
+       echo -n "]}" >> $2
+
+       __log_conf_ok
+       return 0
+}
+
+# Create text file for payload
+# arg: <size-in-kb> <filename>
+mr_api_generate_text_payload_file() {
+       __log_conf_start $@
+    if [ $# -ne 2 ]; then
+        __print_err "<size-in-kb> <filename>" $@
+        return 1
+    fi
+       if [ $1 -lt 1 ] || [ $1 -gt 10000 ]; then
+               __log_conf_fail_general "Only size between 1k and 10000k supported"
+               return 1
+       fi
+       echo -n "" > $2
+       LEN=$(($1*100))
+       for ((idx=0; idx<$LEN; idx++))
+       do
+               echo -n "ABCDEFGHIJ" >> $2
+       done
+
+       __log_conf_ok
        return 0
 }
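A sketch combining the payload generators with the send helpers (sizes and paths illustrative). Note the sizing: the json generator emits <size-in-kb>*150 short array items and the text generator <size-in-kb>*100 ten-character chunks, each roughly 1k per unit:

    mr_api_generate_json_payload_file 10 tmp/payload_10k.json      # ~10k json payload
    mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" tmp/payload_10k.json

    mr_api_generate_text_payload_file 10 tmp/payload_10k.txt       # ~10k text payload
    mr_api_send_text_file "/events/unauthenticated.dmaapadp.text" tmp/payload_10k.txt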
index bb4ccf5..6c3ce23 100644 (file)
@@ -107,6 +107,18 @@ __PRODSTUB_initial_setup() {
        use_prod_stub_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__PRODSTUB_statisics_setup() {
+       if [ $RUNMODE == "KUBE" ]; then
+               echo "PRODSTUB $PROD_STUB_APP_NAME $KUBE_SIM_NAMESPACE"
+       else
+               echo "PRODSTUB $PROD_STUB_APP_NAME"
+       fi
+}
+
 #######################################################
 
 # Set http as the protocol to use for all communication to the Prod stub sim
index 62c2d43..5d37bd0 100644 (file)
@@ -90,6 +90,14 @@ __PVCCLEANER_initial_setup() {
        :
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__PVCCLEANER_statisics_setup() {
+       echo ""
+}
+
 #######################################################
 
 # This is a system app, all usage in testcase_common.sh
\ No newline at end of file
index 52416d3..537bc0c 100644 (file)
@@ -84,6 +84,18 @@ __RC_initial_setup() {
        use_rapp_catalogue_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__RC_statisics_setup() {
+       if [ $RUNMODE == "KUBE" ]; then
+               echo "RC $RAPP_CAT_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+       else
+               echo "RC $RAPP_CAT_APP_NAME"
+       fi
+}
+
 #######################################################
 
 # Set http as the protocol to use for all communication to the Rapp catalogue
index f760313..695b535 100644 (file)
@@ -91,6 +91,18 @@ __RICSIM_initial_setup() {
        use_simulator_http
 }
 
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__RICSIM_statisics_setup() {
+       if [ $RUNMODE == "KUBE" ]; then
+               echo ""
+       else
+               echo ""
+       fi
+}
+
 #######################################################
 
 
index 8344f38..6cb18f5 100755 (executable)
@@ -161,9 +161,9 @@ DOCKER_SIM_NWNAME="nonrtric-docker-net"                  # Name of docker privat
 
 KUBE_NONRTRIC_NAMESPACE="nonrtric"                       # Namespace for all nonrtric components
 KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
-KUBE_A1SIM_NAMESPACE="a1-sim"                          # Namespace for a1-p simulators (RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim"                            # Namespace for a1-p simulators (RICSIM)
 KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap"                               # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap"                               # Namespace for sdnc
 
 POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
 POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
@@ -189,7 +189,7 @@ POLICY_AGENT_CONFIG_FILE="application.yaml"              # Container config file
 POLICY_AGENT_DATA_FILE="application_configuration.json"  # Container data file name
 POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
 
-MR_DMAAP_APP_NAME="dmaap-mr"                             # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router"                       # Name for the Dmaap MR
 MR_STUB_APP_NAME="mr-stub"                               # Name of the MR stub
 MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
 MR_STUB_DISPLAY_NAME="Message Router stub"
@@ -210,10 +210,12 @@ MR_STUB_ALIVE_URL="/"                                    # Base path for mr stub
 MR_DMAAP_ALIVE_URL="/topics"                             # Base path for dmaap-mr alive check
 MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_group for dmaap mr for - docker-compose
 MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka"                                # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka"                 # Kafka app name - the image will not start if the app is named just "kafka"
+MR_KAFKA_PORT=9092                                       # Kafka port number
 MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt"                              # Config files dir on localhost
-
+MR_ZOOKEEPER_PORT="2181"                                 # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt"                             # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs"                      # Config files dir on localhost
 
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
 CR_DISPLAY_NAME="Callback Reciever"
@@ -222,6 +224,8 @@ CR_INTERNAL_PORT=8090                                    # Callback receiver con
 CR_EXTERNAL_SECURE_PORT=8091                             # Callback receiver container external secure port (host -> container)
 CR_INTERNAL_SECURE_PORT=8091                             # Callback receiver container internal secure port (container -> container)
 CR_APP_CALLBACK="/callbacks"                             # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr"                       # Url for callbacks (data from MR - string-encoded JSON items in a JSON array)
+CR_APP_CALLBACK_TEXT="/callbacks-text"                   # Url for callbacks (plain text data)
 CR_ALIVE_URL="/"                                         # Base path for alive check
 CR_COMPOSE_DIR="cr"                                      # Dir in simulator_group for docker-compose
 
@@ -310,6 +314,12 @@ KUBE_PROXY_WEB_EXTERNAL_PORT=8731                        # Kube Http Proxy conta
 KUBE_PROXY_WEB_INTERNAL_PORT=8081                        # Kube Http Proxy container internal port (container -> container)
 KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783                 # Kube Proxy container external secure port (host -> container)
 KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434                 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732                     # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784              # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733                 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785          # Kube Proxy container external secure port, docker (host -> container)
+
 KUBE_PROXY_PATH=""                                       # Proxy url path, will be set if proxy is started
 KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
 KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
index 00e5d4b..c293420 100755 (executable)
@@ -185,9 +185,9 @@ DOCKER_SIM_NWNAME="nonrtric-docker-net"                  # Name of docker privat
 
 KUBE_NONRTRIC_NAMESPACE="nonrtric"                       # Namespace for all nonrtric components
 KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
-KUBE_A1SIM_NAMESPACE="a1-sim"                          # Namespace for a1-p simulators (RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim"                            # Namespace for a1-p simulators (RICSIM)
 KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap"                               # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap"                               # Namespace for sdnc
 
 POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
 POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
@@ -233,7 +233,7 @@ ECS_CONFIG_FILE=application.yaml                         # Config file name
 ECS_VERSION="V1-2"                                       # Version where the types are added in the producer registration
 ECS_FEATURE_LEVEL=""                                     # Space separated list of features
 
-MR_DMAAP_APP_NAME="dmaap-mr"                             # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router"                       # Name for the Dmaap MR
 MR_STUB_APP_NAME="mr-stub"                               # Name of the MR stub
 MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
 MR_STUB_DISPLAY_NAME="Message Router stub"
@@ -254,9 +254,12 @@ MR_STUB_ALIVE_URL="/"                                    # Base path for mr stub
 MR_DMAAP_ALIVE_URL="/topics"                             # Base path for dmaap-mr alive check
 MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_group for dmaap mr for - docker-compose
 MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka"                                # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka"                 # Kafka app name - the image will not start if the app is named just "kafka"
+MR_KAFKA_PORT=9092                                       # Kafka port number
 MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt"                              # Config files dir on localhost
+MR_ZOOKEEPER_PORT="2181"                                 # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt"                             # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs"                      # Config files dir on localhost
 
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
 CR_DISPLAY_NAME="Callback Reciever"
@@ -266,6 +269,8 @@ CR_EXTERNAL_SECURE_PORT=8091                             # Callback receiver con
 CR_INTERNAL_SECURE_PORT=8091                             # Callback receiver container internal secure port (container -> container)
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
 CR_APP_CALLBACK="/callbacks"                             # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr"                       # Url for callbacks (data from MR - string-encoded JSON items in a JSON array)
+CR_APP_CALLBACK_TEXT="/callbacks-text"                   # Url for callbacks (plain text data)
 CR_ALIVE_URL="/"                                         # Base path for alive check
 CR_COMPOSE_DIR="cr"                                      # Dir in simulator_group for docker-compose
 
@@ -378,6 +383,12 @@ KUBE_PROXY_WEB_EXTERNAL_PORT=8731                        # Kube Http Proxy conta
 KUBE_PROXY_WEB_INTERNAL_PORT=8081                        # Kube Http Proxy container internal port (container -> container)
 KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783                 # Kube Proxy container external secure port (host -> container)
 KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434                 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732                     # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784              # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733                 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785          # Kube Proxy container external secure port, docker (host -> container)
+
 KUBE_PROXY_PATH=""                                       # Proxy url path, will be set if proxy is started
 KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
 KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
index f8c411f..5b11137 100644 (file)
@@ -69,10 +69,10 @@ NEXUS_RELEASE_REPO_ONAP=$NEXUS_RELEASE_REPO
 
 # Policy Agent image and tags
 POLICY_AGENT_IMAGE_BASE="onap/ccsdk-oran-a1policymanagementservice"
-POLICY_AGENT_IMAGE_TAG_LOCAL="1.3.0-SNAPSHOT"
-POLICY_AGENT_IMAGE_TAG_REMOTE_SNAPSHOT="1.3.0-SNAPSHOT"
-POLICY_AGENT_IMAGE_TAG_REMOTE="1.3.0-STAGING-latest" #Will use snapshot repo
-POLICY_AGENT_IMAGE_TAG_REMOTE_RELEASE="1.3.0"
+POLICY_AGENT_IMAGE_TAG_LOCAL="1.2.4-SNAPSHOT"
+POLICY_AGENT_IMAGE_TAG_REMOTE_SNAPSHOT="1.2.4-SNAPSHOT"
+POLICY_AGENT_IMAGE_TAG_REMOTE="1.2.4-STAGING-latest" #Will use snapshot repo
+POLICY_AGENT_IMAGE_TAG_REMOTE_RELEASE="1.2.3"
 
 # SDNC A1 Controller remote image and tag
 SDNC_A1_CONTROLLER_IMAGE_BASE="onap/sdnc-image"
@@ -146,17 +146,17 @@ HTTP_PROXY_IMAGE_TAG_LOCAL="latest"
 
 #ONAP Zookeeper remote image and tag
 ONAP_ZOOKEEPER_IMAGE_BASE="onap/dmaap/zookeeper"
-ONAP_ZOOKEEPER_IMAGE_TAG_REMOTE_RELEASE_ONAP="6.0.3"
+ONAP_ZOOKEEPER_IMAGE_TAG_REMOTE_RELEASE_ONAP="6.1.0"
 #No local image for ONAP Zookeeper, remote image always used
 
 #ONAP Kafka remote image and tag
 ONAP_KAFKA_IMAGE_BASE="onap/dmaap/kafka111"
-ONAP_KAFKA_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.0.4"
+ONAP_KAFKA_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.1.1"
 #No local image for ONAP Kafka, remote image always used
 
 #ONAP DMAAP-MR remote image and tag
 ONAP_DMAAPMR_IMAGE_BASE="onap/dmaap/dmaap-mr"
-ONAP_DMAAPMR_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.1.18"
+ONAP_DMAAPMR_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.3.0"
 #No local image for ONAP DMAAP-MR, remote image always used
 
 #Kube proxy remote image and tag
@@ -188,9 +188,9 @@ DOCKER_SIM_NWNAME="nonrtric-docker-net"                  # Name of docker privat
 
 KUBE_NONRTRIC_NAMESPACE="nonrtric"                       # Namespace for all nonrtric components
 KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
-KUBE_A1SIM_NAMESPACE="a1-sim"                          # Namespace for a1-p simulators (RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim"                            # Namespace for a1-p simulators (RICSIM)
 KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap"                               # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap"                               # Namespace for sdnc
 
 POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
 POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
@@ -236,7 +236,7 @@ ECS_CONFIG_FILE=application.yaml                         # Config file name
 ECS_VERSION="V1-2"                                       # Version where the types are added in the producer registration
 ECS_FEATURE_LEVEL="INFO-TYPES"                           # Space separated list of features
 
-MR_DMAAP_APP_NAME="dmaap-mr"                             # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router"                       # Name for the Dmaap MR
 MR_STUB_APP_NAME="mr-stub"                               # Name of the MR stub
 MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
 MR_STUB_DISPLAY_NAME="Message Router stub"
@@ -257,9 +257,12 @@ MR_STUB_ALIVE_URL="/"                                    # Base path for mr stub
 MR_DMAAP_ALIVE_URL="/topics"                             # Base path for dmaap-mr alive check
 MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_group for dmaap mr for - docker-compose
 MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka"                                # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka"                 # Kafka app name - the image will not start if the app is named just "kafka"
+MR_KAFKA_PORT=9092                                       # Kafka port number
 MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt2"                             # Config files dir on localhost
+MR_ZOOKEEPER_PORT="2181"                                 # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt"                             # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs"                      # Config files dir on localhost
 
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
 CR_DISPLAY_NAME="Callback Reciever"
@@ -269,6 +272,8 @@ CR_EXTERNAL_SECURE_PORT=8091                             # Callback receiver con
 CR_INTERNAL_SECURE_PORT=8091                             # Callback receiver container internal secure port (container -> container)
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
 CR_APP_CALLBACK="/callbacks"                             # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr"                       # Url for callbacks (data from MR - string-encoded JSON items in a JSON array)
+CR_APP_CALLBACK_TEXT="/callbacks-text"                   # Url for callbacks (plain text data)
 CR_ALIVE_URL="/"                                         # Base path for alive check
 CR_COMPOSE_DIR="cr"                                      # Dir in simulator_group for docker-compose
 
@@ -397,6 +402,12 @@ KUBE_PROXY_WEB_EXTERNAL_PORT=8731                        # Kube Http Proxy conta
 KUBE_PROXY_WEB_INTERNAL_PORT=8081                        # Kube Http Proxy container internal port (container -> container)
 KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783                 # Kube Proxy container external secure port (host -> container)
 KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434                 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732                     # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784              # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733                 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785          # Kube Proxy container external secure port, docker (host -> container)
+
 KUBE_PROXY_PATH=""                                       # Proxy url path, will be set if proxy is started
 KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
 KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
index 43077ea..641aabe 100755 (executable)
@@ -188,9 +188,9 @@ DOCKER_SIM_NWNAME="nonrtric-docker-net"                  # Name of docker privat
 
 KUBE_NONRTRIC_NAMESPACE="nonrtric"                       # Namespace for all nonrtric components
 KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
-KUBE_A1SIM_NAMESPACE="a1-sim"                          # Namespace for a1-p simulators (RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim"                            # Namespace for a1-p simulators (RICSIM)
 KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap"                               # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap"                               # Namespace for sdnc
 
 POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
 POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
@@ -236,7 +236,7 @@ ECS_CONFIG_FILE=application.yaml                         # Config file name
 ECS_VERSION="V1-2"                                       # Version where the types are added in the producer registration
 ECS_FEATURE_LEVEL=""                                     # Space separated list of features
 
-MR_DMAAP_APP_NAME="dmaap-mr"                             # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router"                       # Name for the Dmaap MR
 MR_STUB_APP_NAME="mr-stub"                               # Name of the MR stub
 MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
 MR_STUB_DISPLAY_NAME="Message Router stub"
@@ -257,10 +257,12 @@ MR_STUB_ALIVE_URL="/"                                    # Base path for mr stub
 MR_DMAAP_ALIVE_URL="/topics"                             # Base path for dmaap-mr alive check
 MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_group for dmaap mr for - docker-compose
 MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka"                                # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka"                 # Kafka app name - the image will not start if the app is named just "kafka"
+MR_KAFKA_PORT=9092                                       # Kafka port number
 MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt"                              # Config files dir on localhost
-
+MR_ZOOKEEPER_PORT="2181"                                 # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt"                             # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs"                      # Config files dir on localhost
 
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
 CR_DISPLAY_NAME="Callback Reciever"
@@ -269,6 +271,8 @@ CR_INTERNAL_PORT=8090                                    # Callback receiver con
 CR_EXTERNAL_SECURE_PORT=8091                             # Callback receiver container external secure port (host -> container)
 CR_INTERNAL_SECURE_PORT=8091                             # Callback receiver container internal secure port (container -> container)
 CR_APP_CALLBACK="/callbacks"                             # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr"                       # Url for callbacks (data from MR - string-encoded JSON items in a JSON array)
+CR_APP_CALLBACK_TEXT="/callbacks-text"                   # Url for callbacks (plain text data)
 CR_ALIVE_URL="/"                                         # Base path for alive check
 CR_COMPOSE_DIR="cr"                                      # Dir in simulator_group for docker-compose
 
@@ -378,6 +382,12 @@ KUBE_PROXY_WEB_EXTERNAL_PORT=8731                        # Kube Http Proxy conta
 KUBE_PROXY_WEB_INTERNAL_PORT=8081                        # Kube Http Proxy container internal port (container -> container)
 KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783                 # Kube Proxy container external secure port (host -> container)
 KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434                 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732                     # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784              # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733                 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785          # Kube Proxy container external secure port, docker (host -> container)
+
 KUBE_PROXY_PATH=""                                       # Proxy url path, will be set if proxy is started
 KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
 KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
index cc510d5..18f7e17 100755 (executable)
@@ -207,9 +207,9 @@ DOCKER_SIM_NWNAME="nonrtric-docker-net"                  # Name of docker privat
 
 KUBE_NONRTRIC_NAMESPACE="nonrtric"                       # Namespace for all nonrtric components
 KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
-KUBE_A1SIM_NAMESPACE="a1-sim"                          # Namespace for a1-p simulators (RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim"                            # Namespace for a1-p simulators (RICSIM)
 KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap"                               # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap"                               # Namespace for sdnc
 
 POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
 POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
@@ -255,7 +255,7 @@ ECS_CONFIG_FILE=application.yaml                         # Config file name
 ECS_VERSION="V1-2"                                       # Version where the types are decoupled from the producer registration
 ECS_FEATURE_LEVEL="INFO-TYPES"                           # Space separated list of features
 
-MR_DMAAP_APP_NAME="dmaap-mr"                             # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router"                       # Name for the Dmaap MR
 MR_STUB_APP_NAME="mr-stub"                               # Name of the MR stub
 MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
 MR_STUB_DISPLAY_NAME="Message Router stub"
@@ -276,10 +276,12 @@ MR_STUB_ALIVE_URL="/"                                    # Base path for mr stub
 MR_DMAAP_ALIVE_URL="/topics"                             # Base path for dmaap-mr alive check
 MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_group for dmaap mr for - docker-compose
 MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka"                                # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka"                 # Kafka app name - the image will not start if the app is named just "kafka"
+MR_KAFKA_PORT=9092                                       # Kafka port number
 MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt"                              # Config files dir on localhost
-
+MR_ZOOKEEPER_PORT="2181"                                 # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt"                             # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs"                      # Config files dir on localhost
 
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
 CR_DISPLAY_NAME="Callback receiver"
@@ -288,6 +290,8 @@ CR_INTERNAL_PORT=8090                                    # Callback receiver con
 CR_EXTERNAL_SECURE_PORT=8091                             # Callback receiver container external secure port (host -> container)
 CR_INTERNAL_SECURE_PORT=8091                             # Callback receiver container internal secure port (container -> container)
 CR_APP_CALLBACK="/callbacks"                             # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr"                       # Url for callbacks (data from MR - string-encoded JSON items in a JSON array)
+CR_APP_CALLBACK_TEXT="/callbacks-text"                   # Url for callbacks (plain text data)
 CR_ALIVE_URL="/"                                         # Base path for alive check
 CR_COMPOSE_DIR="cr"                                      # Dir in simulator_group for docker-compose
 
@@ -441,6 +445,12 @@ KUBE_PROXY_WEB_EXTERNAL_PORT=8731                        # Kube Http Proxy conta
 KUBE_PROXY_WEB_INTERNAL_PORT=8081                        # Kube Http Proxy container internal port (container -> container)
 KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783                 # Kube Proxy container external secure port (host -> container)
 KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434                 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732                     # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784              # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733                 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785          # Kube Proxy container external secure port, docker (host -> container)
+
 KUBE_PROXY_PATH=""                                       # Proxy url path, will be set if proxy is started
 KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
 KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
index e2b53da..546e94c 100755 (executable)
@@ -235,7 +235,7 @@ KUBE_NONRTRIC_NAMESPACE="nonrtric"                       # Namespace for all non
 KUBE_SIM_NAMESPACE="nonrtric-ft"                         # Namespace for simulators (except MR and RICSIM)
 KUBE_A1SIM_NAMESPACE="a1-sim"                            # Namespace for a1-p simulators (RICSIM)
 KUBE_ONAP_NAMESPACE="onap"                               # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap"                               # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap"                               # Namespace for sdnc
 
 POLICY_AGENT_EXTERNAL_PORT=8081                          # Policy Agent container external port (host -> container)
 POLICY_AGENT_INTERNAL_PORT=8081                          # Policy Agent container internal port (container -> container)
@@ -281,7 +281,7 @@ ECS_CONFIG_FILE=application.yaml                         # Config file name
 ECS_VERSION="V1-2"                                       # Version where the types are decoupled from the producer registration
 ECS_FEATURE_LEVEL="INFO-TYPES TYPE-SUBSCRIPTIONS INFO-TYPE-INFO"  # Space separated list of features
 
-MR_DMAAP_APP_NAME="dmaap-mr"                             # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router"                       # Name for the Dmaap MR
 MR_STUB_APP_NAME="mr-stub"                               # Name of the MR stub
 MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
 MR_STUB_DISPLAY_NAME="Message Router stub"
@@ -302,10 +302,12 @@ MR_STUB_ALIVE_URL="/"                                    # Base path for mr stub
 MR_DMAAP_ALIVE_URL="/topics"                             # Base path for dmaap-mr alive check
 MR_DMAAP_COMPOSE_DIR="dmaapmr"                           # Dir in simulator_group for dmaap mr for - docker-compose
 MR_STUB_COMPOSE_DIR="mrstub"                             # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka"                                # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka"                 # Kafka app name - the image will not start if the app is named just "kafka"
+MR_KAFKA_PORT=9092                                       # Kafka port number
 MR_ZOOKEEPER_APP_NAME="zookeeper"                        # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt2"                            # Config files dir on localhost
-
+MR_ZOOKEEPER_PORT="2181"                                 # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt"                             # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs"                      # Config files dir on localhost
 
 CR_APP_NAME="callback-receiver"                          # Name for the Callback receiver
 CR_DISPLAY_NAME="Callback receiver"
@@ -315,6 +317,7 @@ CR_EXTERNAL_SECURE_PORT=8091                             # Callback receiver con
 CR_INTERNAL_SECURE_PORT=8091                             # Callback receiver container internal secure port (container -> container)
 CR_APP_CALLBACK="/callbacks"                             # Url for callbacks
 CR_APP_CALLBACK_MR="/callbacks-mr"                       # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text"                   # Url for callbacks (plain text data)
 CR_ALIVE_URL="/"                                         # Base path for alive check
 CR_COMPOSE_DIR="cr"                                      # Dir in simulator_group for docker-compose
 
@@ -478,6 +481,10 @@ KUBE_PROXY_PATH=""                                       # Proxy url path, will
 KUBE_PROXY_ALIVE_URL="/"                                 # Base path for alive check
 KUBE_PROXY_COMPOSE_DIR="kubeproxy"                       # Dir in simulator_group for docker-compose
 
+PVC_CLEANER_APP_NAME="pvc-cleaner"                      # Name for Persistent Volume Cleaner container
+PVC_CLEANER_DISPLAY_NAME="Persistent Volume Cleaner"    # Display name for Persistent Volume Cleaner
+PVC_CLEANER_COMPOSE_DIR="pvc-cleaner"                   # Dir in simulator_group for yamls
+
 DMAAP_ADP_APP_NAME="dmaapadapterservice"                 # Name for Dmaap Adapter container
 DMAAP_ADP_DISPLAY_NAME="Dmaap Adapter Service"           # Display name for Dmaap Adapter container
 DMAAP_ADP_EXTERNAL_PORT=9087                             # Dmaap Adapter container external port (host -> container)
@@ -511,18 +518,13 @@ DMAAP_MED_HOST_MNT_DIR="./mnt"                          # Mounted db dir, relati
 #DMAAP_MED_CERT_MOUNT_DIR="./cert"
 DMAAP_MED_ALIVE_URL="/status"                            # Base path for alive check
 DMAAP_MED_COMPOSE_DIR="dmaapmed"                         # Dir in simulator_group for docker-compose
-#MAAP_MED_CONFIG_MOUNT_PATH="/app" # Internal container path for configuration
-DMAAP_MED_DATA_MOUNT_PATH="/configs" # Path in container for data file
-DMAAP_MED_DATA_FILE="type_config.json"  # Container data file name
-#DMAAP_MED_CONFIG_FILE=application.yaml                   # Config file name
-
-PVC_CLEANER_APP_NAME="pvc-cleaner"                      # Name for Persistent Volume Cleaner container
-PVC_CLEANER_DISPLAY_NAME="Persistent Volume Cleaner"    # Display name for Persistent Volume Cleaner
-PVC_CLEANER_COMPOSE_DIR="pvc-cleaner"                   # Dir in simulator_group for yamls
+#DMAAP_MED_CONFIG_MOUNT_PATH="/app"                      # Internal container path for configuration
+DMAAP_MED_DATA_MOUNT_PATH="/configs"                     # Path in container for data file
+DMAAP_MED_DATA_FILE="type_config.json"                   # Container data file name
 
 ########################################
 # Setting for common curl-base function
 ########################################
 
-UUID=""                                                   # UUID used as prefix to the policy id to simulate a real UUID
-                                                          # Testscript need to set the UUID otherwise this empty prefix is used
+UUID=""                                                  # UUID used as prefix to the policy id to simulate a real UUID
+                                                         # Test scripts need to set the UUID, otherwise this empty prefix is used
index 8d832d7..78eeb54 100755 (executable)
@@ -28,7 +28,7 @@ __print_args() {
        echo "      [--ricsim-prefix <prefix> ] [--use-local-image <app-nam>+]  [--use-snapshot-image <app-nam>+]"
        echo "      [--use-staging-image <app-nam>+] [--use-release-image <app-nam>+] [--image-repo <repo-address]"
        echo "      [--repo-policy local|remote] [--cluster-timeout <timeout-in seconds>] [--print-stats]"
-       echo "      [--override <override-environment-filename> --pre-clean]"
+       echo "      [--override <override-environment-filename> --pre-clean --gen-stats]"
 }
 
 if [ $# -eq 1 ] && [ "$1" == "help" ]; then
@@ -59,6 +59,7 @@ if [ $# -eq 1 ] && [ "$1" == "help" ]; then
        echo "--print-stats         -  Print current test stats after each test."
        echo "--override <file>     -  Override setting from the file supplied by --env-file"
        echo "--pre-clean           -  Will clean kube resouces when running docker and vice versa"
+       echo "--gen-stats           -  Collect container/pod runtime statistics"
 
        echo ""
        echo "List of app short names supported: "$APP_SHORT_NAMES
@@ -207,6 +208,9 @@ RES_DEVIATION=0
 #Var to control if current stats shall be printed
 PRINT_CURRENT_STATS=0
 
+#Var to control if container/pod runtime statistics shall be collected
+COLLECT_RUNTIME_STATS=0
+
 #File to keep deviation messages
 DEVIATION_FILE=".tmp_deviations"
 rm $DEVIATION_FILE &> /dev/null
@@ -222,6 +226,9 @@ trap_fnc() {
 }
 trap trap_fnc ERR
 
+# Trap to kill subprocesses
+trap "kill 0" EXIT
+
 # Counter for tests
 TEST_SEQUENCE_NR=1
 
@@ -652,6 +659,15 @@ while [ $paramerror -eq 0 ] && [ $foundparm -eq 0 ]; do
                        foundparm=0
                fi
        fi
+       if [ $paramerror -eq 0 ]; then
+               if [ "$1" == "--gen-stats" ]; then
+                       COLLECT_RUNTIME_STATS=1
+                       echo "Option set - Collect runtime statistics"
+                       shift;
+                       foundparm=0
+               fi
+       fi
+
 done
 echo ""
 
@@ -768,7 +784,7 @@ if [ $? -ne 0 ] || [ -z tmp ]; then
        fi
 fi
 if [ $RUNMODE == "DOCKER" ]; then
-       tmp=$(docker-compose version | grep -i 'Docker Compose version')
+       tmp=$(docker-compose version | grep -i 'docker' | grep -i 'compose' | grep -i 'version')
        if [[ "$tmp" == *'v2'* ]]; then
                echo -e $RED"docker-compose is using docker-compose version 2"$ERED
                echo -e $RED"The test environment only support version 1"$ERED
@@ -1449,6 +1465,8 @@ setup_testenvironment() {
        echo -e $BOLD"======================================================="$EBOLD
        echo ""
 
+       LOG_STAT_ARGS=""
+
        for imagename in $APP_SHORT_NAMES; do
                __check_included_image $imagename
                retcode_i=$?
@@ -1464,9 +1482,16 @@ setup_testenvironment() {
 
                        function_pointer="__"$imagename"_initial_setup"
                        $function_pointer
+
+                       function_pointer="__"$imagename"_statisics_setup"
+                       LOG_STAT_ARGS=$LOG_STAT_ARGS" "$($function_pointer)
                fi
        done
 
+       if [ $COLLECT_RUNTIME_STATS -eq 1 ]; then
+               ../common/genstat.sh $RUNMODE $SECONDS $TESTLOGS/$ATC/stat_data.csv $LOG_STAT_ARGS &
+       fi
+
 }
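To illustrate the flow (app and namespace names are hypothetical): each included app's __<app>_statisics_setup function echoes a short-name/app-name[/namespace] tuple, and the concatenated result is handed to the background collector:

    # Possible LOG_STAT_ARGS in KUBE mode with two apps included:
    #   "PRODSTUB prodstub nonrtric-ft RC rappcatalogueservice nonrtric"
    # The collector then samples stats in the background until the script exits (killed by the EXIT trap):
    #   ../common/genstat.sh KUBE $SECONDS $TESTLOGS/$ATC/stat_data.csv $LOG_STAT_ARGS &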
 
 # Function to print the test result, shall be the last cmd in a test script
@@ -1498,8 +1523,16 @@ print_result() {
        echo "Timer measurement in the test script"
        echo "===================================="
        column -t -s $'\t' $TIMER_MEASUREMENTS
+       if [ $RES_PASS != $RES_TEST ]; then
+               echo -e $RED"Measurement may not be reliable when there are failed tests - script timeouts may cause long measurement values"$ERED
+       fi
        echo ""
 
+       if [ $COLLECT_RUNTIME_STATS -eq 1 ]; then
+               echo "Runtime statistics collected in file: "$TESTLOGS/$ATC/stat_data.csv
+               echo ""
+       fi
+
        total=$((RES_PASS+RES_FAIL))
        if [ $RES_TEST -eq 0 ]; then
                echo -e "\033[1mNo tests seem to have been executed. Check the script....\033[0m"
@@ -2142,41 +2175,6 @@ __kube_create_configmap() {
        return 0
 }
 
-# Function to create a configmap in kubernetes
-# args: <configmap-name> <namespace> <labelname> <labelid> <path-to-data-file> <path-to-output-yaml>
-# (Not for test scripts)
-__kube_create_configmapXXXXXXXXXXXXX() {
-       echo -ne " Creating configmap $1 "$SAMELINE
-       #envsubst < $5 > $5"_tmp"
-       #cp $5"_tmp" $5  #Need to copy back to orig file name since create configmap neeed the original file name
-       kubectl create configmap $1  -n $2 --from-file=$5 --dry-run=client -o yaml > $6
-       if [ $? -ne 0 ]; then
-               echo -e " Creating configmap $1 $RED Failed $ERED"
-               ((RES_CONF_FAIL++))
-               return 1
-       fi
-
-       kubectl apply -f $6 1> /dev/null 2> ./tmp/kubeerr
-       if [ $? -ne 0 ]; then
-               echo -e " Creating configmap $1 $RED Apply failed $ERED"
-               echo "  Message: $(<./tmp/kubeerr)"
-               ((RES_CONF_FAIL++))
-               return 1
-       fi
-       kubectl label configmap $1 -n $2 $3"="$4 --overwrite 1> /dev/null 2> ./tmp/kubeerr
-       if [ $? -ne 0 ]; then
-               echo -e " Creating configmap $1 $RED Labeling failed $ERED"
-               echo "  Message: $(<./tmp/kubeerr)"
-               ((RES_CONF_FAIL++))
-               return 1
-       fi
-       # Log the resulting map
-       kubectl get configmap $1 -n $2 -o yaml > $6
-
-       echo -e " Creating configmap $1 $GREEN OK $EGREEN"
-       return 0
-}
-
 # This function runs a kubectl cmd where a single output value is expected, for example get ip with jsonpath filter.
 # The function retries up to the timeout given in the cmd flag '--cluster-timeout'
 # args: <full kubectl cmd with parameters>
@@ -2294,12 +2292,14 @@ clean_environment() {
                if [ $PRE_CLEAN -eq 1 ]; then
                        echo " Clean docker resouces to free up resources, may take time..."
                        ../common/clean_docker.sh 2&>1 /dev/null
+                       echo ""
                fi
        else
                __clean_containers
                if [ $PRE_CLEAN -eq 1 ]; then
-                       echo " Clean kubernetes resouces to free up resources, may take time..."
+                       echo " Cleaning kubernetes resources to free up resources, may take time..."
                        ../common/clean_kube.sh 2&>1 /dev/null
+                       echo ""
                fi
        fi
 }
index 4b4d8da..94ef606 100644 (file)
@@ -25,6 +25,7 @@ import traceback
 import logging
 import socket
 from threading import RLock
+from hashlib import md5
 
 # Disable all logging of GET on reading counters and db
 class AjaxFilter(logging.Filter):
@@ -54,6 +55,7 @@ hosts_set=set()
 # Request and response constants
 CALLBACK_URL="/callbacks/<string:id>"
 CALLBACK_MR_URL="/callbacks-mr/<string:id>" #Json list with string encoded items
+CALLBACK_TEXT_URL="/callbacks-text/<string:id>" # Callback for a plain text string or a json list of strings
 APP_READ_URL="/get-event/<string:id>"
 APP_READ_ALL_URL="/get-all-events/<string:id>"
 DUMP_ALL_URL="/db"
@@ -111,7 +113,14 @@ def receiveresponse(id):
                 cntr_callbacks[id][1]+=1
                 msg=msg_callbacks[id][0]
                 print("Fetching msg for id: "+id+", msg="+str(msg))
-                del msg[TIME_STAMP]
+
+                if (isinstance(msg,dict)):
+                    del msg[TIME_STAMP]
+                    if ("md5" in msg.keys()):
+                        print("EXTRACTED MD5")
+                        msg=msg["md5"]
+                        print("MD5: "+str(msg))
+
                 del msg_callbacks[id][0]
                 return json.dumps(msg),200
             print("No messages for id: "+id)
@@ -139,7 +148,8 @@ def receiveresponse_all(id):
                 msg=msg_callbacks[id]
                 print("Fetching all msgs for id: "+id+", msg="+str(msg))
                 for sub_msg in msg:
-                    del sub_msg[TIME_STAMP]
+                    if (isinstance(sub_msg, dict)):
+                        del sub_msg[TIME_STAMP]
                 del msg_callbacks[id]
                 return json.dumps(msg),200
             print("No messages for id: "+id)
@@ -180,7 +190,8 @@ def events_write(id):
 
         with lock:
             cntr_msg_callbacks += 1
-            msg[TIME_STAMP]=str(datetime.now())
+            if (isinstance(msg, dict)):
+                msg[TIME_STAMP]=str(datetime.now())
             if (id in msg_callbacks.keys()):
                 msg_callbacks[id].append(msg)
             else:
@@ -202,8 +213,9 @@ def events_write(id):
     return 'OK',200
 
 
-# Receive a json callback message with payload fromatted accoirding to output frm the message router
-# URI and payload, (PUT or POST): /callbacks/<id> <json messages>
+# Receive a json callback message with payload formatted according to output from the message router
+# Array of stringified json objects
+# URI and payload, (PUT or POST): /callbacks-mr/<id> <json messages>
 # json is a list of string encoded json items
 # response: OK 200 or 500 for other errors
 @app.route(CALLBACK_MR_URL,
@@ -212,17 +224,21 @@ def events_write_mr(id):
     global msg_callbacks
     global cntr_msg_callbacks
 
+    storeas=request.args.get('storeas') #If set, store the payload as an md5 hash and don't log the payload
+                                        #Large payloads would otherwise overload the server
     try:
         print("Received callback (mr) for id: "+id +", content-type="+request.content_type)
-        remote_host_logging(request)
         print("raw data: str(request.data): "+str(request.data))
+        if (storeas is None):
+            print("raw data: str(request.data): "+str(request.data))
         do_delay()
         try:
             #if (request.content_type == MIME_JSON):
             if (MIME_JSON in request.content_type):
                 data = request.data
                 msg_list = json.loads(data)
-                print("Payload(json): "+str(msg_list))
+                if (storeas is None):
+                    print("Payload(json): "+str(msg_list))
             else:
                 msg_list=[]
                 print("Payload(content-type="+request.content_type+"). Setting empty json as payload")
@@ -234,11 +250,21 @@ def events_write_mr(id):
         with lock:
             remote_host_logging(request)
             for msg in msg_list:
-                print("msg (str): "+str(msg))
-                msg=json.loads(msg)
-                print("msg (json): "+str(msg))
+                if (storeas is None):
+                    msg=json.loads(msg)
+                else:
+                    #Convert to compact json without whitespace between parameter and value.
+                    #It seems that whitespace is added somewhere along the way to this server.
+                    msg=json.loads(msg)
+                    msg=json.dumps(msg, separators=(',', ':'))
+
+                    md5msg={}
+                    md5msg["md5"]=md5(msg.encode('utf-8')).hexdigest()
+                    msg=md5msg
+                    print("msg (json converted to md5 hash): "+str(msg["md5"]))
                 cntr_msg_callbacks += 1
-                msg[TIME_STAMP]=str(datetime.now())
+                if (isinstance(msg, dict)):
+                    msg[TIME_STAMP]=str(datetime.now())
                 if (id in msg_callbacks.keys()):
                     msg_callbacks[id].append(msg)
                 else:
@@ -259,6 +285,73 @@ def events_write_mr(id):
 
     return 'OK',200
 
+# Receive a callback message of a single text message (content type ignored)
+# or a json array of strings (content type json)
+# URI and payload, (PUT or POST): /callbacks-text/<id> <text message>
+# response: OK 200 or 500 for other errors
+@app.route(CALLBACK_TEXT_URL,
+    methods=['PUT','POST'])
+def events_write_text(id):
+    global msg_callbacks
+    global cntr_msg_callbacks
+
+    storeas=request.args.get('storeas') #If set, store the payload as an md5 hash and don't log the payload
+                                        #Large payloads would otherwise overload the server
+    try:
+        print("Received callback for id: "+id +", content-type="+request.content_type)
+        remote_host_logging(request)
+        if (storeas is None):
+            print("raw data: str(request.data): "+str(request.data))
+        do_delay()
+
+        try:
+            msg_list=None
+            if (MIME_JSON in request.content_type):  #Json array of strings
+                msg_list=json.loads(request.data)
+            else:
+                data=request.data.decode("utf-8")    #Assuming string
+                msg_list=[]
+                msg_list.append(data)
+
+            for msg in msg_list:
+                if (storeas == "md5"):
+                    md5msg={}
+                    print("msg: "+str(msg))
+                    print("msg (encoded str): "+str(msg.encode('utf-8')))
+                    md5msg["md5"]=md5(msg.encode('utf-8')).hexdigest()
+                    msg=md5msg
+                    print("msg (data converted to md5 hash): "+str(msg["md5"]))
+
+                if (isinstance(msg, dict)):
+                    msg[TIME_STAMP]=str(datetime.now())
+
+                with lock:
+                    cntr_msg_callbacks += 1
+                    if (id in msg_callbacks.keys()):
+                        msg_callbacks[id].append(msg)
+                    else:
+                        msg_callbacks[id]=[]
+                        msg_callbacks[id].append(msg)
+
+                    if (id in cntr_callbacks.keys()):
+                        cntr_callbacks[id][0] += 1
+                    else:
+                        cntr_callbacks[id]=[]
+                        cntr_callbacks[id].append(1)
+                        cntr_callbacks[id].append(0)
+        except Exception as e:
+            print(CAUGHT_EXCEPTION+str(e))
+            traceback.print_exc()
+            return 'NOTOK',500
+
+
+    except Exception as e:
+        print(CAUGHT_EXCEPTION+str(e))
+        traceback.print_exc()
+        return 'NOTOK',500
+
+    return 'OK',200
+
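A corresponding client sketch for the text callback endpoint above, which accepts a raw string under any content type or a json array of strings under a json content type (same assumptions as the previous sketch):

    import requests

    # Plain text body; with storeas=md5 the stub keeps only the hash of the payload
    resp = requests.post("http://localhost:8090/callbacks-text/job1?storeas=md5",
                         data="a potentially very large text payload",
                         headers={"Content-type": "text/plain"})
    print(resp.status_code)  # expect 200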
 ### Functions for test ###
 
 # Dump the whole db of current callbacks
index e1b9ff9..32beca1 100644 (file)
@@ -43,7 +43,10 @@ http {
            proxy_set_header   X-Real-IP            $remote_addr;
            proxy_set_header   X-Forwarded-For      $proxy_add_x_forwarded_for;
            proxy_pass      http://localhost:2222;
+
+           client_max_body_size 0;
         }
+
     }
     ##
     # SSL Settings
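The added client_max_body_size 0 disables nginx's request body size limit (the default is 1m), so large callback payloads proxied to the stub are no longer rejected with 413 Request Entity Too Large.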
index fb6d674..4b1913f 100644 (file)
@@ -69,11 +69,13 @@ SERVER_ERROR="Server error :"
 
 topic_write=""
 topic_read=""
+generic_topics_upload_baseurl=""
 
 uploader_thread=None
 downloader_thread=None
+generic_uploader_thread=None
 
-# Function to download messages from dmaap
+# Function to upload PMS messages to dmaap
 def dmaap_uploader():
     global msg_requests
     global cntr_msg_requests_fetched
@@ -107,7 +109,7 @@ def dmaap_uploader():
         sleep(0.01)
 
 
-# Function to upload messages to dmaap
+# Function to download PMS messages from dmaap
 def dmaap_downloader():
     global msg_responses
     global cntr_msg_responses_submitted
@@ -150,6 +152,48 @@ def dmaap_downloader():
         except Exception as e:
             sleep(1)
 
+# Function to upload generic messages to dmaap
+def dmaap_generic_uploader():
+    global msg_requests
+    global cntr_msg_requests_fetched
+
+    print("Starting generic uploader")
+
+    headers_json = {'Content-type': 'application/json', 'Accept': '*/*'}
+    headers_text = {'Content-type': 'text/plain', 'Accept': '*/*'}
+
+    while True:
+        if (len(generic_messages)):
+            for topicname in generic_messages.keys():    #topicname contains the path of the topic, e.g. "/events/<topic>"
+                topic_queue=generic_messages[topicname]
+                if (len(topic_queue)>0):
+                    if (topicname.endswith(".text")):
+                        msg=topic_queue[0]
+                        headers=headers_text
+                    else:
+                        msg=topic_queue[0]
+                        msg=json.dumps(msg)
+                        headers=headers_json
+                    url=generic_topics_upload_baseurl+topicname
+                    print("Sending to dmaap : "+ url)
+                    print("Sending to dmaap : "+ msg)
+                    print("Sending to dmaap : "+ str(headers))
+                    try:
+                        resp=requests.post(url, data=msg, headers=headers, timeout=10)
+                        if (resp.status_code < 200 or resp.status_code > 299):
+                            print("Failed, response code: " + str(resp.status_code))
+                            sleep(1)
+                        else:
+                            print("Dmaap response code: " + str(resp.status_code))
+                            print("Dmaap response text: " + str(resp.text))
+                            with lock:
+                                topic_queue.pop(0)
+                                cntr_msg_requests_fetched += 1
+                    except Exception as e:
+                        print("Failed, exception: "+ str(e))
+                        sleep(1)
+        sleep(0.01)
+
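The queues drained here are filled by the generic /events/<topic> POST endpoint further down; topic paths ending in ".text" are forwarded as text/plain, all others as json. A hedged client sketch for seeding both kinds (the base URL is an assumption; the topic names appear elsewhere in this change):

    import json

    import requests

    base = "http://localhost:3904"   # assumed mrstub address

    # Topic path ends in ".text": the raw body is queued and later forwarded as text/plain
    requests.post(base + "/events/unauthenticated.dmaapadp_kafka.text", data="hello",
                  headers={"Content-type": "text/plain"})

    # Any other topic: the body is parsed as json and forwarded with a json content type
    requests.post(base + "/events/unauthenticated.dmaapadp.json",
                  data=json.dumps({"msg": 1}),
                  headers={"Content-type": "application/json"})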
 #I'm alive function
 @app.route('/',
     methods=['GET'])
@@ -157,7 +201,7 @@ def index():
     return 'OK', 200
 
 
-# Helper function to create a Dmaap request message
+# Helper function to create a Dmaap PMS request message
 # args : <GET|PUT|DELETE> <correlation-id> <json-string-payload - may be None> <url>
 # response: json formatted string of a complete Dmaap message
 def create_message(operation, correlation_id, payload, url):
@@ -171,7 +215,7 @@ def create_message(operation, correlation_id, payload, url):
 
 ### MR-stub interface, for MR control
 
-# Send a message to MR
+# Send a PMS message to MR
 # URI and parameters (PUT or POST): /send-request?operation=<GET|PUT|POST|DELETE>&url=<url>
 # response: <correlation-id> (http 200) or 400 for parameter error or 500 for other errors
 @app.route(APP_WRITE_URL,
@@ -212,7 +256,7 @@ def sendrequest():
             print(APP_WRITE_URL+"-"+CAUGHT_EXCEPTION+" "+str(e) + " "+traceback.format_exc())
             return Response(SERVER_ERROR+" "+str(e), status=500, mimetype=MIME_TEXT)
 
-# Receive a message response for MR for the included correlation id
+# Receive a PMS message response for MR for the included correlation id
 # URI and parameter, (GET): /receive-response?correlationid=<correlation-id>
 # response: <json-array of 1 response> 200 or empty 204 or other errors 500
 @app.route(APP_READ_URL,
@@ -243,7 +287,7 @@ def receiveresponse():
 
 ### Dmaap interface ###
 
-# Read messages stream. URI according to agent configuration.
+# Read PMS messages stream. URI according to agent configuration.
 # URI, (GET): /events/A1-POLICY-AGENT-READ/users/policy-agent
 # response: 200 <json array of request messages>, or 500 for other errors
 @app.route(AGENT_READ_URL,
@@ -299,7 +343,7 @@ def events_read():
     print("timeout: "+str(timeout)+", start_time: "+str(start_time)+", current_time: "+str(current_time))
     return Response("[]", status=200, mimetype=MIME_JSON)
 
-# Write messages stream. URI according to agent configuration.
+# Write PMS messages stream. URI according to agent configuration.
 # URI and payload, (PUT or POST): /events/A1-POLICY-AGENT-WRITE <json array of response messages>
 # response: OK 200 or 400 for missing json parameters, 500 for other errors
 @app.route(AGENT_WRITE_URL,
@@ -367,10 +411,10 @@ def oru_read():
         return Response(json.dumps(res), status=200, mimetype=MIME_JSON)
     return Response("[]", status=200, mimetype=MIME_JSON)
 
-# Generic POST/PUT catching all urls starting with /events/<topic>.
+# Generic POST catching all urls starting with /events/<topic>.
 # Writes the message to a queue for that topic
 @app.route("/events/<path>",
-    methods=['PUT','POST'])
+    methods=['POST'])
 def generic_write(path):
     global generic_messages
     global cntr_msg_responses_submitted
@@ -378,8 +422,12 @@ def generic_write(path):
     write_method=str(request.method)
     with lock:
         try:
-            payload=request.json
-            print(write_method+" on "+urlkey+" json=" + json.dumps(payload))
+            if (urlkey.endswith(".text")):
+                payload=str(request.data.decode('UTF-8'))
+                print(write_method+" on "+urlkey+" text=" + payload)
+            else:
+                payload=request.json
+                print(write_method+" on "+urlkey+" json=" + json.dumps(payload))
             topicmsgs=[]
             if (urlkey in generic_messages.keys()):
                 topicmsgs=generic_messages[urlkey]
@@ -407,6 +455,9 @@ def generic_read(path):
     global generic_messages
     global cntr_msg_requests_fetched
 
+    if generic_topics_upload_baseurl:
+        return Response('Url not available when running as mrstub frontend', status=404, mimetype=MIME_TEXT)
+
     urlpath="/events/"+str(path)
     urlkey="/events/"+str(path).split("/")[0] #Extract topic
     print("GET on topic"+urlkey)
@@ -530,7 +581,14 @@ if os.getenv("TOPIC_READ") is not None:
         uploader_thread=Thread(target=dmaap_uploader)
         uploader_thread.start()
 
-else:
+if os.getenv("GENERIC_TOPICS_UPLOAD_BASEURL") is not None:
+    print("GENERIC_TOPICS_UPLOAD_BASEURL:"+os.getenv("GENERIC_TOPICS_UPLOAD_BASEURL"))
+    generic_topics_upload_baseurl=os.getenv("GENERIC_TOPICS_UPLOAD_BASEURL")
+    if generic_topics_upload_baseurl and generic_uploader_thread is None:
+        generic_uploader_thread=Thread(target=dmaap_generic_uploader)
+        generic_uploader_thread.start()
+
+if os.getenv("TOPIC_READ") is None and os.getenv("GENERIC_TOPICS_UPLOAD_BASEURL") is None:
     print("No env variables - OK")
 
 if __name__ == "__main__":
index c548e56..35b5ba0 100644 (file)
@@ -39,7 +39,8 @@ http {
 
         # serve dynamic requests
         location / {
-        proxy_pass      http://localhost:2222;
+            proxy_pass      http://localhost:2222;
+            client_max_body_size 0;
         }
     }
     ##
index b20a9d7..f96db09 100644 (file)
@@ -68,4 +68,7 @@ app:
   configuration-filepath: /opt/app/dmaap-adaptor-service/data/application_configuration.json
   dmaap-base-url: $MR_SERVICE_PATH
   # The url used to address this component. This is used as a callback url sent to other components.
-  dmaap-adapter-base-url: $DMAAP_ADP_SERVICE_PATH
\ No newline at end of file
+  dmaap-adapter-base-url: $DMAAP_ADP_SERVICE_PATH
+  # Kafka bootstrap server. This is only needed if there are Information Types that use a kafkaInputTopic
+  kafka:
+    bootstrap-servers: $MR_KAFKA_SERVICE_PATH
index b6605e3..e36d910 100644 (file)
@@ -2,8 +2,13 @@
   "types": [
      {
         "id": "ExampleInformationType",
-        "dmaapTopicUrl": "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs",
+        "dmaapTopicUrl": "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs?timeout=15000&limit=100",
         "useHttpProxy": ${DMMAAP_ADP_PROXY_FLAG}
-     }
+     },
+     {
+      "id": "ExampleInformationTypeKafka",
+      "kafkaInputTopic": "unauthenticated.dmaapadp_kafka.text",
+      "useHttpProxy": ${DMMAAP_ADP_PROXY_FLAG}
+   }
   ]
 }
\ No newline at end of file
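Types that declare kafkaInputTopic, like ExampleInformationTypeKafka above, are consumed directly from Kafka using the bootstrap server configured via $MR_KAFKA_SERVICE_PATH. A hedged producer sketch for driving such a type in a test (assumes the kafka-python package; the broker address is illustrative):

    from kafka import KafkaProducer

    # Publish a text message on the Kafka input topic declared above
    producer = KafkaProducer(bootstrap_servers="message-router-kafka:9092")
    producer.send("unauthenticated.dmaapadp_kafka.text", b"example message")
    producer.flush()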
diff --git a/test/simulator-group/dmaapadp/mnt/.gitignore b/test/simulator-group/dmaapadp/mnt/.gitignore
new file mode 100644 (file)
index 0000000..cdf0793
--- /dev/null
@@ -0,0 +1,17 @@
+################################################################################
+#   Copyright (c) 2021 Nordix Foundation.                                      #
+#                                                                              #
+#   Licensed under the Apache License, Version 2.0 (the "License");            #
+#   you may not use this file except in compliance with the License.           #
+#   You may obtain a copy of the License at                                    #
+#                                                                              #
+#       http://www.apache.org/licenses/LICENSE-2.0                             #
+#                                                                              #
+#   Unless required by applicable law or agreed to in writing, software        #
+#   distributed under the License is distributed on an "AS IS" BASIS,          #
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+#   See the License for the specific language governing permissions and        #
+#   limitations under the License.                                             #
+################################################################################
+*
+!.gitignore
index e0296fa..aa8a0f1 100644 (file)
@@ -40,7 +40,7 @@ spec:
         - name: DMAAP_MR_ADDR
           value: "$MR_SERVICE_PATH"
         - name: LOG_LEVEL
-          value: "Debug"
+          value: Debug
       volumes:
       - configMap:
           defaultMode: 420
index 21fe551..d0672df 100644 (file)
@@ -32,7 +32,7 @@ services:
       - INFO_PRODUCER_PORT=${DMAAP_MED_CONF_SELF_PORT}
       - INFO_COORD_ADDR=${ECS_SERVICE_PATH}
       - DMAAP_MR_ADDR=${MR_SERVICE_PATH}
-      - LOG_LEVEL="Debug"
+      - LOG_LEVEL=Debug
     volumes:
     - ${DMAAP_MED_HOST_MNT_DIR}/$DMAAP_MED_DATA_FILE:${DMAAP_MED_DATA_MOUNT_PATH}/$DMAAP_MED_DATA_FILE
     labels:
diff --git a/test/simulator-group/dmaapmed/mnt/.gitignore b/test/simulator-group/dmaapmed/mnt/.gitignore
new file mode 100644 (file)
index 0000000..b94353c
--- /dev/null
@@ -0,0 +1,17 @@
+################################################################################
+#   Copyright (c) 2021 Nordix Foundation.                                      #
+#                                                                              #
+#   Licensed under the Apache License, Version 2.0 (the "License");            #
+#   you may not use this file except in compliance with the License.           #
+#   You may obtain a copy of the License at                                    #
+#                                                                              #
+#       http://www.apache.org/licenses/LICENSE-2.0                             #
+#                                                                              #
+#   Unless required by applicable law or agreed to in writing, software        #
+#   distributed under the License is distributed on an "AS IS" BASIS,          #
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+#   See the License for the specific language governing permissions and        #
+#   limitations under the License.                                             #
+################################################################################
+*
+!.gitignore
\ No newline at end of file
index 8a67226..ddb776f 100644 (file)
@@ -3,7 +3,7 @@
      [
        {
          "id": "STD_Fault_Messages",
-         "dmaapTopicUrl": "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages"
+         "dmaapTopicUrl": "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages?timeout=15000&limit=100"
        }
    ]
  }
\ No newline at end of file
index 2b39d15..a4ecc91 100644 (file)
@@ -1,24 +1,24 @@
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: $MR_DMAAP_KUBE_APP_NAME
+  name: $MR_DMAAP_APP_NAME
   namespace: $KUBE_ONAP_NAMESPACE
   labels:
-    run: $MR_DMAAP_KUBE_APP_NAME
+    run: $MR_DMAAP_APP_NAME
     autotest: DMAAPMR
 spec:
   replicas: 1
   selector:
     matchLabels:
-      run: $MR_DMAAP_KUBE_APP_NAME
+      run: $MR_DMAAP_APP_NAME
   template:
     metadata:
       labels:
-        run: $MR_DMAAP_KUBE_APP_NAME
+        run: $MR_DMAAP_APP_NAME
         autotest: DMAAPMR
     spec:
       containers:
-      - name: $MR_DMAAP_KUBE_APP_NAME
+      - name: $MR_DMAAP_APP_NAME
         image: $ONAP_DMAAPMR_IMAGE
         imagePullPolicy: $KUBE_IMAGE_PULL_POLICY
         ports:
@@ -33,11 +33,9 @@ spec:
         - mountPath: /appl/dmaapMR1/bundleconfig/etc/appprops/MsgRtrApi.properties
           subPath: MsgRtrApi.properties
           name: dmaapmr-msg-rtr-api
-        volumeMounts:
         - mountPath: /appl/dmaapMR1/bundleconfig/etc/logback.xml
           subPath: logback.xml
           name: dmaapmr-log-back
-        volumeMounts:
         - mountPath: /appl/dmaapMR1/etc/cadi.properties
           subPath: cadi.properties
           name: dmaapmr-cadi
@@ -58,34 +56,34 @@ spec:
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: $MR_KAFKA_BWDS_NAME
+  name: $MR_KAFKA_APP_NAME
   namespace: $KUBE_ONAP_NAMESPACE
   labels:
-    run: $MR_KAFKA_BWDS_NAME
+    run: $MR_KAFKA_APP_NAME
     autotest: DMAAPMR
 spec:
   replicas: 1
   selector:
     matchLabels:
-      run: $MR_KAFKA_BWDS_NAME
+      run: $MR_KAFKA_APP_NAME
   template:
     metadata:
       labels:
-        run: $MR_KAFKA_BWDS_NAME
+        run: $MR_KAFKA_APP_NAME
         autotest: DMAAPMR
     spec:
       containers:
-      - name: $MR_KAFKA_BWDS_NAME
+      - name: $MR_KAFKA_APP_NAME
         image: $ONAP_KAFKA_IMAGE
         imagePullPolicy: $KUBE_IMAGE_PULL_POLICY
         ports:
         - name: http
-          containerPort: 9095
+          containerPort: $MR_KAFKA_PORT
         env:
         - name: enableCadi
           value: 'false'
         - name: KAFKA_ZOOKEEPER_CONNECT
-          value: 'zookeeper.onap:2181'
+          value: '$MR_ZOOKEEPER_APP_NAME:$MR_ZOOKEEPER_PORT'
         - name: KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS
           value: '40000'
         - name: KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS
@@ -93,11 +91,11 @@ spec:
         - name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP
           value: 'INTERNAL_PLAINTEXT:PLAINTEXT,EXTERNAL_PLAINTEXT:PLAINTEXT'
         - name: KAFKA_ADVERTISED_LISTENERS
-          value: 'INTERNAL_PLAINTEXT://kaka:9092'
-#        - name: KAFKA_ADVERTISED_LISTENERS
-#          value: 'INTERNAL_PLAINTEXT://localhost:9092'
+          value: 'INTERNAL_PLAINTEXT://$MR_KAFKA_APP_NAME:$MR_KAFKA_PORT'
         - name: KAFKA_LISTENERS
-          value: 'EXTERNAL_PLAINTEXT://0.0.0.0:9095,INTERNAL_PLAINTEXT://0.0.0.0:9092'
+          value: 'INTERNAL_PLAINTEXT://0.0.0.0:$MR_KAFKA_PORT'
+        # - name: KAFKA_LISTENERS
+        #   value: 'EXTERNAL_PLAINTEXT://0.0.0.0:9091,INTERNAL_PLAINTEXT://0.0.0.0:$MR_KAFKA_PORT'
         - name: KAFKA_INTER_BROKER_LISTENER_NAME
           value: INTERNAL_PLAINTEXT
         - name: KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE
@@ -105,12 +103,11 @@ spec:
         - name: KAFKA_OPTS
           value: '-Djava.security.auth.login.config=/etc/kafka/secrets/jaas/zk_client_jaas.conf'
         - name: KAFKA_ZOOKEEPER_SET_ACL
-          value: 'true'
+          value: 'false'
         - name: KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR
           value: '1'
         - name: KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS
           value: '1'
-
         volumeMounts:
         - mountPath: /etc/kafka/secrets/jaas/zk_client_jaas.conf
           subPath: zk_client_jaas.conf
@@ -146,7 +143,7 @@ spec:
         imagePullPolicy: $KUBE_IMAGE_PULL_POLICY
         ports:
         - name: http
-          containerPort: 2181
+          containerPort: $MR_ZOOKEEPER_PORT
         env:
         - name: ZOOKEEPER_REPLICAS
           value: '1'
@@ -163,7 +160,7 @@ spec:
         - name: ZOOKEEPER_AUTOPURGE_PURGE_INTERVAL
           value: '24'
         - name: ZOOKEEPER_CLIENT_PORT
-          value: '2181'
+          value: '$MR_ZOOKEEPER_PORT'
         - name: KAFKA_OPTS
           value: '-Djava.security.auth.login.config=/etc/zookeeper/secrets/jaas/zk_server_jaas.conf -Dzookeeper.kerberos.removeHostFromPrincipal=true -Dzookeeper.kerberos.removeRealmFromPrincipal=true -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider -Dzookeeper.requireClientAuthScheme=sasl'
         - name: ZOOKEEPER_SERVER_ID
@@ -1,6 +1,7 @@
 # LICENSE_START=======================================================
 #  org.onap.dmaap
 #  ================================================================================
+#  Copyright © 2021 Nordix Foundation. All rights reserved.
 #  Copyright © 2017 AT&T Intellectual Property. All rights reserved.
 #  ================================================================================
 #  Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,7 +35,7 @@
 ##
 ## Both Cambria and Kafka make use of Zookeeper.
 ##
-config.zk.servers=zookeeper:2181
+config.zk.servers=$MR_ZOOKEEPER_APP_NAME:$MR_ZOOKEEPER_PORT
 
 ###############################################################################
 ##
@@ -45,7 +46,7 @@ config.zk.servers=zookeeper:2181
 ##        if you want to change request.required.acks it can take this one value
 #kafka.metadata.broker.list=localhost:9092,localhost:9093
 #kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
-kafka.metadata.broker.list=kafka:9092
+kafka.metadata.broker.list=$MR_KAFKA_APP_NAME:$MR_KAFKA_PORT
 ##kafka.request.required.acks=-1
 #kafka.client.zookeeper=${config.zk.servers}
 consumer.timeout.ms=100
@@ -135,7 +136,7 @@ cambria.consumer.cache.touchFreqMs=120000
 cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
 consumer.timeout=17
 default.partitions=3
-default.replicas=3
+default.replicas=1
 ##############################################################################
 #100mb
 maxcontentlength=10000
diff --git a/test/simulator-group/dmaapmr/configs/mr/cadi.properties b/test/simulator-group/dmaapmr/configs/mr/cadi.properties
new file mode 100644 (file)
index 0000000..6178e42
--- /dev/null
@@ -0,0 +1,38 @@
+#  ============LICENSE_START===============================================
+#  Copyright (C) 2021 Nordix Foundation. All rights reserved.
+#  ========================================================================
+#  Licensed under the Apache License, Version 2.0 (the "License");
+#  you may not use this file except in compliance with the License.
+#  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#  ============LICENSE_END=================================================
+#
+
+#Removed to disable aaf in test env
+#aaf_locate_url=https://aaf-onap-test.osaaf.org:8095\
+aaf_url=https://AAF_LOCATE_URL/onap.org.osaaf.aaf.service:2.1
+aaf_env=DEV
+aaf_lur=org.onap.aaf.cadi.aaf.v2_0.AAFLurPerm
+
+#Removed to disable aaf in test env
+# cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks
+# cadi_truststore_password=8FyfX+ar;0$uZQ0h9*oXchNX
+
+cadi_keyfile=/appl/dmaapMR1/etc/org.onap.dmaap.mr.keyfile
+
+cadi_alias=dmaapmr@mr.dmaap.onap.org
+cadi_keystore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.p12
+cadi_keystore_password=GDQttV7)BlOvWMf6F7tz&cjy
+cadi_x509_issuers=CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_7, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US
+
+cadi_loglevel=INFO
+cadi_protocols=TLSv1.1,TLSv1.2
+cadi_latitude=37.78187
+cadi_longitude=-122.26147
\ No newline at end of file
@@ -1,5 +1,6 @@
 <!--
      ============LICENSE_START=======================================================
+     Copyright © 2021 Nordix Foundation. All rights reserved.
      Copyright © 2019 AT&T Intellectual Property. All rights reserved.
      ================================================================================
      Licensed under the Apache License, Version 2.0 (the "License");
index 6b5c9c2..f9a5f21 100644 (file)
@@ -26,7 +26,7 @@ services:
     image: $ONAP_ZOOKEEPER_IMAGE
     container_name: $MR_ZOOKEEPER_APP_NAME
     ports:
-      - "2181:2181"
+      - "$MR_ZOOKEEPER_PORT:$MR_ZOOKEEPER_PORT"
     environment:
      ZOOKEEPER_REPLICAS: 1
      ZOOKEEPER_TICK_TIME: 2000
@@ -35,7 +35,7 @@ services:
      ZOOKEEPER_MAX_CLIENT_CNXNS: 200
      ZOOKEEPER_AUTOPURGE_SNAP_RETAIN_COUNT: 3
      ZOOKEEPER_AUTOPURGE_PURGE_INTERVAL: 24
-     ZOOKEEPER_CLIENT_PORT: 2181
+     ZOOKEEPER_CLIENT_PORT: $MR_ZOOKEEPER_PORT
      KAFKA_OPTS: -Djava.security.auth.login.config=/etc/zookeeper/secrets/jaas/zk_server_jaas.conf -Dzookeeper.kerberos.removeHostFromPrincipal=true -Dzookeeper.kerberos.removeRealmFromPrincipal=true -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider -Dzookeeper.requireClientAuthScheme=sasl -Dzookeeper.4lw.commands.whitelist=*
      ZOOKEEPER_SERVER_ID: 1
     volumes:
@@ -50,15 +50,15 @@ services:
    image: $ONAP_KAFKA_IMAGE
    container_name: $MR_KAFKA_APP_NAME
    ports:
-    - "9092:9092"
+    - "$MR_KAFKA_PORT:$MR_KAFKA_PORT"
    environment:
     enableCadi: 'false'
-    KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+    KAFKA_ZOOKEEPER_CONNECT: $MR_ZOOKEEPER_APP_NAME:$MR_ZOOKEEPER_PORT
     KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 40000
     KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: 40000
     KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL_PLAINTEXT:PLAINTEXT,EXTERNAL_PLAINTEXT:PLAINTEXT
-    KAFKA_ADVERTISED_LISTENERS: INTERNAL_PLAINTEXT://kafka:9092
-    KAFKA_LISTENERS: INTERNAL_PLAINTEXT://0.0.0.0:9092
+    KAFKA_ADVERTISED_LISTENERS: INTERNAL_PLAINTEXT://$MR_KAFKA_APP_NAME:$MR_KAFKA_PORT
+    KAFKA_LISTENERS: INTERNAL_PLAINTEXT://0.0.0.0:$MR_KAFKA_PORT
     KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL_PLAINTEXT
     KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE: 'false'
     KAFKA_OPTS: -Djava.security.auth.login.config=/etc/kafka/secrets/jaas/zk_client_jaas.conf
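Design note: KAFKA_ADVERTISED_LISTENERS must advertise an address that other containers can resolve, which is why the hardcoded kafka:9092 is replaced with $MR_KAFKA_APP_NAME:$MR_KAFKA_PORT; a client can bootstrap successfully and still fail on its first produce or fetch if the advertised address does not resolve.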
diff --git a/test/simulator-group/dmaapmr/mnt/.gitignore b/test/simulator-group/dmaapmr/mnt/.gitignore
new file mode 100644 (file)
index 0000000..b94353c
--- /dev/null
@@ -0,0 +1,17 @@
+################################################################################
+#   Copyright (c) 2021 Nordix Foundation.                                      #
+#                                                                              #
+#   Licensed under the Apache License, Version 2.0 (the "License");            #
+#   you may not use this file except in compliance with the License.           #
+#   You may obtain a copy of the License at                                    #
+#                                                                              #
+#       http://www.apache.org/licenses/LICENSE-2.0                             #
+#                                                                              #
+#   Unless required by applicable law or agreed to in writing, software        #
+#   distributed under the License is distributed on an "AS IS" BASIS,          #
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
+#   See the License for the specific language governing permissions and        #
+#   limitations under the License.                                             #
+################################################################################
+*
+!.gitignore
\ No newline at end of file
index dca46d5..79a7601 100644 (file)
@@ -1,6 +1,5 @@
 Client {
-   org.apache.zookeeper.server.auth.DigestLoginModule required
-   username="kafka"
-   password="kafka_secret";
- };
-
+  org.apache.zookeeper.server.auth.DigestLoginModule required
+  username="kafka"
+  password="kafka_secret";
+ };
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmr/mnt/mr/KUBE-MsgRtrApi.properties b/test/simulator-group/dmaapmr/mnt/mr/KUBE-MsgRtrApi.properties
deleted file mode 100644 (file)
index e174b6f..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
-# LICENSE_START=======================================================
-#  org.onap.dmaap
-#  ================================================================================
-#  Copyright © 2020 Nordix Foundation. All rights reserved.
-#  Copyright © 2017 AT&T Intellectual Property. All rights reserved.
-#  ================================================================================
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#  ============LICENSE_END=========================================================
-#
-#  ECOMP is a trademark and service mark of AT&T Intellectual Property.
-#
-###############################################################################
-###############################################################################
-##
-## Cambria API Server config
-##
-## Default values are shown as commented settings.
-##
-###############################################################################
-##
-## HTTP service
-##
-## 3904 is standard as of 7/29/14.
-#
-## Zookeeper Connection
-##
-## Both Cambria and Kafka make use of Zookeeper.
-##
-#config.zk.servers=172.18.1.1
-#config.zk.servers={{.Values.zookeeper.name}}:{{.Values.zookeeper.port}}
-config.zk.servers=zookeeper.onap:2181
-
-#config.zk.root=/fe3c/cambria/config
-
-
-###############################################################################
-##
-## Kafka Connection
-##
-##        Items below are passed through to Kafka's producer and consumer
-##        configurations (after removing "kafka.")
-##        if you want to change request.required.acks it can take this one value
-#kafka.metadata.broker.list=localhost:9092,localhost:9093
-#kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
-kafka.metadata.broker.list=akfak-bwds.onap:9092
-##kafka.request.required.acks=-1
-#kafka.client.zookeeper=${config.zk.servers}
-consumer.timeout.ms=100
-zookeeper.connection.timeout.ms=6000
-zookeeper.session.timeout.ms=20000
-zookeeper.sync.time.ms=2000
-auto.commit.interval.ms=1000
-fetch.message.max.bytes =1000000
-auto.commit.enable=false
-
-#(backoff*retries > zksessiontimeout)
-kafka.rebalance.backoff.ms=10000
-kafka.rebalance.max.retries=6
-
-
-###############################################################################
-##
-##        Secured Config
-##
-##        Some data stored in the config system is sensitive -- API keys and secrets,
-##        for example. to protect it, we use an encryption layer for this section
-##        of the config.
-##
-## The key is a base64 encode AES key. This must be created/configured for
-## each installation.
-#cambria.secureConfig.key=
-##
-## The initialization vector is a 16 byte value specific to the secured store.
-## This must be created/configured for each installation.
-#cambria.secureConfig.iv=
-
-## Southfield Sandbox
-cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
-cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
-authentication.adminSecret=fe3cCompound
-#cambria.secureConfig.key[pc569h]=YT3XPyxEmKCTLI2NK+Sjbw==
-#cambria.secureConfig.iv[pc569h]=rMm2jhR3yVnU+u2V9Ugu3Q==
-
-
-###############################################################################
-##
-## Consumer Caching
-##
-##        Kafka expects live connections from the consumer to the broker, which
-##        obviously doesn't work over connectionless HTTP requests. The Cambria
-##        server proxies HTTP requests into Kafka consumer sessions that are kept
-##        around for later re-use. Not doing so is costly for setup per request,
-##        which would substantially impact a high volume consumer's performance.
-##
-##        This complicates Cambria server failover, because we often need server
-##        A to close its connection before server B brings up the replacement.
-##
-
-## The consumer cache is normally enabled.
-#cambria.consumer.cache.enabled=true
-
-## Cached consumers are cleaned up after a period of disuse. The server inspects
-## consumers every sweepFreqSeconds and will clean up any connections that are
-## dormant for touchFreqMs.
-#cambria.consumer.cache.sweepFreqSeconds=15
-cambria.consumer.cache.touchFreqMs=120000
-##stickforallconsumerrequests=false
-## The cache is managed through ZK. The default value for the ZK connection
-## string is the same as config.zk.servers.
-#cambria.consumer.cache.zkConnect=${config.zk.servers}
-
-##
-## Shared cache information is associated with this node's name. The default
-## name is the hostname plus the HTTP service port this host runs on. (The
-## hostname is determined via InetAddress.getLocalHost ().getCanonicalHostName(),
-## which is not always adequate.) You can set this value explicitly here.
-##
-#cambria.api.node.identifier=<use-something-unique-to-this-instance>
-
-#cambria.rateLimit.maxEmptyPollsPerMinute=30
-#cambria.rateLimitActual.delay.ms=10
-
-###############################################################################
-##
-## Metrics Reporting
-##
-##        This server can report its metrics periodically on a topic.
-##
-#metrics.send.cambria.enabled=true
-#metrics.send.cambria.topic=cambria.apinode.metrics                                  #msgrtr.apinode.metrics.dmaap
-#metrics.send.cambria.sendEverySeconds=60
-
-cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
-consumer.timeout=17
-default.partitions=3
-default.replicas=3
-##############################################################################
-#100mb
-maxcontentlength=10000
-
-
-##############################################################################
-#AAF Properties
-msgRtr.namespace.aaf=org.onap.dmaap.mr.topic
-msgRtr.topicfactory.aaf=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
-enforced.topic.name.AAF=org.onap.dmaap.mr
-forceAAF=false
-transidUEBtopicreqd=false
-defaultNSforUEB=org.onap.dmaap.mr
-##############################################################################
-#Mirror Maker Agent
-
-msgRtr.mirrormakeradmin.aaf=org.onap.dmaap.mr.mirrormaker|*|admin
-msgRtr.mirrormakeruser.aaf=org.onap.dmaap.mr.mirrormaker|*|user
-msgRtr.mirrormakeruser.aaf.create=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
-msgRtr.mirrormaker.timeout=15000
-msgRtr.mirrormaker.topic=org.onap.dmaap.mr.mirrormakeragent
-msgRtr.mirrormaker.consumergroup=mmagentserver
-msgRtr.mirrormaker.consumerid=1
-
-kafka.max.poll.interval.ms=300000
-kafka.heartbeat.interval.ms=60000
-kafka.session.timeout.ms=240000
-kafka.max.poll.records=1000
-
index dc5ddd7..3476d5d 100644 (file)
@@ -1,8 +1,7 @@
 # LICENSE_START=======================================================
 #  org.onap.dmaap
 #  ================================================================================
-#  Copyright © 2020 Nordix Foundation. All rights reserved.
-#  Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+#  Copyright © 2017 AT&T Intellectual Property. All rights reserved.
 #  ================================================================================
 #  Licensed under the Apache License, Version 2.0 (the "License");
 #  you may not use this file except in compliance with the License.
 ##
 ## Both Cambria and Kafka make use of Zookeeper.
 ##
-#config.zk.servers=172.18.1.1
-#config.zk.servers={{.Values.zookeeper.name}}:{{.Values.zookeeper.port}}
 config.zk.servers=zookeeper:2181
 
-#config.zk.root=/fe3c/cambria/config
-
-
 ###############################################################################
 ##
 ## Kafka Connection
@@ -51,7 +45,7 @@ config.zk.servers=zookeeper:2181
 ##        if you want to change request.required.acks it can take this one value
 #kafka.metadata.broker.list=localhost:9092,localhost:9093
 #kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
-kafka.metadata.broker.list=kafka:9092
+kafka.metadata.broker.list=message-router-kafka:9092
 ##kafka.request.required.acks=-1
 #kafka.client.zookeeper=${config.zk.servers}
 consumer.timeout.ms=100
@@ -87,8 +81,6 @@ kafka.rebalance.max.retries=6
 cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
 cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
 authentication.adminSecret=fe3cCompound
-#cambria.secureConfig.key[pc569h]=YT3XPyxEmKCTLI2NK+Sjbw==
-#cambria.secureConfig.iv[pc569h]=rMm2jhR3yVnU+u2V9Ugu3Q==
 
 
 ###############################################################################
@@ -136,13 +128,14 @@ cambria.consumer.cache.touchFreqMs=120000
 ##        This server can report its metrics periodically on a topic.
 ##
 #metrics.send.cambria.enabled=true
-#metrics.send.cambria.topic=cambria.apinode.metrics                                  #msgrtr.apinode.metrics.dmaap
+#metrics.send.cambria.topic=cambria.apinode.metrics
+#msgrtr.apinode.metrics.dmaap
 #metrics.send.cambria.sendEverySeconds=60
 
 cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
 consumer.timeout=17
 default.partitions=3
-default.replicas=3
+default.replicas=1
 ##############################################################################
 #100mb
 maxcontentlength=10000
@@ -170,5 +163,4 @@ msgRtr.mirrormaker.consumerid=1
 kafka.max.poll.interval.ms=300000
 kafka.heartbeat.interval.ms=60000
 kafka.session.timeout.ms=240000
-kafka.max.poll.records=1000
-
+kafka.max.poll.records=1000
\ No newline at end of file
index 4d28b52..cccfbdf 100644 (file)
@@ -1,20 +1,21 @@
-aaf_locate_url=https://aaf-locate.{{ include "common.namespace" . }}:8095
+#Removed to disable aaf in test env
+#aaf_locate_url=https://aaf-onap-test.osaaf.org:8095\
 aaf_url=https://AAF_LOCATE_URL/onap.org.osaaf.aaf.service:2.1
 aaf_env=DEV
 aaf_lur=org.onap.aaf.cadi.aaf.v2_0.AAFLurPerm
 
-cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks
-cadi_truststore_password=enc:mN6GiIzFQxKGDzAXDOs7b4j8DdIX02QrZ9QOWNRpxV3rD6whPCfizSMZkJwxi_FJ
+#Removed to disable aaf in test env
+# cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks
+# cadi_truststore_password=8FyfX+ar;0$uZQ0h9*oXchNX
 
 cadi_keyfile=/appl/dmaapMR1/etc/org.onap.dmaap.mr.keyfile
 
 cadi_alias=dmaapmr@mr.dmaap.onap.org
 cadi_keystore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.p12
-cadi_keystore_password=enc:_JJT2gAEkRzXla5xfDIHal8pIoIB5iIos3USvZQT6sL-l14LpI5fRFR_QIGUCh5W
+cadi_keystore_password=GDQttV7)BlOvWMf6F7tz&cjy
 cadi_x509_issuers=CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_7, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US
 
 cadi_loglevel=INFO
 cadi_protocols=TLSv1.1,TLSv1.2
 cadi_latitude=37.78187
-cadi_longitude=-122.26147
-
+cadi_longitude=-122.26147
\ No newline at end of file
index 02499fb..f02a2db 100644 (file)
@@ -1,7 +1,6 @@
 <!--
      ============LICENSE_START=======================================================
-     Copyright © 2020 Nordix Foundation. All rights reserved.
-     Copyright © 2019 AT&T Intellectual Property. All rights reserved.
+     Copyright © 2019 AT&T Intellectual Property. All rights reserved.
      ================================================================================
      Licensed under the Apache License, Version 2.0 (the "License");
      you may not use this file except in compliance with the License.
   </root>
 
 </configuration>
-
index 9a32a72..3d2767f 100644 (file)
@@ -1,5 +1,4 @@
 Server {
        org.apache.zookeeper.server.auth.DigestLoginModule required
-       user_kafka=kafka_secret;
-};
-
+       user_kafka="kafka_secret";
+};
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmr/mnt2/mr/KUBE-MsgRtrApi.properties b/test/simulator-group/dmaapmr/mnt2/mr/KUBE-MsgRtrApi.properties
deleted file mode 100644 (file)
index 7f7bc41..0000000
+++ /dev/null
@@ -1,166 +0,0 @@
-# LICENSE_START=======================================================
-#  org.onap.dmaap
-#  ================================================================================
-#  Copyright © 2017 AT&T Intellectual Property. All rights reserved.
-#  ================================================================================
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#  ============LICENSE_END=========================================================
-#
-#  ECOMP is a trademark and service mark of AT&T Intellectual Property.
-#
-###############################################################################
-###############################################################################
-##
-## Cambria API Server config
-##
-## Default values are shown as commented settings.
-##
-###############################################################################
-##
-## HTTP service
-##
-## 3904 is standard as of 7/29/14.
-#
-## Zookeeper Connection
-##
-## Both Cambria and Kafka make use of Zookeeper.
-##
-config.zk.servers=zookeeper:2181
-
-###############################################################################
-##
-## Kafka Connection
-##
-##        Items below are passed through to Kafka's producer and consumer
-##        configurations (after removing "kafka.")
-##        if you want to change request.required.acks it can take this one value
-#kafka.metadata.broker.list=localhost:9092,localhost:9093
-#kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
-kafka.metadata.broker.list=kaka:9092
-##kafka.request.required.acks=-1
-#kafka.client.zookeeper=${config.zk.servers}
-consumer.timeout.ms=100
-zookeeper.connection.timeout.ms=6000
-zookeeper.session.timeout.ms=20000
-zookeeper.sync.time.ms=2000
-auto.commit.interval.ms=1000
-fetch.message.max.bytes =1000000
-auto.commit.enable=false
-
-#(backoff*retries > zksessiontimeout)
-kafka.rebalance.backoff.ms=10000
-kafka.rebalance.max.retries=6
-
-
-###############################################################################
-##
-##        Secured Config
-##
-##        Some data stored in the config system is sensitive -- API keys and secrets,
-##        for example. to protect it, we use an encryption layer for this section
-##        of the config.
-##
-## The key is a base64 encode AES key. This must be created/configured for
-## each installation.
-#cambria.secureConfig.key=
-##
-## The initialization vector is a 16 byte value specific to the secured store.
-## This must be created/configured for each installation.
-#cambria.secureConfig.iv=
-
-## Southfield Sandbox
-cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
-cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
-authentication.adminSecret=fe3cCompound
-
-
-###############################################################################
-##
-## Consumer Caching
-##
-##        Kafka expects live connections from the consumer to the broker, which
-##        obviously doesn't work over connectionless HTTP requests. The Cambria
-##        server proxies HTTP requests into Kafka consumer sessions that are kept
-##        around for later re-use. Not doing so is costly for setup per request,
-##        which would substantially impact a high volume consumer's performance.
-##
-##        This complicates Cambria server failover, because we often need server
-##        A to close its connection before server B brings up the replacement.
-##
-
-## The consumer cache is normally enabled.
-#cambria.consumer.cache.enabled=true
-
-## Cached consumers are cleaned up after a period of disuse. The server inspects
-## consumers every sweepFreqSeconds and will clean up any connections that are
-## dormant for touchFreqMs.
-#cambria.consumer.cache.sweepFreqSeconds=15
-cambria.consumer.cache.touchFreqMs=120000
-##stickforallconsumerrequests=false
-## The cache is managed through ZK. The default value for the ZK connection
-## string is the same as config.zk.servers.
-#cambria.consumer.cache.zkConnect=${config.zk.servers}
-
-##
-## Shared cache information is associated with this node's name. The default
-## name is the hostname plus the HTTP service port this host runs on. (The
-## hostname is determined via InetAddress.getLocalHost ().getCanonicalHostName(),
-## which is not always adequate.) You can set this value explicitly here.
-##
-#cambria.api.node.identifier=<use-something-unique-to-this-instance>
-
-#cambria.rateLimit.maxEmptyPollsPerMinute=30
-#cambria.rateLimitActual.delay.ms=10
-
-###############################################################################
-##
-## Metrics Reporting
-##
-##        This server can report its metrics periodically on a topic.
-##
-#metrics.send.cambria.enabled=true
-#metrics.send.cambria.topic=cambria.apinode.metrics
-#msgrtr.apinode.metrics.dmaap
-#metrics.send.cambria.sendEverySeconds=60
-
-cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
-consumer.timeout=17
-default.partitions=3
-default.replicas=3
-##############################################################################
-#100mb
-maxcontentlength=10000
-
-
-##############################################################################
-#AAF Properties
-msgRtr.namespace.aaf=org.onap.dmaap.mr.topic
-msgRtr.topicfactory.aaf=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
-enforced.topic.name.AAF=org.onap.dmaap.mr
-forceAAF=false
-transidUEBtopicreqd=false
-defaultNSforUEB=org.onap.dmaap.mr
-##############################################################################
-#Mirror Maker Agent
-
-msgRtr.mirrormakeradmin.aaf=org.onap.dmaap.mr.mirrormaker|*|admin
-msgRtr.mirrormakeruser.aaf=org.onap.dmaap.mr.mirrormaker|*|user
-msgRtr.mirrormakeruser.aaf.create=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
-msgRtr.mirrormaker.timeout=15000
-msgRtr.mirrormaker.topic=org.onap.dmaap.mr.mirrormakeragent
-msgRtr.mirrormaker.consumergroup=mmagentserver
-msgRtr.mirrormaker.consumerid=1
-
-kafka.max.poll.interval.ms=300000
-kafka.heartbeat.interval.ms=60000
-kafka.session.timeout.ms=240000
-kafka.max.poll.records=1000
\ No newline at end of file
diff --git a/test/simulator-group/dmaapmr/mnt2/mr/cadi.properties b/test/simulator-group/dmaapmr/mnt2/mr/cadi.properties
deleted file mode 100644 (file)
index 3cd26ad..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-#aaf_locate_url=https://aaf-onap-test.osaaf.org:8095\
-aaf_url=https://AAF_LOCATE_URL/onap.org.osaaf.aaf.service:2.1
-aaf_env=DEV
-aaf_lur=org.onap.aaf.cadi.aaf.v2_0.AAFLurPerm
-
-cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks
-cadi_truststore_password=8FyfX+ar;0$uZQ0h9*oXchNX
-
-cadi_keyfile=/appl/dmaapMR1/etc/org.onap.dmaap.mr.keyfile
-
-cadi_alias=dmaapmr@mr.dmaap.onap.org
-cadi_keystore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.p12
-cadi_keystore_password=GDQttV7)BlOvWMf6F7tz&cjy
-cadi_x509_issuers=CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_7, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US
-
-cadi_loglevel=INFO
-cadi_protocols=TLSv1.1,TLSv1.2
-cadi_latitude=37.78187
-cadi_longitude=-122.26147
\ No newline at end of file
index e5d5d8e..7fb0962 100644 (file)
@@ -1,10 +1,10 @@
 apiVersion: v1
 kind: Service
 metadata:
-  name: $MR_DMAAP_KUBE_APP_NAME
+  name: $MR_DMAAP_APP_NAME
   namespace: $KUBE_ONAP_NAMESPACE
   labels:
-    run: $MR_DMAAP_KUBE_APP_NAME
+    run: $MR_DMAAP_APP_NAME
     autotest: DMAAPMR
 spec:
   type: ClusterIP
@@ -18,25 +18,25 @@ spec:
     protocol: TCP
     name: https
   selector:
-    run: $MR_DMAAP_KUBE_APP_NAME
+    run: $MR_DMAAP_APP_NAME
 ---
 apiVersion: v1
 kind: Service
 metadata:
-  name: $MR_KAFKA_BWDS_NAME
+  name: $MR_KAFKA_APP_NAME
   namespace: $KUBE_ONAP_NAMESPACE
   labels:
-    run: $MR_KAFKA_BWDS_NAME
+    run: $MR_KAFKA_APP_NAME
     autotest: DMAAPMR
 spec:
   type: ClusterIP
   ports:
-  - port: 9092
-    targetPort: 9095
+  - port: $MR_KAFKA_PORT
+    targetPort: $MR_KAFKA_PORT
     protocol: TCP
     name: http
   selector:
-    run: $MR_KAFKA_BWDS_NAME
+    run: $MR_KAFKA_APP_NAME
 ---
 apiVersion: v1
 kind: Service
@@ -49,87 +49,9 @@ metadata:
 spec:
   type: ClusterIP
   ports:
-  - port: 2181
-    targetPort: 2181
+  - port: $MR_ZOOKEEPER_PORT
+    targetPort: $MR_ZOOKEEPER_PORT
     protocol: TCP
     name: http
   selector:
     run: $MR_ZOOKEEPER_APP_NAME
-
-
-# ---
-# apiVersion: v1
-# kind: Service
-# metadata:
-#   name: dmaap-mr
-#   namespace: $KUBE_ONAP_NAMESPACE
-#   labels:
-#     run: $MR_DMAAP_KUBE_APP_NAME
-#     autotest: DMAAPMR
-# spec:
-#   type: ClusterIP
-#   ports:
-#   - port: $MR_EXTERNAL_PORT
-#     targetPort: $MR_INTERNAL_PORT
-#     protocol: TCP
-#     name: http
-#   - port: $MR_EXTERNAL_SECURE_PORT
-#     targetPort: $MR_INTERNAL_SECURE_PORT
-#     protocol: TCP
-#     name: https
-#   selector:
-#     run: $MR_DMAAP_KUBE_APP_NAME
-# ---
-# apiVersion: v1
-# kind: Service
-# metadata:
-#   name: dmaap-kafka
-#   namespace: $KUBE_ONAP_NAMESPACE
-#   labels:
-#     run: $MR_KAFKA_BWDS_NAME
-#     autotest: DMAAPMR
-# spec:
-#   type: ClusterIP
-#   ports:
-#   - port: 9092
-#     targetPort: 9092
-#     protocol: TCP
-#     name: http
-#   selector:
-#     run: $MR_KAFKA_BWDS_NAME
-# ---
-# apiVersion: v1
-# kind: Service
-# metadata:
-#   name: kafka
-#   namespace: $KUBE_ONAP_NAMESPACE
-#   labels:
-#     run: $MR_KAFKA_BWDS_NAME
-#     autotest: DMAAPMR
-# spec:
-#   type: ClusterIP
-#   ports:
-#   - port: 9092
-#     targetPort: 9092
-#     protocol: TCP
-#     name: http
-#   selector:
-#     run: $MR_KAFKA_BWDS_NAME
-# ---
-# apiVersion: v1
-# kind: Service
-# metadata:
-#   name: dmaap-zookeeper
-#   namespace: $KUBE_ONAP_NAMESPACE
-#   labels:
-#     run: $MR_ZOOKEEPER_APP_NAME
-#     autotest: DMAAPMR
-# spec:
-#   type: ClusterIP
-#   ports:
-#   - port: 2181
-#     targetPort: 2181
-#     protocol: TCP
-#     name: http
-#   selector:
-    run: $MR_ZOOKEEPER_APP_NAME
\ No newline at end of file
index 0cf0f51..696af4e 100644 (file)
@@ -30,4 +30,6 @@ spec:
         - name: TOPIC_READ
           value: $TOPIC_READ
         - name: TOPIC_WRITE
-          value: $TOPIC_WRITE
\ No newline at end of file
+          value: $TOPIC_WRITE
+        - name: GENERIC_TOPICS_UPLOAD_BASEURL
+          value: $GENERIC_TOPICS_UPLOAD_BASEURL
\ No newline at end of file
index 9101b5b..a1c96c0 100644 (file)
@@ -34,6 +34,7 @@ services:
     environment:
       - TOPIC_READ=${TOPIC_READ}
       - TOPIC_WRITE=${TOPIC_WRITE}
+      - GENERIC_TOPICS_UPLOAD_BASEURL=${GENERIC_TOPICS_UPLOAD_BASEURL}
     labels:
       - "nrttest_app=MR"
       - "nrttest_dp=${MR_STUB_DISPLAY_NAME}"
index 45f0f08..c794e67 100644 (file)
@@ -2,7 +2,7 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: $SDNC_APP_NAME
-  namespace: $KUBE_SNDC_NAMESPACE
+  namespace: $KUBE_SDNC_NAMESPACE
   labels:
     run: $SDNC_APP_NAME
     autotest: SDNC
@@ -46,7 +46,7 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: $SDNC_DB_APP_NAME
-  namespace: $KUBE_SNDC_NAMESPACE
+  namespace: $KUBE_SDNC_NAMESPACE
   labels:
     run: $SDNC_DB_APP_NAME
     autotest: SDNC
index 8861fe0..1824cd1 100644 (file)
@@ -2,7 +2,7 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: $SDNC_APP_NAME
-  namespace: $KUBE_SNDC_NAMESPACE
+  namespace: $KUBE_SDNC_NAMESPACE
   labels:
     run: $SDNC_APP_NAME
     autotest: SDNC
@@ -62,7 +62,7 @@ apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: $SDNC_DB_APP_NAME
-  namespace: $KUBE_SNDC_NAMESPACE
+  namespace: $KUBE_SDNC_NAMESPACE
   labels:
     run: $SDNC_DB_APP_NAME
     autotest: SDNC
index 45af8b6..f172c1c 100644 (file)
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: $SDNC_APP_NAME
-  namespace: $KUBE_SNDC_NAMESPACE
+  namespace: $KUBE_SDNC_NAMESPACE
   labels:
     run: $SDNC_APP_NAME
     autotest: SDNC
@@ -24,7 +24,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: dbhost
-  namespace: $KUBE_SNDC_NAMESPACE
+  namespace: $KUBE_SDNC_NAMESPACE
   labels:
     run: $SDNC_DB_APP_NAME
     autotest: SDNC
@@ -42,7 +42,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: sdnctldb01
-  namespace: $KUBE_SNDC_NAMESPACE
+  namespace: $KUBE_SDNC_NAMESPACE
   labels:
     run: $SDNC_DB_APP_NAME
     autotest: SDNC
index 1ca37b3..ec542c1 100644 (file)
@@ -2,25 +2,52 @@
 
 This consumer creates a job of type `STD_Fault_Messages` in the Information Coordinator Service (ICS). When it receives messages, it checks if they are link failure messages. If they are, it checks if the event severity is other than normal. If so, it looks up the O-DU ID mapped to the O-RU the message originates from and sends a configuration message to the O-DU through SDNC. If the event severity is normal, it logs, on `Debug` level, that the link failure has been cleared.
 
-The producer takes a number of environment variables, described below, as configuration.
+## Configuration
+
+The consumer takes a number of environment variables, described below, as configuration.
 
 >- CONSUMER_HOST        **Required**. The host for the consumer.                                   Example: `http://mrproducer`
->- CONSUMER_HOST        **Required**. The port for the consumer.                                   Example: `8095`
->- LOG_LEVEL            Optional. The log level, which can be `Error`, `Warn`, `Info` or `Debug`.  Defaults to `Info`.
+>- CONSUMER_PORT        **Required**. The port for the consumer.                                   Example: `8095`
+>- CONSUMER_CERT_PATH   Optional. The path to the certificate to use for https.                    Defaults to `security/consumer.crt`.
+>- CONSUMER_KEY_PATH    Optional. The path to the key of the certificate to use for https.         Defaults to `security/consumer.key`.
 >- INFO_COORD_ADDR      Optional. The address of the Information Coordinator.                      Defaults to `http://enrichmentservice:8083`.
->- SDNR_HOST            Optional. The host for SDNR.                                               Defaults to `http://localhost`.
->- SDNR_PORT            Optional. The port for SDNR.                                               Defaults to `3904`.
+>- SDNR_ADDR            Optional. The address for SDNR.                                            Defaults to `http://localhost:3904`.
 >- SDNR_USER            Optional. The user for the SDNR.                                           Defaults to `admin`.
 >- SDNR_PASSWORD        Optional. The password for the SDNR user.                                  Defaults to `Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U`.
 >- ORU_TO_ODU_MAP_FILE  Optional. The file containing the mapping from O-RU ID to O-DU ID.         Defaults to `o-ru-to-o-du-map.csv`.
+>- LOG_LEVEL            Optional. The log level, which can be `Error`, `Warn`, `Info` or `Debug`.  Defaults to `Info`.
+
+Any of the addresses used by this product can be configured to use https, by specifying `https` as the scheme of the address URI. The client does not verify the server certificate. The consumer's own callback server listens using the scheme of the configured consumer host address.
+
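+A minimal sketch of starting the consumer with a few of these variables set (the values here are hypothetical; run from the project root):
+
+    CONSUMER_HOST=https://mrproducer CONSUMER_PORT=8095 SDNR_ADDR=http://sdnr:3904 go run .
+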
+The configured public key and certificate shall be PEM-encoded. A self-signed certificate and key are provided in the `security` folder of the project. These files should be replaced for production. To generate a self-signed key and certificate, use the example command below:
+
+    openssl req -new -x509 -sha256 -key server.key -out server.crt -days 3650
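+
+The command assumes an existing private key, `server.key`; if one is needed, it can be generated first, for example:
+
+    openssl genrsa -out server.key 2048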
 
-The creation of the job is not done when the consumer is started. Instead the consumer provides a REST API where it can be started and stopped, described below.
+## Functionality
+
+The job is not created when the consumer is started. Instead, the consumer provides a REST API through which it can be started and stopped, described below (example calls follow the list).
 
 >- /start  Creates the job in ICS.
 >- /stop   Deletes the job in ICS.
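+
+Hypothetical example calls, assuming the consumer listens on `localhost:8095` and the endpoints accept plain GET requests:
+
+    curl http://localhost:8095/start
+    curl http://localhost:8095/stop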
 
 If the consumer is shut down with a SIGTERM, it will also delete the job before exiting.
 
+## Development
+
+To make it easy to test during development of the consumer, two stubs are provided in the `stub` folder.
+
+One, under the `producer` folder, called `producer`, stubs the producer and pushes an array containing one message, with `eventSeverity` alternating between `NORMAL` and `CRITICAL`. To build and start the stub, do the following:
+>1. cd stub/producer
+>2. go build
+>3. ./producer
+
+The other, under the `sdnr` folder, called `sdnr`, listens for REST calls at startup and prints their bodies. By default, it listens on port `3904`, but this can be overridden by passing a `-port [PORT]` flag when starting the stub. To build and start the stub, do the following:
+>1. cd stub/sdnr
+>2. go build
+>3. ./sdnr
+
+Mocks needed for unit tests have been generated using `github.com/stretchr/testify/mock` and are checked in under the `mocks` folder. **Note!** If any of the mocked interfaces change, a new mock for that interface must be generated and checked in.
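+
+As a sketch only, a mock can be regenerated with the mockery tool (the exact flags depend on the installed mockery version; the interface name here is just an example):
+
+    mockery --name HTTPClient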
+
 ## License
 
 Copyright (C) 2021 Nordix Foundation.
index 754bba1..2eaa371 100644 (file)
@@ -9,10 +9,14 @@ require (
 
 require (
        github.com/davecgh/go-spew v1.1.1 // indirect
-       github.com/google/uuid v1.3.0 // indirect
-       github.com/gorilla/mux v1.8.0 // indirect
+       github.com/gorilla/mux v1.8.0
        github.com/pmezard/go-difflib v1.0.0 // indirect
        github.com/stretchr/objx v0.1.1 // indirect
        golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 // indirect
        gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
 )
+
+require (
+       github.com/hashicorp/go-cleanhttp v0.5.1 // indirect
+       github.com/hashicorp/go-retryablehttp v0.7.0 // indirect
+)
index 6ce7604..970999b 100644 (file)
@@ -5,6 +5,11 @@ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
 github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-retryablehttp v0.7.0 h1:eu1EI/mbirUgP5C8hVsTNaGZreBDlYiwC1FZWkvQPQ4=
+github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
index 43656b7..718f435 100644 (file)
@@ -21,6 +21,7 @@
 package config
 
 import (
+       "fmt"
        "os"
        "strconv"
 
@@ -28,31 +29,37 @@ import (
 )
 
 type Config struct {
-       LogLevel               log.Level
        ConsumerHost           string
        ConsumerPort           int
        InfoCoordinatorAddress string
-       SDNRHost               string
-       SDNRPort               int
+       SDNRAddress            string
        SDNRUser               string
        SDNPassword            string
        ORUToODUMapFile        string
+       ConsumerCertPath       string
+       ConsumerKeyPath        string
+       LogLevel               log.Level
 }
 
 func New() *Config {
        return &Config{
-               LogLevel:               getLogLevel(),
                ConsumerHost:           getEnv("CONSUMER_HOST", ""),
                ConsumerPort:           getEnvAsInt("CONSUMER_PORT", 0),
                InfoCoordinatorAddress: getEnv("INFO_COORD_ADDR", "http://enrichmentservice:8083"),
-               SDNRHost:               getEnv("SDNR_HOST", "http://localhost"),
-               SDNRPort:               getEnvAsInt("SDNR_PORT", 3904),
+               SDNRAddress:            getEnv("SDNR_ADDR", "http://localhost:3904"),
                SDNRUser:               getEnv("SDNR_USER", "admin"),
                SDNPassword:            getEnv("SDNR_PASSWORD", "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U"),
                ORUToODUMapFile:        getEnv("ORU_TO_ODU_MAP_FILE", "o-ru-to-o-du-map.csv"),
+               ConsumerCertPath:       getEnv("CONSUMER_CERT_PATH", "security/consumer.crt"),
+               ConsumerKeyPath:        getEnv("CONSUMER_KEY_PATH", "security/consumer.key"),
+               LogLevel:               getLogLevel(),
        }
 }
 
+func (c Config) String() string {
+       return fmt.Sprintf("ConsumerHost: %v, ConsumerPort: %v, InfoCoordinatorAddress: %v, SDNRAddress: %v, SDNRUser: %v, SDNRPassword: %v, ORUToODUMapFile: %v, ConsumerCertPath: %v, ConsumerKeyPath: %v, LogLevel: %v", c.ConsumerHost, c.ConsumerPort, c.InfoCoordinatorAddress, c.SDNRAddress, c.SDNRUser, c.SDNPassword, c.ORUToODUMapFile, c.ConsumerCertPath, c.ConsumerKeyPath, c.LogLevel)
+}
+
 func getEnv(key string, defaultVal string) string {
        if value, exists := os.LookupEnv(key); exists {
                return value
index a5b1624..3d9983a 100644 (file)
@@ -31,28 +31,30 @@ import (
 
 func TestNew_envVarsSetConfigContainSetValues(t *testing.T) {
        assertions := require.New(t)
-       os.Setenv("LOG_LEVEL", "Debug")
        os.Setenv("CONSUMER_HOST", "consumerHost")
        os.Setenv("CONSUMER_PORT", "8095")
        os.Setenv("INFO_COORD_ADDR", "infoCoordAddr")
-       os.Setenv("SDNR_HOST", "sdnrHost")
-       os.Setenv("SDNR_PORT", "3908")
+       os.Setenv("SDNR_ADDR", "sdnrHost:3908")
        os.Setenv("SDNR_USER", "admin")
        os.Setenv("SDNR_PASSWORD", "pwd")
        os.Setenv("ORU_TO_ODU_MAP_FILE", "file")
+       os.Setenv("CONSUMER_CERT_PATH", "cert")
+       os.Setenv("CONSUMER_KEY_PATH", "key")
+       os.Setenv("LOG_LEVEL", "Debug")
        t.Cleanup(func() {
                os.Clearenv()
        })
        wantConfig := Config{
-               LogLevel:               log.DebugLevel,
                ConsumerHost:           "consumerHost",
                ConsumerPort:           8095,
                InfoCoordinatorAddress: "infoCoordAddr",
-               SDNRHost:               "sdnrHost",
-               SDNRPort:               3908,
+               SDNRAddress:            "sdnrHost:3908",
                SDNRUser:               "admin",
                SDNPassword:            "pwd",
                ORUToODUMapFile:        "file",
+               ConsumerCertPath:       "cert",
+               ConsumerKeyPath:        "key",
+               LogLevel:               log.DebugLevel,
        }
 
        got := New()
@@ -70,15 +72,16 @@ func TestNew_faultyIntValueSetConfigContainDefaultValueAndWarnInLog(t *testing.T
                os.Clearenv()
        })
        wantConfig := Config{
-               LogLevel:               log.InfoLevel,
                ConsumerHost:           "",
                ConsumerPort:           0,
                InfoCoordinatorAddress: "http://enrichmentservice:8083",
-               SDNRHost:               "http://localhost",
-               SDNRPort:               3904,
+               SDNRAddress:            "http://localhost:3904",
                SDNRUser:               "admin",
                SDNPassword:            "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U",
                ORUToODUMapFile:        "o-ru-to-o-du-map.csv",
+               ConsumerCertPath:       "security/consumer.crt",
+               ConsumerKeyPath:        "security/consumer.key",
+               LogLevel:               log.InfoLevel,
        }
 
        got := New()
@@ -99,15 +102,16 @@ func TestNew_envFaultyLogLevelConfigContainDefaultValues(t *testing.T) {
                os.Clearenv()
        })
        wantConfig := Config{
-               LogLevel:               log.InfoLevel,
                ConsumerHost:           "",
                ConsumerPort:           0,
                InfoCoordinatorAddress: "http://enrichmentservice:8083",
-               SDNRHost:               "http://localhost",
-               SDNRPort:               3904,
+               SDNRAddress:            "http://localhost:3904",
                SDNRUser:               "admin",
                SDNPassword:            "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U",
                ORUToODUMapFile:        "o-ru-to-o-du-map.csv",
+               ConsumerCertPath:       "security/consumer.crt",
+               ConsumerKeyPath:        "security/consumer.key",
+               LogLevel:               log.InfoLevel,
        }
        got := New()
        assertions.Equal(&wantConfig, got)
index 01f121a..3aecf45 100644 (file)
@@ -76,10 +76,10 @@ func (lfh LinkFailureHandler) sendUnlockMessage(oRuId string) {
                if error := restclient.Put(lfh.config.SDNRAddress+sdnrPath, unlockMessage, lfh.client, lfh.config.SDNRUser, lfh.config.SDNRPassword); error == nil {
                        log.Debugf("Sent unlock message for O-RU: %v to O-DU: %v.", oRuId, oDuId)
                } else {
-                       log.Warn(error)
+                       log.Warn("Send of unlock message failed due to ", error)
                }
        } else {
-               log.Warn(err)
+               log.Warn("Send of unlock message failed due to ", err)
        }
 
 }
index 036819a..fdd0549 100644 (file)
@@ -22,9 +22,15 @@ package restclient
 
 import (
        "bytes"
+       "crypto/tls"
        "fmt"
        "io"
+       "math"
        "net/http"
+       "net/url"
+       "time"
+
+       "github.com/hashicorp/go-retryablehttp"
 )
 
 type RequestError struct {
@@ -33,7 +39,7 @@ type RequestError struct {
 }
 
 func (e RequestError) Error() string {
-       return fmt.Sprintf("Request failed due to error response with status: %v and body: %v", e.StatusCode, string(e.Body))
+       return fmt.Sprintf("error response with status: %v and body: %v", e.StatusCode, string(e.Body))
 }
 
 // HTTPClient interface
@@ -55,6 +61,40 @@ func Delete(url string, client HTTPClient) error {
        return do(http.MethodDelete, url, nil, client)
 }
 
+// CreateClientCertificate loads an x509 key pair from the given certificate and key
+// files, wrapping any failure in a descriptive error.
+func CreateClientCertificate(certPath string, keyPath string) (tls.Certificate, error) {
+       if cert, err := tls.LoadX509KeyPair(certPath, keyPath); err == nil {
+               return cert, nil
+       } else {
+               return tls.Certificate{}, fmt.Errorf("cannot create x509 keypair from cert file %s and key file %s due to: %v", certPath, keyPath, err)
+       }
+}
+
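+// CreateRetryClient returns a standard *http.Client that retries failed requests
+// indefinitely (RetryMax is math.MaxInt), with the wait between retries capped at
+// one minute, over a TLS transport that skips server certificate verification.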
+func CreateRetryClient(cert tls.Certificate) *http.Client {
+       rawRetryClient := retryablehttp.NewClient()
+       rawRetryClient.RetryWaitMax = time.Minute
+       rawRetryClient.RetryMax = math.MaxInt
+       rawRetryClient.HTTPClient.Transport = getSecureTransportWithoutVerify(cert)
+
+       client := rawRetryClient.StandardClient()
+       return client
+}
+
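+// IsUrlSecure reports whether the given URL uses the https scheme.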
+func IsUrlSecure(configUrl string) bool {
+       u, _ := url.Parse(configUrl)
+       return u.Scheme == "https"
+}
+
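+// getSecureTransportWithoutVerify builds a transport that presents the given client
+// certificate but accepts any server certificate (InsecureSkipVerify is set).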
+func getSecureTransportWithoutVerify(cert tls.Certificate) *http.Transport {
+       return &http.Transport{
+               TLSClientConfig: &tls.Config{
+                       Certificates: []tls.Certificate{
+                               cert,
+                       },
+                       InsecureSkipVerify: true,
+               },
+       }
+}
+
 func do(method string, url string, body []byte, client HTTPClient, userInfo ...string) error {
        if req, reqErr := http.NewRequest(method, url, bytes.NewBuffer(body)); reqErr == nil {
                if body != nil {
index 8271fd0..2c915fd 100644 (file)
@@ -21,11 +21,16 @@ package restclient
 
 import (
        "bytes"
+       "crypto/tls"
        "fmt"
        "io/ioutil"
+       "math"
        "net/http"
+       "reflect"
        "testing"
+       "time"
 
+       "github.com/hashicorp/go-retryablehttp"
        "github.com/stretchr/testify/mock"
        "github.com/stretchr/testify/require"
        "oransc.org/usecase/oruclosedloop/mocks"
@@ -38,7 +43,7 @@ func TestRequestError_Error(t *testing.T) {
                StatusCode: http.StatusBadRequest,
                Body:       []byte("error"),
        }
-       assertions.Equal("Request failed due to error response with status: 400 and body: error", actualError.Error())
+       assertions.Equal("error response with status: 400 and body: error", actualError.Error())
 }
 
 func TestPutWithoutAuth(t *testing.T) {
@@ -167,3 +172,63 @@ func Test_doErrorCases(t *testing.T) {
                })
        }
 }
+
+func Test_createClientCertificate(t *testing.T) {
+       assertions := require.New(t)
+       wantedCert, _ := tls.LoadX509KeyPair("../../security/consumer.crt", "../../security/consumer.key")
+       type args struct {
+               certPath string
+               keyPath  string
+       }
+       tests := []struct {
+               name     string
+               args     args
+               wantCert tls.Certificate
+               wantErr  error
+       }{
+               {
+                       name: "Paths to cert info ok should return certificate",
+                       args: args{
+                               certPath: "../../security/consumer.crt",
+                               keyPath:  "../../security/consumer.key",
+                       },
+                       wantCert: wantedCert,
+               },
+               {
+                       name: "Paths to cert info not ok should return error with info about the failure",
+                       args: args{
+                               certPath: "wrong_cert",
+                               keyPath:  "wrong_key",
+                       },
+                       wantErr: fmt.Errorf("cannot create x509 keypair from cert file wrong_cert and key file wrong_key due to: open wrong_cert: no such file or directory"),
+               },
+       }
+       for _, tt := range tests {
+               t.Run(tt.name, func(t *testing.T) {
+                       cert, err := CreateClientCertificate(tt.args.certPath, tt.args.keyPath)
+                       assertions.Equal(tt.wantCert, cert, tt.name)
+                       assertions.Equal(tt.wantErr, err, tt.name)
+               })
+       }
+}
+
+func Test_CreateRetryClient(t *testing.T) {
+       assertions := require.New(t)
+
+       client := CreateRetryClient(tls.Certificate{})
+
+       transport := client.Transport
+       assertions.Equal("*retryablehttp.RoundTripper", reflect.TypeOf(transport).String())
+       retryableTransport := transport.(*retryablehttp.RoundTripper)
+       retryableClient := retryableTransport.Client
+       assertions.Equal(time.Minute, retryableClient.RetryWaitMax)
+       assertions.Equal(math.MaxInt, retryableClient.RetryMax)
+}
+
+func TestIsUrlSecure(t *testing.T) {
+       assertions := require.New(t)
+
+       assertions.True(IsUrlSecure("https://url"))
+
+       assertions.False(IsUrlSecure("http://url"))
+}
index bef9e24..b7d6895 100644 (file)
 package main
 
 import (
+       "crypto/tls"
        "encoding/json"
        "fmt"
        "net/http"
        "os"
        "os/signal"
        "syscall"
-       "time"
 
        "github.com/gorilla/mux"
        log "github.com/sirupsen/logrus"
@@ -41,7 +41,6 @@ type Server interface {
        ListenAndServe() error
 }
 
-const timeoutHTTPClient = time.Second * 5
 const jobId = "14e7bb84-a44d-44c1-90b7-6995a92ad43c"
 
 var jobRegistrationInfo = struct {
@@ -70,16 +69,13 @@ func doInit() {
        configuration = config.New()
 
        log.SetLevel(configuration.LogLevel)
-
-       client = &http.Client{
-               Timeout: timeoutHTTPClient,
-       }
+       log.Debug("Using configuration: ", configuration)
 
        consumerPort = fmt.Sprint(configuration.ConsumerPort)
        jobRegistrationInfo.JobResultUri = configuration.ConsumerHost + ":" + consumerPort
 
        linkfailureConfig = linkfailure.Configuration{
-               SDNRAddress:  configuration.SDNRHost + ":" + fmt.Sprint(configuration.SDNRPort),
+               SDNRAddress:  configuration.SDNRAddress,
                SDNRUser:     configuration.SDNRUser,
                SDNRPassword: configuration.SDNPassword,
        }
@@ -95,12 +91,16 @@ func main() {
                log.Fatalf("Unable to create LookupService due to inability to get O-RU-ID to O-DU-ID map. Cause: %v", initErr)
        }
 
+       var cert tls.Certificate
+       if c, err := restclient.CreateClientCertificate(configuration.ConsumerCertPath, configuration.ConsumerKeyPath); err == nil {
+               cert = c
+       } else {
+               log.Fatalf("Stopping consumer due to error: %v", err)
+       }
+       client = restclient.CreateRetryClient(cert)
+
        go func() {
-               startServer(&http.Server{
-                       Addr:    ":" + consumerPort,
-                       Handler: getRouter(),
-               })
-               deleteJob()
+               startServer()
                os.Exit(1) // If the startServer function exits, it is because there has been a failure in the server, so we exit.
        }()
 
@@ -116,6 +116,11 @@ func validateConfiguration(configuration *config.Config) error {
        if configuration.ConsumerHost == "" || configuration.ConsumerPort == 0 {
                return fmt.Errorf("consumer host and port must be provided")
        }
+
+       if configuration.ConsumerCertPath == "" || configuration.ConsumerKeyPath == "" {
+               return fmt.Errorf("missing CONSUMER_CERT_PATH and/or CONSUMER_KEY_PATH")
+       }
+
        return nil
 }
 
@@ -135,8 +140,14 @@ func getRouter() *mux.Router {
        return r
 }
 
-func startServer(server Server) {
-       if err := server.ListenAndServe(); err != nil {
+func startServer() {
+       var err error
+       if restclient.IsUrlSecure(configuration.ConsumerHost) {
+               err = http.ListenAndServeTLS(fmt.Sprintf(":%v", configuration.ConsumerPort), configuration.ConsumerCertPath, configuration.ConsumerKeyPath, getRouter())
+       } else {
+               err = http.ListenAndServe(fmt.Sprintf(":%v", configuration.ConsumerPort), getRouter())
+       }
+       if err != nil {
                log.Errorf("Server stopped unintentionally due to: %v. Deleting job.", err)
                if deleteErr := deleteJob(); deleteErr != nil {
                        log.Error(fmt.Sprintf("Unable to delete consumer job due to: %v. Please remove job %v manually.", deleteErr, jobId))
index 99419bf..3fcb23e 100644 (file)
@@ -54,15 +54,16 @@ func Test_init(t *testing.T) {
        doInit()
 
        wantedConfiguration := &config.Config{
-               LogLevel:               log.InfoLevel,
                ConsumerHost:           "consumerHost",
                ConsumerPort:           8095,
                InfoCoordinatorAddress: "http://enrichmentservice:8083",
-               SDNRHost:               "http://localhost",
-               SDNRPort:               3904,
+               SDNRAddress:            "http://localhost:3904",
                SDNRUser:               "admin",
                SDNPassword:            "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U",
                ORUToODUMapFile:        "o-ru-to-o-du-map.csv",
+               ConsumerCertPath:       "security/consumer.crt",
+               ConsumerKeyPath:        "security/consumer.key",
+               LogLevel:               log.InfoLevel,
        }
        assertions.Equal(wantedConfiguration, configuration)
 
@@ -70,7 +71,7 @@ func Test_init(t *testing.T) {
        assertions.Equal(wantedConfiguration.ConsumerHost+":"+fmt.Sprint(wantedConfiguration.ConsumerPort), jobRegistrationInfo.JobResultUri)
 
        wantedLinkFailureConfig := linkfailure.Configuration{
-               SDNRAddress:  wantedConfiguration.SDNRHost + ":" + fmt.Sprint(wantedConfiguration.SDNRPort),
+               SDNRAddress:  wantedConfiguration.SDNRAddress,
                SDNRUser:     wantedConfiguration.SDNRUser,
                SDNRPassword: wantedConfiguration.SDNPassword,
        }
@@ -92,8 +93,10 @@ func Test_validateConfiguration(t *testing.T) {
                        name: "Valid config, should return nil",
                        args: args{
                                configuration: &config.Config{
-                                       ConsumerHost: "host",
-                                       ConsumerPort: 80,
+                                       ConsumerHost:     "host",
+                                       ConsumerPort:     80,
+                                       ConsumerCertPath: "security/consumer.crt",
+                                       ConsumerKeyPath:  "security/consumer.key",
                                },
                        },
                },
@@ -188,28 +191,6 @@ func Test_getRouter_shouldContainAllPathsWithHandlers(t *testing.T) {
        assertions.Equal("/admin/stop", path)
 }
 
-func Test_startServer_shouldDeleteJobWhenServerStopsWithErrorAndLog(t *testing.T) {
-       assertions := require.New(t)
-
-       var buf bytes.Buffer
-       log.SetOutput(&buf)
-
-       os.Setenv("CONSUMER_PORT", "wrong")
-       t.Cleanup(func() {
-               log.SetOutput(os.Stderr)
-       })
-
-       mockServer := &mocks.Server{}
-       mockServer.On("ListenAndServe").Return(errors.New("Server failure"))
-
-       startServer(mockServer)
-
-       log := buf.String()
-       assertions.Contains(log, "level=error")
-       assertions.Contains(log, "Server stopped unintentionally due to: Server failure. Deleting job.")
-       assertions.Contains(log, "Please remove job 14e7bb84-a44d-44c1-90b7-6995a92ad43c manually")
-}
-
 func Test_startHandler(t *testing.T) {
        assertions := require.New(t)
 
diff --git a/test/usecases/oruclosedlooprecovery/goversion/mocks/Server.go b/test/usecases/oruclosedlooprecovery/goversion/mocks/Server.go
deleted file mode 100644 (file)
index ad16503..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-// Code generated by mockery v1.0.0. DO NOT EDIT.
-
-package mocks
-
-import mock "github.com/stretchr/testify/mock"
-
-// Server is an autogenerated mock type for the Server type
-type Server struct {
-       mock.Mock
-}
-
-// ListenAndServe provides a mock function with given fields:
-func (_m *Server) ListenAndServe() error {
-       ret := _m.Called()
-
-       var r0 error
-       if rf, ok := ret.Get(0).(func() error); ok {
-               r0 = rf()
-       } else {
-               r0 = ret.Error(0)
-       }
-
-       return r0
-}
diff --git a/test/usecases/oruclosedlooprecovery/goversion/security/consumer.crt b/test/usecases/oruclosedlooprecovery/goversion/security/consumer.crt
new file mode 100644 (file)
index 0000000..0f6d8a3
--- /dev/null
@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDXzCCAkegAwIBAgIUEbuDTP0ixwxCxCQ9tR5DijGCbtkwDQYJKoZIhvcNAQEL
+BQAwPzELMAkGA1UEBhMCc2UxDDAKBgNVBAoMA0VTVDERMA8GA1UECwwIRXJpY3Nz
+b24xDzANBgNVBAMMBnNlcnZlcjAeFw0yMTEwMTkxNDA1MzVaFw0zMTEwMTcxNDA1
+MzVaMD8xCzAJBgNVBAYTAnNlMQwwCgYDVQQKDANFU1QxETAPBgNVBAsMCEVyaWNz
+c29uMQ8wDQYDVQQDDAZzZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQDnH4imV8kx/mXz6BDbq8e4oZGqGgv7V837iNspj/zIZXhEMP9311fdsZEE
+Y6VWU47bSYRn2xJOP+wmfKewbw0OcEWu/RkdvO7Y0VIVrlbEJYu88ZjK14dMUpfe
+72iMbTc5q2uYi0ImB5/m3jyMSXgso6NDWuvXrp2VSWjb1tG++des9rhvyrZyNrua
+I4iOnMvvuc71gvHol7appRu3+LRTQFYsAizdfHEQ9k949MZH4fiIu5NmCT/wNJVo
+uUZYYJseFhOlIANaXn6qmz7kKVYfxfV+Z5EccaRixaClCFwyRdmjgLyyeuI4/QPD
+x9PjmGmf6eOEC2ZHBi4OHwjIzmLnAgMBAAGjUzBRMB0GA1UdDgQWBBRjeDLPpLm2
+W623wna7xBCbHxtxVjAfBgNVHSMEGDAWgBRjeDLPpLm2W623wna7xBCbHxtxVjAP
+BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAbFUAWFZaIMXmd5qv/
+xJYr1oPJpsmbgWGRWZWDZqbUabvWObyXlDJWIau60BerfcC5TmyElBjTyONSGwCT
+tq+SVB0PXpgqa8ZQ25Ytn2AMDFWhrGbOefCXs6te3HGq6BNubTWrOVIvJypCwC95
++iXVuDd4eg+n2fWv7h7fZRZHum/zLoRxB2lKoMMbc/BQX9hbtP6xyvIVvaYdhcJw
+VzJJGIDqpMiMH6IBaOFSmgfOyGblGKAicj3E3kpGBfadLx3R+9V6aG7zyBnVbr2w
+YJbV2Ay4PrF+PTpCMB/mNwC5RBTYHpSNdrCMSyq3I+QPVJq8dPJr7fd1Uwl3WHqX
+FV0h
+-----END CERTIFICATE-----
diff --git a/test/usecases/oruclosedlooprecovery/goversion/security/consumer.key b/test/usecases/oruclosedlooprecovery/goversion/security/consumer.key
new file mode 100644 (file)
index 0000000..5346bb7
--- /dev/null
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDnH4imV8kx/mXz
+6BDbq8e4oZGqGgv7V837iNspj/zIZXhEMP9311fdsZEEY6VWU47bSYRn2xJOP+wm
+fKewbw0OcEWu/RkdvO7Y0VIVrlbEJYu88ZjK14dMUpfe72iMbTc5q2uYi0ImB5/m
+3jyMSXgso6NDWuvXrp2VSWjb1tG++des9rhvyrZyNruaI4iOnMvvuc71gvHol7ap
+pRu3+LRTQFYsAizdfHEQ9k949MZH4fiIu5NmCT/wNJVouUZYYJseFhOlIANaXn6q
+mz7kKVYfxfV+Z5EccaRixaClCFwyRdmjgLyyeuI4/QPDx9PjmGmf6eOEC2ZHBi4O
+HwjIzmLnAgMBAAECggEBAMq1lZyPkh8PCUyLVX3VhC4jRybyAWBI+piKx+4EI6l/
+laP5dZcegCoo+w/mdbTpRHqAWGjec4e9+Nkoq8rLG6B2SCfaRJUYiEQSEvSBHAid
+BZqKK4B82GXQavNU91Vy1OT3vD7mpPXF6jEK6gAA0C4Wt7Lzo7ZfqEavRBDMsNnV
+jOxLwWJCFSKhfeA6grJCnagmEDKSxxazlNBgCahjPf/+IRJZ7Vk4Zjq+I/5nWKf8
+lYaQExKDIANuM/jMRnYVq5k4g2MKHUADWGTSvG1DMJiMHzdxb2miZovpIkEE86bC
+wKBuele9IR6Rb/wygYj7WdaWysZ081OT7mNyju08e4ECgYEA8+q7vv4Nlz8bAcmY
+Ip5517s15M5D9iLsE2Q5m9Zs99rUyQv0E8ekpChhtTSdvj+eNl39O4hji46Gyceu
+MYPfNL7+YWaFDxuyaXEe/OFuKbFqgE1p08HXFcQJCvgqD1MWO5b9BRDc0qpNFIA8
+eN9xFBMQ2UFaALBMAup7Ef85q4kCgYEA8pKOAIsgmlnO8P9cPzkMC1oozslraAti
+1JnOJjwPLoHFubtH2u7WoIkSvNfeNwfrsVXwAP0m7C8p7qhYppS+0XGjKpYNSezK
+1GCqCVv8R1m+AsSseSUUaQCmEydd+gQbBq3r4u3wU3ylrgAoR3m+7SVyhvD+vbwI
+7+zfj+O3zu8CgYEAqaAsQH5c5Tm1hmCztB+RjD1dFWl8ScevdSzWA1HzJcrA/6+Y
+ZckI7kBG8sVMjemgFR735FbNI1hS1DBRK44Rw5SvQv0Qu5j/UeShMCt1ePkwn1k2
+p1S+Rxy1TTOXzGBzra0q+ELpzncwc3lalJSPBu7bYLrZ5HC167E1NSbQ7EECgYBo
+e/IIj+TyNz7pFcVhQixK84HiWGYYQddHJhzi4TnU2XcWonG3/uqZ6ZEVoJIJ+DJw
+h0jC1EggscwJDaBp2GY9Bwq2PD3rGsDfK+fx8ho/jYtH2/lCkVMyS2I9m9Zh68TM
+YrvZWo4LGASxZ0XyS6GOunOTZlkD1uuulMRTUU4KJwKBgQCwyjs0/ElVFvO0lPIC
+JJ//B5rqI7hNMJuTBvr4yiqVZdbgFukaU7FBVyNYDMpZi/nRbpglm+psFcwXtL8n
+bHOIGLkh8vB7OuETRYhXs567lPYtO4BmHZlXW70Sq/0xqi/Mmz1RuEg4SQ1Ug5oy
+wG6IV5EWSQAhsGirdybQ+bY7Kw==
+-----END PRIVATE KEY-----
diff --git a/test/usecases/oruclosedlooprecovery/goversion/stub/sdnr/sdnrstub.go b/test/usecases/oruclosedlooprecovery/goversion/stub/sdnr/sdnrstub.go
new file mode 100644 (file)
index 0000000..b59dbd9
--- /dev/null
@@ -0,0 +1,49 @@
+// -
+//   ========================LICENSE_START=================================
+//   O-RAN-SC
+//   %%
+//   Copyright (C) 2021: Nordix Foundation
+//   %%
+//   Licensed under the Apache License, Version 2.0 (the "License");
+//   you may not use this file except in compliance with the License.
+//   You may obtain a copy of the License at
+//
+//        http://www.apache.org/licenses/LICENSE-2.0
+//
+//   Unless required by applicable law or agreed to in writing, software
+//   distributed under the License is distributed on an "AS IS" BASIS,
+//   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//   See the License for the specific language governing permissions and
+//   limitations under the License.
+//   ========================LICENSE_END===================================
+//
+
+package main
+
+import (
+       "flag"
+       "fmt"
+       "io"
+       "net/http"
+
+       "github.com/gorilla/mux"
+)
+
+func main() {
+       port := flag.Int("port", 3904, "The port this SDNR stub will listen on")
+       flag.Parse()
+
+       r := mux.NewRouter()
+       r.HandleFunc("/rests/data/network-topology:network-topology/topology=topology-netconf/node={O-DU-ID}/yang-ext:mount/o-ran-sc-du-hello-world:network-function/du-to-ru-connection={O-RU-ID}", handleData)
+
+       fmt.Println("Starting SDNR on port: ", *port)
+       http.ListenAndServe(fmt.Sprintf(":%v", *port), r)
+
+}
+
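+// handleData prints the body of any received request; read errors are silently ignored.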
+func handleData(w http.ResponseWriter, req *http.Request) {
+       defer req.Body.Close()
+       if reqData, err := io.ReadAll(req.Body); err == nil {
+               fmt.Println("SDNR received body: ", string(reqData))
+       }
+}