From: Henrik Andersson Date: Fri, 19 Nov 2021 09:55:12 +0000 (+0000) Subject: Merge "Add schemas for input events of ODU slice assurance usecase" X-Git-Tag: 1.2.0~36 X-Git-Url: https://gerrit.o-ran-sc.org/r/gitweb?a=commitdiff_plain;h=0f6367023720ecc7d7b4b38cbbc4282792172a89;hp=2d522d44bf5f69b091380b57e2879d3b7139bc8f;p=nonrtric.git Merge "Add schemas for input events of ODU slice assurance usecase" --- diff --git a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/BeanFactory.java b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/BeanFactory.java index faf57426..d98a8c3b 100644 --- a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/BeanFactory.java +++ b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/BeanFactory.java @@ -26,9 +26,6 @@ import org.apache.catalina.connector.Connector; import org.oran.dmaapadapter.configuration.ApplicationConfig; import org.oran.dmaapadapter.repository.InfoType; import org.oran.dmaapadapter.repository.InfoTypes; -import org.oran.dmaapadapter.repository.Jobs; -import org.oran.dmaapadapter.tasks.DmaapTopicConsumer; -import org.oran.dmaapadapter.tasks.KafkaTopicConsumers; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Value; import org.springframework.boot.web.embedded.tomcat.TomcatServletWebServerFactory; @@ -38,7 +35,6 @@ import org.springframework.context.annotation.Configuration; @Configuration public class BeanFactory { - private InfoTypes infoTypes; @Value("${server.http-port}") private int httpPort = 0; @@ -49,24 +45,9 @@ public class BeanFactory { } @Bean - public InfoTypes types(@Autowired ApplicationConfig appConfig, @Autowired Jobs jobs, - @Autowired KafkaTopicConsumers kafkaConsumers) { - if (infoTypes != null) { - return infoTypes; - } - + public InfoTypes types(@Autowired ApplicationConfig appConfig) { Collection types = appConfig.getTypes(); - - // Start a consumer for each type - for (InfoType type : types) { - if (type.isDmaapTopicDefined()) { - DmaapTopicConsumer topicConsumer = new DmaapTopicConsumer(appConfig, type, jobs); - topicConsumer.start(); - } - } - infoTypes = new InfoTypes(types); - kafkaConsumers.start(infoTypes); - return infoTypes; + return new InfoTypes(types); } @Bean diff --git a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/clients/AsyncRestClient.java b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/clients/AsyncRestClient.java index ec1541cf..8b3efed5 100644 --- a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/clients/AsyncRestClient.java +++ b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/clients/AsyncRestClient.java @@ -62,101 +62,92 @@ public class AsyncRestClient { this.httpProxyConfig = httpProxyConfig; } - public Mono> postForEntity(String uri, @Nullable String body) { + public Mono> postForEntity(String uri, @Nullable String body, + @Nullable MediaType contentType) { Object traceTag = createTraceTag(); logger.debug("{} POST uri = '{}{}''", traceTag, baseUrl, uri); logger.trace("{} POST body: {}", traceTag, body); Mono bodyProducer = body != null ? 
Mono.just(body) : Mono.empty(); - return getWebClient() // - .flatMap(client -> { - RequestHeadersSpec request = client.post() // - .uri(uri) // - .contentType(MediaType.APPLICATION_JSON) // - .body(bodyProducer, String.class); - return retrieve(traceTag, request); - }); + + RequestHeadersSpec request = getWebClient() // + .post() // + .uri(uri) // + .contentType(contentType) // + .body(bodyProducer, String.class); + return retrieve(traceTag, request); } - public Mono post(String uri, @Nullable String body) { - return postForEntity(uri, body) // - .flatMap(this::toBody); + public Mono post(String uri, @Nullable String body, @Nullable MediaType contentType) { + return postForEntity(uri, body, contentType) // + .map(this::toBody); } - public Mono postWithAuthHeader(String uri, String body, String username, String password) { + public Mono postWithAuthHeader(String uri, String body, String username, String password, + MediaType mediaType) { Object traceTag = createTraceTag(); logger.debug("{} POST (auth) uri = '{}{}''", traceTag, baseUrl, uri); logger.trace("{} POST body: {}", traceTag, body); - return getWebClient() // - .flatMap(client -> { - RequestHeadersSpec request = client.post() // - .uri(uri) // - .headers(headers -> headers.setBasicAuth(username, password)) // - .contentType(MediaType.APPLICATION_JSON) // - .bodyValue(body); - return retrieve(traceTag, request) // - .flatMap(this::toBody); - }); + + RequestHeadersSpec request = getWebClient() // + .post() // + .uri(uri) // + .headers(headers -> headers.setBasicAuth(username, password)) // + .contentType(mediaType) // + .bodyValue(body); + return retrieve(traceTag, request) // + .map(this::toBody); } public Mono> putForEntity(String uri, String body) { Object traceTag = createTraceTag(); logger.debug("{} PUT uri = '{}{}''", traceTag, baseUrl, uri); logger.trace("{} PUT body: {}", traceTag, body); - return getWebClient() // - .flatMap(client -> { - RequestHeadersSpec request = client.put() // - .uri(uri) // - .contentType(MediaType.APPLICATION_JSON) // - .bodyValue(body); - return retrieve(traceTag, request); - }); + + RequestHeadersSpec request = getWebClient() // + .put() // + .uri(uri) // + .contentType(MediaType.APPLICATION_JSON) // + .bodyValue(body); + return retrieve(traceTag, request); } public Mono> putForEntity(String uri) { Object traceTag = createTraceTag(); logger.debug("{} PUT uri = '{}{}''", traceTag, baseUrl, uri); logger.trace("{} PUT body: ", traceTag); - return getWebClient() // - .flatMap(client -> { - RequestHeadersSpec request = client.put() // - .uri(uri); - return retrieve(traceTag, request); - }); + RequestHeadersSpec request = getWebClient() // + .put() // + .uri(uri); + return retrieve(traceTag, request); } public Mono put(String uri, String body) { return putForEntity(uri, body) // - .flatMap(this::toBody); + .map(this::toBody); } public Mono> getForEntity(String uri) { Object traceTag = createTraceTag(); logger.debug("{} GET uri = '{}{}''", traceTag, baseUrl, uri); - return getWebClient() // - .flatMap(client -> { - RequestHeadersSpec request = client.get().uri(uri); - return retrieve(traceTag, request); - }); + RequestHeadersSpec request = getWebClient().get().uri(uri); + return retrieve(traceTag, request); } public Mono get(String uri) { return getForEntity(uri) // - .flatMap(this::toBody); + .map(this::toBody); } public Mono> deleteForEntity(String uri) { Object traceTag = createTraceTag(); logger.debug("{} DELETE uri = '{}{}''", traceTag, baseUrl, uri); - return getWebClient() // - .flatMap(client -> { 
- RequestHeadersSpec request = client.delete().uri(uri); - return retrieve(traceTag, request); - }); + RequestHeadersSpec request = getWebClient().delete().uri(uri); + return retrieve(traceTag, request); } public Mono delete(String uri) { return deleteForEntity(uri) // - .flatMap(this::toBody); + .map(this::toBody); } private Mono> retrieve(Object traceTag, RequestHeadersSpec request) { @@ -185,11 +176,11 @@ public class AsyncRestClient { } } - private Mono toBody(ResponseEntity entity) { + private String toBody(ResponseEntity entity) { if (entity.getBody() == null) { - return Mono.just(""); + return ""; } else { - return Mono.just(entity.getBody()); + return entity.getBody(); } } @@ -229,11 +220,11 @@ public class AsyncRestClient { .build(); } - private Mono getWebClient() { + private WebClient getWebClient() { if (this.webClient == null) { this.webClient = buildWebClient(baseUrl); } - return Mono.just(buildWebClient(baseUrl)); + return this.webClient; } } diff --git a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/controllers/ProducerCallbacksController.java b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/controllers/ProducerCallbacksController.java index e4dca5b8..07f5aa72 100644 --- a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/controllers/ProducerCallbacksController.java +++ b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/controllers/ProducerCallbacksController.java @@ -82,11 +82,9 @@ public class ProducerCallbacksController { @RequestBody String body) { try { ProducerJobInfo request = gson.fromJson(body, ProducerJobInfo.class); - - logger.info("Job started callback {}", request.id); - Job job = new Job(request.id, request.targetUri, types.getType(request.typeId), request.owner, + logger.debug("Job started callback {}", request.id); + this.jobs.addJob(request.id, request.targetUri, types.getType(request.typeId), request.owner, request.lastUpdated, toJobParameters(request.jobData)); - this.jobs.put(job); return new ResponseEntity<>(HttpStatus.OK); } catch (Exception e) { return ErrorResponse.create(e, HttpStatus.NOT_FOUND); @@ -123,7 +121,7 @@ public class ProducerCallbacksController { public ResponseEntity jobDeletedCallback( // @PathVariable("infoJobId") String infoJobId) { - logger.info("Job deleted callback {}", infoJobId); + logger.debug("Job deleted callback {}", infoJobId); this.jobs.remove(infoJobId); return new ResponseEntity<>(HttpStatus.OK); } diff --git a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/repository/Job.java b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/repository/Job.java index d1697e96..5f7521c3 100644 --- a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/repository/Job.java +++ b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/repository/Job.java @@ -20,39 +20,55 @@ package org.oran.dmaapadapter.repository; +import java.time.Duration; import java.util.regex.Matcher; import java.util.regex.Pattern; import lombok.Getter; import org.immutables.gson.Gson; +import org.oran.dmaapadapter.clients.AsyncRestClient; public class Job { @Gson.TypeAdapters public static class Parameters { - public String filter; - public BufferTimeout bufferTimeout; + @Getter + private String filter; + @Getter + private BufferTimeout bufferTimeout; - public Parameters() { - } + private int maxConcurrency; + + public Parameters() {} - public Parameters(String filter, BufferTimeout bufferTimeout) { + public Parameters(String filter, BufferTimeout bufferTimeout, int maxConcurrency) { this.filter = filter; 
this.bufferTimeout = bufferTimeout; + this.maxConcurrency = maxConcurrency; } - public static class BufferTimeout { - public BufferTimeout(int maxSize, int maxTimeMiliseconds) { - this.maxSize = maxSize; - this.maxTimeMiliseconds = maxTimeMiliseconds; - } + public int getMaxConcurrency() { + return maxConcurrency == 0 ? 1 : maxConcurrency; + } + } - public BufferTimeout() { - } + @Gson.TypeAdapters + public static class BufferTimeout { + public BufferTimeout(int maxSize, long maxTimeMiliseconds) { + this.maxSize = maxSize; + this.maxTimeMiliseconds = maxTimeMiliseconds; + } - public int maxSize; - public int maxTimeMiliseconds; + public BufferTimeout() {} + + @Getter + private int maxSize; + + private long maxTimeMiliseconds; + + public Duration getMaxTime() { + return Duration.ofMillis(maxTimeMiliseconds); } } @@ -76,7 +92,11 @@ public class Job { private final Pattern jobDataFilter; - public Job(String id, String callbackUrl, InfoType type, String owner, String lastUpdated, Parameters parameters) { + @Getter + private final AsyncRestClient consumerRestClient; + + public Job(String id, String callbackUrl, InfoType type, String owner, String lastUpdated, Parameters parameters, + AsyncRestClient consumerRestClient) { this.id = id; this.callbackUrl = callbackUrl; this.type = type; @@ -88,6 +108,7 @@ public class Job { } else { jobDataFilter = null; } + this.consumerRestClient = consumerRestClient; } public boolean isFilterMatch(String data) { diff --git a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/repository/Jobs.java b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/repository/Jobs.java index 8a388248..0e7743d4 100644 --- a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/repository/Jobs.java +++ b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/repository/Jobs.java @@ -20,13 +20,18 @@ package org.oran.dmaapadapter.repository; +import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Vector; +import org.oran.dmaapadapter.clients.AsyncRestClient; +import org.oran.dmaapadapter.clients.AsyncRestClientFactory; +import org.oran.dmaapadapter.configuration.ApplicationConfig; import org.oran.dmaapadapter.exceptions.ServiceException; -import org.oran.dmaapadapter.tasks.KafkaTopicConsumers; +import org.oran.dmaapadapter.repository.Job.Parameters; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; @@ -34,14 +39,21 @@ import org.springframework.stereotype.Component; @Component public class Jobs { + public interface Observer { + void onJobbAdded(Job job); + + void onJobRemoved(Job job); + } + private static final Logger logger = LoggerFactory.getLogger(Jobs.class); private Map allJobs = new HashMap<>(); private MultiMap jobsByType = new MultiMap<>(); - private final KafkaTopicConsumers kafkaConsumers; + private final AsyncRestClientFactory restclientFactory; + private final List observers = new ArrayList<>(); - public Jobs(@Autowired KafkaTopicConsumers kafkaConsumers) { - this.kafkaConsumers = kafkaConsumers; + public Jobs(@Autowired ApplicationConfig applicationConfig) { + restclientFactory = new AsyncRestClientFactory(applicationConfig.getWebClientConfig()); } public synchronized Job getJob(String id) throws ServiceException { @@ -56,11 +68,28 @@ public class Jobs { return allJobs.get(id); } - public synchronized void put(Job job) { + public void addJob(String id, String callbackUrl, InfoType type, String owner, 
String lastUpdated, + Parameters parameters) { + AsyncRestClient consumerRestClient = type.isUseHttpProxy() // + ? restclientFactory.createRestClientUseHttpProxy(callbackUrl) // + : restclientFactory.createRestClientNoHttpProxy(callbackUrl); + Job job = new Job(id, callbackUrl, type, owner, lastUpdated, parameters, consumerRestClient); + this.put(job); + synchronized (observers) { + this.observers.forEach(obs -> obs.onJobbAdded(job)); + } + } + + public void addObserver(Observer obs) { + synchronized (observers) { + this.observers.add(obs); + } + } + + private synchronized void put(Job job) { logger.debug("Put job: {}", job.getId()); allJobs.put(job.getId(), job); jobsByType.put(job.getType().getId(), job.getId(), job); - kafkaConsumers.addJob(job); } public synchronized Iterable getAll() { @@ -75,10 +104,14 @@ public class Jobs { return job; } - public synchronized void remove(Job job) { - this.allJobs.remove(job.getId()); - jobsByType.remove(job.getType().getId(), job.getId()); - kafkaConsumers.removeJob(job); + public void remove(Job job) { + synchronized (this) { + this.allJobs.remove(job.getId()); + jobsByType.remove(job.getType().getId(), job.getId()); + } + synchronized (observers) { + this.observers.forEach(obs -> obs.onJobRemoved(job)); + } } public synchronized int size() { diff --git a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/DmaapTopicConsumer.java b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/DmaapTopicConsumer.java index 7d557585..217a0723 100644 --- a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/DmaapTopicConsumer.java +++ b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/DmaapTopicConsumer.java @@ -29,6 +29,7 @@ import org.oran.dmaapadapter.repository.InfoType; import org.oran.dmaapadapter.repository.Jobs; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.springframework.http.MediaType; import reactor.core.publisher.Flux; import reactor.core.publisher.FluxSink; @@ -38,14 +39,12 @@ import reactor.core.publisher.Mono; * The class fetches incoming requests from DMAAP and sends them further to the * consumers that has a job for this InformationType. */ - public class DmaapTopicConsumer { private static final Duration TIME_BETWEEN_DMAAP_RETRIES = Duration.ofSeconds(10); private static final Logger logger = LoggerFactory.getLogger(DmaapTopicConsumer.class); private final AsyncRestClient dmaapRestClient; private final InfiniteFlux infiniteSubmitter = new InfiniteFlux(); - private final AsyncRestClient consumerRestClient; protected final ApplicationConfig applicationConfig; protected final InfoType type; protected final Jobs jobs; @@ -85,8 +84,6 @@ public class DmaapTopicConsumer { AsyncRestClientFactory restclientFactory = new AsyncRestClientFactory(applicationConfig.getWebClientConfig()); this.dmaapRestClient = restclientFactory.createRestClientNoHttpProxy(""); this.applicationConfig = applicationConfig; - this.consumerRestClient = type.isUseHttpProxy() ? 
restclientFactory.createRestClientUseHttpProxy("") - : restclientFactory.createRestClientNoHttpProxy(""); this.type = type; this.jobs = jobs; } @@ -108,7 +105,8 @@ public class DmaapTopicConsumer { private Mono handleDmaapErrorResponse(Throwable t) { logger.debug("error from DMAAP {} {}", t.getMessage(), type.getDmaapTopicUrl()); - return Mono.delay(TIME_BETWEEN_DMAAP_RETRIES).flatMap(notUsed -> Mono.empty()); + return Mono.delay(TIME_BETWEEN_DMAAP_RETRIES) // + .flatMap(notUsed -> Mono.empty()); } private Mono getFromMessageRouter(String topicUrl) { @@ -130,8 +128,8 @@ public class DmaapTopicConsumer { // Distibute the body to all jobs for this type return Flux.fromIterable(this.jobs.getJobsForType(this.type)) // - .doOnNext(job -> logger.debug("Sending to consumer {}", job.getCallbackUrl())) - .flatMap(job -> consumerRestClient.post(job.getCallbackUrl(), body), CONCURRENCY) // + .doOnNext(job -> logger.debug("Sending to consumer {}", job.getCallbackUrl())) // + .flatMap(job -> job.getConsumerRestClient().post("", body, MediaType.APPLICATION_JSON), CONCURRENCY) // .onErrorResume(this::handleConsumerErrorResponse); } } diff --git a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/DmaapTopicConsumers.java b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/DmaapTopicConsumers.java new file mode 100644 index 00000000..9447c3ab --- /dev/null +++ b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/DmaapTopicConsumers.java @@ -0,0 +1,43 @@ +/*- + * ========================LICENSE_START================================= + * O-RAN-SC + * %% + * Copyright (C) 2021 Nordix Foundation + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ========================LICENSE_END=================================== + */ + +package org.oran.dmaapadapter.tasks; + +import org.oran.dmaapadapter.configuration.ApplicationConfig; +import org.oran.dmaapadapter.repository.InfoType; +import org.oran.dmaapadapter.repository.InfoTypes; +import org.oran.dmaapadapter.repository.Jobs; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +@Component +public class DmaapTopicConsumers { + + DmaapTopicConsumers(@Autowired ApplicationConfig appConfig, @Autowired InfoTypes types, @Autowired Jobs jobs) { + // Start a consumer for each type + for (InfoType type : types.getAll()) { + if (type.isDmaapTopicDefined()) { + DmaapTopicConsumer topicConsumer = new DmaapTopicConsumer(appConfig, type, jobs); + topicConsumer.start(); + } + } + } + +} diff --git a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaJobDataConsumer.java b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaJobDataConsumer.java new file mode 100644 index 00000000..5550ce0e --- /dev/null +++ b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaJobDataConsumer.java @@ -0,0 +1,139 @@ +/*- + * ========================LICENSE_START================================= + * O-RAN-SC + * %% + * Copyright (C) 2021 Nordix Foundation + * %% + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ========================LICENSE_END=================================== + */ + +package org.oran.dmaapadapter.tasks; + +import lombok.Getter; + +import org.oran.dmaapadapter.repository.Job; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.http.MediaType; +import org.springframework.web.reactive.function.client.WebClientResponseException; + +import reactor.core.Disposable; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.core.publisher.Sinks.Many; + +/** + * The class streams data from a multi cast sink and sends the data to the Job + * owner via REST calls. + */ +@SuppressWarnings("squid:S2629") // Invoke method(s) only conditionally +public class KafkaJobDataConsumer { + private static final Logger logger = LoggerFactory.getLogger(KafkaJobDataConsumer.class); + @Getter + private final Job job; + private Disposable subscription; + private final ErrorStats errorStats = new ErrorStats(); + + private class ErrorStats { + private int consumerFaultCounter = 0; + private boolean kafkaError = false; // eg. 
overflow + + public void handleOkFromConsumer() { + this.consumerFaultCounter = 0; + } + + public void handleException(Throwable t) { + if (t instanceof WebClientResponseException) { + ++this.consumerFaultCounter; + } else { + kafkaError = true; + } + } + + public boolean isItHopeless() { + final int STOP_AFTER_ERRORS = 5; + return kafkaError || consumerFaultCounter > STOP_AFTER_ERRORS; + } + + public void resetKafkaErrors() { + kafkaError = false; + } + } + + public KafkaJobDataConsumer(Job job) { + this.job = job; + } + + public synchronized void start(Many input) { + stop(); + this.errorStats.resetKafkaErrors(); + this.subscription = getMessagesFromKafka(input, job) // + .flatMap(this::postToClient, job.getParameters().getMaxConcurrency()) // + .onErrorResume(this::handleError) // + .subscribe(this::handleConsumerSentOk, // + t -> stop(), // + () -> logger.warn("KafkaMessageConsumer stopped jobId: {}", job.getId())); + } + + private Mono postToClient(String body) { + logger.debug("Sending to consumer {} {} {}", job.getId(), job.getCallbackUrl(), body); + MediaType contentType = this.job.isBuffered() ? MediaType.APPLICATION_JSON : null; + return job.getConsumerRestClient().post("", body, contentType); + } + + public synchronized void stop() { + if (this.subscription != null) { + subscription.dispose(); + subscription = null; + } + } + + public synchronized boolean isRunning() { + return this.subscription != null; + } + + private Flux getMessagesFromKafka(Many input, Job job) { + Flux result = input.asFlux() // + .filter(job::isFilterMatch); + + if (job.isBuffered()) { + result = result.map(this::quote) // + .bufferTimeout( // + job.getParameters().getBufferTimeout().getMaxSize(), // + job.getParameters().getBufferTimeout().getMaxTime()) // + .map(Object::toString); + } + return result; + } + + private String quote(String str) { + final String q = "\""; + return q + str.replace(q, "\\\"") + q; + } + + private Mono handleError(Throwable t) { + logger.warn("exception: {} job: {}", t.getMessage(), job.getId()); + this.errorStats.handleException(t); + if (this.errorStats.isItHopeless()) { + return Mono.error(t); + } else { + return Mono.empty(); // Ignore + } + } + + private void handleConsumerSentOk(String data) { + this.errorStats.handleOkFromConsumer(); + } + +} diff --git a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaTopicConsumer.java b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaTopicConsumer.java deleted file mode 100644 index 6079edfb..00000000 --- a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaTopicConsumer.java +++ /dev/null @@ -1,130 +0,0 @@ -/*- - * ========================LICENSE_START================================= - * O-RAN-SC - * %% - * Copyright (C) 2021 Nordix Foundation - * %% - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * ========================LICENSE_END=================================== - */ - -package org.oran.dmaapadapter.tasks; - -import java.time.Duration; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.common.serialization.IntegerDeserializer; -import org.apache.kafka.common.serialization.StringDeserializer; -import org.oran.dmaapadapter.clients.AsyncRestClient; -import org.oran.dmaapadapter.clients.AsyncRestClientFactory; -import org.oran.dmaapadapter.configuration.ApplicationConfig; -import org.oran.dmaapadapter.repository.InfoType; -import org.oran.dmaapadapter.repository.Job; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import reactor.core.Disposable; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.publisher.Sinks; -import reactor.core.publisher.Sinks.Many; -import reactor.kafka.receiver.KafkaReceiver; -import reactor.kafka.receiver.ReceiverOptions; - -/** - * The class fetches incoming requests from DMAAP and sends them further to the - * consumers that has a job for this InformationType. - */ -@SuppressWarnings("squid:S2629") // Invoke method(s) only conditionally -public class KafkaTopicConsumer { - private static final Logger logger = LoggerFactory.getLogger(KafkaTopicConsumer.class); - private final AsyncRestClient consumerRestClient; - private final ApplicationConfig applicationConfig; - private final InfoType type; - private final Many consumerDistributor; - - public KafkaTopicConsumer(ApplicationConfig applicationConfig, InfoType type) { - this.applicationConfig = applicationConfig; - - final int CONSUMER_BACKPRESSURE_BUFFER_SIZE = 10; - this.consumerDistributor = Sinks.many().multicast().onBackpressureBuffer(CONSUMER_BACKPRESSURE_BUFFER_SIZE); - - AsyncRestClientFactory restclientFactory = new AsyncRestClientFactory(applicationConfig.getWebClientConfig()); - this.consumerRestClient = type.isUseHttpProxy() ? 
restclientFactory.createRestClientUseHttpProxy("") - : restclientFactory.createRestClientNoHttpProxy(""); - this.type = type; - startKafkaTopicReceiver(); - } - - private Disposable startKafkaTopicReceiver() { - return KafkaReceiver.create(kafkaInputProperties()) // - .receive() // - .flatMap(this::onReceivedData) // - .subscribe(null, // - throwable -> logger.error("KafkaMessageConsumer error: {}", throwable.getMessage()), // - () -> logger.warn("KafkaMessageConsumer stopped")); - } - - private Flux onReceivedData(ConsumerRecord input) { - logger.debug("Received from kafka topic: {} :{}", this.type.getKafkaInputTopic(), input.value()); - consumerDistributor.emitNext(input.value(), Sinks.EmitFailureHandler.FAIL_FAST); - return consumerDistributor.asFlux(); - } - - public Disposable startDistributeToConsumer(Job job) { - return getMessagesFromKafka(job) // - .doOnNext(data -> logger.debug("Sending to consumer {} {} {}", job.getId(), job.getCallbackUrl(), data)) - .flatMap(body -> consumerRestClient.post(job.getCallbackUrl(), body)) // - .onErrorResume(this::handleConsumerErrorResponse) // - .subscribe(null, // - throwable -> logger.error("KafkaMessageConsumer error: {}", throwable.getMessage()), // - () -> logger.warn("KafkaMessageConsumer stopped {}", job.getType().getId())); - } - - private Flux getMessagesFromKafka(Job job) { - if (job.isBuffered()) { - return consumerDistributor.asFlux() // - .filter(job::isFilterMatch) // - .bufferTimeout(job.getParameters().bufferTimeout.maxSize, - Duration.ofMillis(job.getParameters().bufferTimeout.maxTimeMiliseconds)) // - .flatMap(o -> Flux.just(o.toString())); - } else { - return consumerDistributor.asFlux() // - .filter(job::isFilterMatch); - } - } - - private Mono handleConsumerErrorResponse(Throwable t) { - logger.warn("error from CONSUMER {}", t.getMessage()); - return Mono.empty(); - } - - private ReceiverOptions kafkaInputProperties() { - Map consumerProps = new HashMap<>(); - if (this.applicationConfig.getKafkaBootStrapServers().isEmpty()) { - logger.error("No kafka boostrap server is setup"); - } - consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.applicationConfig.getKafkaBootStrapServers()); - consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "osc-dmaap-adaptor"); - consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class); - consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - - return ReceiverOptions.create(consumerProps) - .subscription(Collections.singleton(this.type.getKafkaInputTopic())); - } - -} diff --git a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaTopicConsumers.java b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaTopicConsumers.java index 23d9da2c..0ed85c6a 100644 --- a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaTopicConsumers.java +++ b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaTopicConsumers.java @@ -23,57 +23,97 @@ package org.oran.dmaapadapter.tasks; import java.util.HashMap; import java.util.Map; +import lombok.Getter; + import org.oran.dmaapadapter.configuration.ApplicationConfig; import org.oran.dmaapadapter.repository.InfoType; import org.oran.dmaapadapter.repository.InfoTypes; import org.oran.dmaapadapter.repository.Job; +import org.oran.dmaapadapter.repository.Jobs; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; +import 
org.springframework.scheduling.annotation.EnableScheduling; +import org.springframework.scheduling.annotation.Scheduled; import org.springframework.stereotype.Component; -import reactor.core.Disposable; -/** - * The class fetches incoming requests from DMAAP and sends them further to the - * consumers that has a job for this InformationType. - */ @SuppressWarnings("squid:S2629") // Invoke method(s) only conditionally @Component +@EnableScheduling public class KafkaTopicConsumers { private static final Logger logger = LoggerFactory.getLogger(KafkaTopicConsumers.class); - private final Map topicConsumers = new HashMap<>(); - private final Map activeSubscriptions = new HashMap<>(); - private final ApplicationConfig appConfig; + private final Map topicListeners = new HashMap<>(); // Key is typeId - public KafkaTopicConsumers(@Autowired ApplicationConfig appConfig) { - this.appConfig = appConfig; - } + @Getter + private final Map consumers = new HashMap<>(); // Key is jobId + + private static final int CONSUMER_SUPERVISION_INTERVAL_MS = 1000 * 60 * 3; + + public KafkaTopicConsumers(@Autowired ApplicationConfig appConfig, @Autowired InfoTypes types, + @Autowired Jobs jobs) { - public void start(InfoTypes types) { for (InfoType type : types.getAll()) { if (type.isKafkaTopicDefined()) { - KafkaTopicConsumer topicConsumer = new KafkaTopicConsumer(appConfig, type); - topicConsumers.put(type.getId(), topicConsumer); + KafkaTopicListener topicConsumer = new KafkaTopicListener(appConfig, type); + topicListeners.put(type.getId(), topicConsumer); } } + + jobs.addObserver(new Jobs.Observer() { + @Override + public void onJobbAdded(Job job) { + addJob(job); + } + + @Override + public void onJobRemoved(Job job) { + removeJob(job); + } + + }); } public synchronized void addJob(Job job) { - if (this.activeSubscriptions.get(job.getId()) == null && job.getType().isKafkaTopicDefined()) { + if (this.consumers.get(job.getId()) == null && job.getType().isKafkaTopicDefined()) { logger.debug("Kafka job added {}", job.getId()); - KafkaTopicConsumer topicConsumer = topicConsumers.get(job.getType().getId()); - Disposable subscription = topicConsumer.startDistributeToConsumer(job); - activeSubscriptions.put(job.getId(), subscription); + KafkaTopicListener topicConsumer = topicListeners.get(job.getType().getId()); + KafkaJobDataConsumer subscription = new KafkaJobDataConsumer(job); + subscription.start(topicConsumer.getOutput()); + consumers.put(job.getId(), subscription); } } public synchronized void removeJob(Job job) { - Disposable d = activeSubscriptions.remove(job.getId()); + KafkaJobDataConsumer d = consumers.remove(job.getId()); if (d != null) { logger.debug("Kafka job removed {}", job.getId()); - d.dispose(); + d.stop(); } } + @Scheduled(fixedRate = CONSUMER_SUPERVISION_INTERVAL_MS) + public synchronized void restartNonRunningTasks() { + + for (KafkaJobDataConsumer consumer : consumers.values()) { + if (!consumer.isRunning()) { + restartTopic(consumer); + } + } + } + + private void restartTopic(KafkaJobDataConsumer consumer) { + InfoType type = consumer.getJob().getType(); + KafkaTopicListener topic = this.topicListeners.get(type.getId()); + topic.start(); + restartConsumersOfType(topic, type); + } + + private void restartConsumersOfType(KafkaTopicListener topic, InfoType type) { + this.consumers.forEach((jobId, consumer) -> { + if (consumer.getJob().getType().getId().equals(type.getId())) { + consumer.start(topic.getOutput()); + } + }); + } } diff --git 
a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaTopicListener.java b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaTopicListener.java
new file mode 100644
index 00000000..d1045ee0
--- /dev/null
+++ b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/KafkaTopicListener.java
@@ -0,0 +1,106 @@
+/*-
+ * ========================LICENSE_START=================================
+ * O-RAN-SC
+ * %%
+ * Copyright (C) 2021 Nordix Foundation
+ * %%
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ========================LICENSE_END===================================
+ */
+
+package org.oran.dmaapadapter.tasks;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.oran.dmaapadapter.configuration.ApplicationConfig;
+import org.oran.dmaapadapter.repository.InfoType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import reactor.core.Disposable;
+import reactor.core.publisher.Sinks;
+import reactor.core.publisher.Sinks.Many;
+import reactor.kafka.receiver.KafkaReceiver;
+import reactor.kafka.receiver.ReceiverOptions;
+
+/**
+ * The class streams incoming requests from a Kafka topic and sends them further
+ * to a multicast sink, which several other streams can connect to.
+ */ +@SuppressWarnings("squid:S2629") // Invoke method(s) only conditionally +public class KafkaTopicListener { + private static final Logger logger = LoggerFactory.getLogger(KafkaTopicListener.class); + private final ApplicationConfig applicationConfig; + private final InfoType type; + private Many output; + private Disposable topicReceiverTask; + + public KafkaTopicListener(ApplicationConfig applicationConfig, InfoType type) { + this.applicationConfig = applicationConfig; + this.type = type; + start(); + } + + public Many getOutput() { + return this.output; + } + + public void start() { + stop(); + final int CONSUMER_BACKPRESSURE_BUFFER_SIZE = 1024 * 10; + this.output = Sinks.many().multicast().onBackpressureBuffer(CONSUMER_BACKPRESSURE_BUFFER_SIZE); + logger.debug("Listening to kafka topic: {} type :{}", this.type.getKafkaInputTopic(), type.getId()); + topicReceiverTask = KafkaReceiver.create(kafkaInputProperties()) // + .receive() // + .doOnNext(this::onReceivedData) // + .subscribe(null, // + this::onReceivedError, // + () -> logger.warn("KafkaTopicReceiver stopped")); + } + + private void stop() { + if (topicReceiverTask != null) { + topicReceiverTask.dispose(); + topicReceiverTask = null; + } + } + + private void onReceivedData(ConsumerRecord input) { + logger.debug("Received from kafka topic: {} :{}", this.type.getKafkaInputTopic(), input.value()); + output.emitNext(input.value(), Sinks.EmitFailureHandler.FAIL_FAST); + } + + private void onReceivedError(Throwable t) { + logger.error("KafkaTopicReceiver error: {}", t.getMessage()); + } + + private ReceiverOptions kafkaInputProperties() { + Map consumerProps = new HashMap<>(); + if (this.applicationConfig.getKafkaBootStrapServers().isEmpty()) { + logger.error("No kafka boostrap server is setup"); + } + consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.applicationConfig.getKafkaBootStrapServers()); + consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "osc-dmaap-adaptor"); + consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); + consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); + + return ReceiverOptions.create(consumerProps) + .subscription(Collections.singleton(this.type.getKafkaInputTopic())); + } + +} diff --git a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/ProducerRegstrationTask.java b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/ProducerRegstrationTask.java index e8b236c9..8b5b6cfc 100644 --- a/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/ProducerRegstrationTask.java +++ b/dmaap-adaptor-java/src/main/java/org/oran/dmaapadapter/tasks/ProducerRegstrationTask.java @@ -87,7 +87,6 @@ public class ProducerRegstrationTask { } private void handleRegistrationCompleted() { - logger.debug("Registering types and producer completed"); isRegisteredInEcs = true; } @@ -95,6 +94,7 @@ public class ProducerRegstrationTask { logger.warn("Registration of producer failed {}", t.getMessage()); } + // Returns TRUE if registration is correct private Mono checkRegistration() { final String url = applicationConfig.getEcsBaseUrl() + "/data-producer/v1/info-producers/" + PRODUCER_ID; return restClient.get(url) // @@ -105,7 +105,7 @@ public class ProducerRegstrationTask { private Mono isRegisterredInfoCorrect(String registerredInfoStr) { ProducerRegistrationInfo registerredInfo = gson.fromJson(registerredInfoStr, ProducerRegistrationInfo.class); if (isEqual(producerRegistrationInfo(), registerredInfo)) { - 
logger.trace("Already registered"); + logger.trace("Already registered in ECS"); return Mono.just(Boolean.TRUE); } else { return Mono.just(Boolean.FALSE); @@ -118,8 +118,8 @@ public class ProducerRegstrationTask { private Mono registerTypesAndProducer() { final int CONCURRENCY = 20; - final String producerUrl = applicationConfig.getEcsBaseUrl() + "/data-producer/v1/info-producers/" - + PRODUCER_ID; + final String producerUrl = + applicationConfig.getEcsBaseUrl() + "/data-producer/v1/info-producers/" + PRODUCER_ID; return Flux.fromIterable(this.types.getAll()) // .doOnNext(type -> logger.info("Registering type {}", type.getId())) // diff --git a/dmaap-adaptor-java/src/main/resources/typeSchemaKafka.json b/dmaap-adaptor-java/src/main/resources/typeSchemaKafka.json index 0ff7c80e..290b70ae 100644 --- a/dmaap-adaptor-java/src/main/resources/typeSchemaKafka.json +++ b/dmaap-adaptor-java/src/main/resources/typeSchemaKafka.json @@ -5,6 +5,9 @@ "filter": { "type": "string" }, + "maxConcurrency": { + "type": "integer" + }, "bufferTimeout": { "type": "object", "properties": { @@ -21,6 +24,5 @@ ] } }, - "required": [ - ] -} + "required": [] +} \ No newline at end of file diff --git a/dmaap-adaptor-java/src/test/java/org/oran/dmaapadapter/ApplicationTest.java b/dmaap-adaptor-java/src/test/java/org/oran/dmaapadapter/ApplicationTest.java index 1ca4fac2..287c95ec 100644 --- a/dmaap-adaptor-java/src/test/java/org/oran/dmaapadapter/ApplicationTest.java +++ b/dmaap-adaptor-java/src/test/java/org/oran/dmaapadapter/ApplicationTest.java @@ -227,7 +227,8 @@ class ApplicationTest { ProducerJobInfo info = new ProducerJobInfo(null, "id", "typeId", "targetUri", "owner", "lastUpdated"); String body = gson.toJson(info); - testErrorCode(restClient().post(jobUrl, body), HttpStatus.NOT_FOUND, "Could not find type"); + testErrorCode(restClient().post(jobUrl, body, MediaType.APPLICATION_JSON), HttpStatus.NOT_FOUND, + "Could not find type"); } @Test diff --git a/dmaap-adaptor-java/src/test/java/org/oran/dmaapadapter/ConsumerController.java b/dmaap-adaptor-java/src/test/java/org/oran/dmaapadapter/ConsumerController.java index 4b6d9010..70e89d6b 100644 --- a/dmaap-adaptor-java/src/test/java/org/oran/dmaapadapter/ConsumerController.java +++ b/dmaap-adaptor-java/src/test/java/org/oran/dmaapadapter/ConsumerController.java @@ -56,6 +56,15 @@ public class ConsumerController { public TestResults() {} + public boolean hasReceived(String str) { + for (String received : receivedBodies) { + if (received.equals(str)) { + return true; + } + } + return false; + } + public void reset() { receivedBodies.clear(); } diff --git a/dmaap-adaptor-java/src/test/java/org/oran/dmaapadapter/EcsSimulatorController.java b/dmaap-adaptor-java/src/test/java/org/oran/dmaapadapter/EcsSimulatorController.java index 8d1dda66..1cf8903a 100644 --- a/dmaap-adaptor-java/src/test/java/org/oran/dmaapadapter/EcsSimulatorController.java +++ b/dmaap-adaptor-java/src/test/java/org/oran/dmaapadapter/EcsSimulatorController.java @@ -105,7 +105,7 @@ public class EcsSimulatorController { new ProducerJobInfo(job.jobDefinition, jobId, job.infoTypeId, job.jobResultUri, job.owner, "TIMESTAMP"); String body = gson.toJson(request); logger.info("ECS Simulator PUT job: {}", body); - restClient.post(url, body).block(); + restClient.post(url, body, MediaType.APPLICATION_JSON).block(); } public void deleteJob(String jobId, AsyncRestClient restClient) { diff --git a/dmaap-adaptor-java/src/test/java/org/oran/dmaapadapter/IntegrationWithKafka.java 
b/dmaap-adaptor-java/src/test/java/org/oran/dmaapadapter/IntegrationWithKafka.java index 31ef970f..470e114e 100644 --- a/dmaap-adaptor-java/src/test/java/org/oran/dmaapadapter/IntegrationWithKafka.java +++ b/dmaap-adaptor-java/src/test/java/org/oran/dmaapadapter/IntegrationWithKafka.java @@ -22,9 +22,11 @@ package org.oran.dmaapadapter; import static org.assertj.core.api.Assertions.assertThat; import static org.awaitility.Awaitility.await; +import static org.junit.jupiter.api.Assertions.assertTrue; import com.google.gson.JsonParser; +import java.time.Duration; import java.util.HashMap; import java.util.Map; @@ -47,6 +49,8 @@ import org.oran.dmaapadapter.repository.InfoType; import org.oran.dmaapadapter.repository.InfoTypes; import org.oran.dmaapadapter.repository.Job; import org.oran.dmaapadapter.repository.Jobs; +import org.oran.dmaapadapter.tasks.KafkaJobDataConsumer; +import org.oran.dmaapadapter.tasks.KafkaTopicConsumers; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; @@ -90,6 +94,9 @@ class IntegrationWithKafka { @Autowired private EcsSimulatorController ecsSimulatorController; + @Autowired + private KafkaTopicConsumers kafkaTopicConsumers; + private com.google.gson.Gson gson = new com.google.gson.GsonBuilder().create(); private static final Logger logger = LoggerFactory.getLogger(IntegrationWithKafka.class); @@ -174,9 +181,9 @@ class IntegrationWithKafka { return "https://localhost:" + this.applicationConfig.getLocalServerHttpPort(); } - private Object jobParametersAsJsonObject(String filter, int maxTimeMiliseconds, int maxSize) { - Job.Parameters param = new Job.Parameters(filter, - new Job.Parameters.BufferTimeout(maxSize, maxTimeMiliseconds)); + private Object jobParametersAsJsonObject(String filter, long maxTimeMiliseconds, int maxSize, int maxConcurrency) { + Job.Parameters param = + new Job.Parameters(filter, new Job.BufferTimeout(maxSize, maxTimeMiliseconds), maxConcurrency); String str = gson.toJson(param); return jsonObject(str); } @@ -189,13 +196,14 @@ class IntegrationWithKafka { } } - private ConsumerJobInfo consumerJobInfo(String filter, int maxTimeMiliseconds, int maxSize) { + private ConsumerJobInfo consumerJobInfo(String filter, Duration maxTime, int maxSize, int maxConcurrency) { try { InfoType type = this.types.getAll().iterator().next(); String typeId = type.getId(); String targetUri = baseUrl() + ConsumerController.CONSUMER_TARGET_URL; - return new ConsumerJobInfo(typeId, jobParametersAsJsonObject(filter, maxTimeMiliseconds, maxSize), "owner", - targetUri, ""); + return new ConsumerJobInfo(typeId, + jobParametersAsJsonObject(filter, maxTime.toMillis(), maxSize, maxConcurrency), "owner", targetUri, + ""); } catch (Exception e) { return null; } @@ -218,6 +226,23 @@ class IntegrationWithKafka { return SenderRecord.create(new ProducerRecord<>(infoType.getKafkaInputTopic(), i, data + i), i); } + private void sendDataToStream(Flux> dataToSend) { + final KafkaSender sender = KafkaSender.create(senderOptions()); + + sender.send(dataToSend) // + .doOnError(e -> logger.error("Send failed", e)) // + .blockLast(); + + } + + private void verifiedReceivedByConsumer(String... 
strings) { + ConsumerController.TestResults consumer = this.consumerController.testResults; + await().untilAsserted(() -> assertThat(consumer.receivedBodies.size()).isEqualTo(strings.length)); + for (String s : strings) { + assertTrue(consumer.hasReceived(s)); + } + } + @Test void kafkaIntegrationTest() throws InterruptedException { final String JOB_ID1 = "ID1"; @@ -227,31 +252,62 @@ class IntegrationWithKafka { await().untilAsserted(() -> assertThat(ecsSimulatorController.testResults.registrationInfo).isNotNull()); assertThat(ecsSimulatorController.testResults.registrationInfo.supportedTypeIds).hasSize(1); - // Create a job - this.ecsSimulatorController.addJob(consumerJobInfo(".*", 10, 1000), JOB_ID1, restClient()); - this.ecsSimulatorController.addJob(consumerJobInfo(".*Message_1.*", 0, 0), JOB_ID2, restClient()); - await().untilAsserted(() -> assertThat(this.jobs.size()).isEqualTo(2)); + // Create two jobs. One buffering and one with a filter + this.ecsSimulatorController.addJob(consumerJobInfo(null, Duration.ofMillis(400), 1000, 20), JOB_ID1, + restClient()); + this.ecsSimulatorController.addJob(consumerJobInfo("^Message_1$", Duration.ZERO, 0, 1), JOB_ID2, restClient()); - final KafkaSender sender = KafkaSender.create(senderOptions()); + await().untilAsserted(() -> assertThat(this.jobs.size()).isEqualTo(2)); var dataToSend = Flux.range(1, 3).map(i -> senderRecord("Message_", i)); // Message_1, Message_2 etc. + sendDataToStream(dataToSend); - sender.send(dataToSend) // - .doOnError(e -> logger.error("Send failed", e)) // - .doOnNext(senderResult -> logger.debug("Sent {}", senderResult)) // - .doOnError(t -> logger.error("Error {}", t)) // - .blockLast(); + verifiedReceivedByConsumer("Message_1", "[\"Message_1\", \"Message_2\", \"Message_3\"]"); - ConsumerController.TestResults consumer = this.consumerController.testResults; - await().untilAsserted(() -> assertThat(consumer.receivedBodies.size()).isEqualTo(2)); - assertThat(consumer.receivedBodies.get(0)).isEqualTo("Message_1"); - assertThat(consumer.receivedBodies.get(1)).isEqualTo("[Message_1, Message_2, Message_3]"); + // Just for testing quoting + this.consumerController.testResults.reset(); + dataToSend = Flux.just(senderRecord("Message\"_", 1)); + sendDataToStream(dataToSend); + verifiedReceivedByConsumer("[\"Message\\\"_1\"]"); - // Delete the job + // Delete the jobs this.ecsSimulatorController.deleteJob(JOB_ID1, restClient()); this.ecsSimulatorController.deleteJob(JOB_ID2, restClient()); await().untilAsserted(() -> assertThat(this.jobs.size()).isZero()); + await().untilAsserted(() -> assertThat(this.kafkaTopicConsumers.getConsumers()).isEmpty()); + } + + @Test + void kafkaIOverflow() throws InterruptedException { + final String JOB_ID1 = "ID1"; + final String JOB_ID2 = "ID2"; + + // Register producer, Register types + await().untilAsserted(() -> assertThat(ecsSimulatorController.testResults.registrationInfo).isNotNull()); + assertThat(ecsSimulatorController.testResults.registrationInfo.supportedTypeIds).hasSize(1); + + // Create two jobs. + this.ecsSimulatorController.addJob(consumerJobInfo(null, Duration.ZERO, 0, 1), JOB_ID1, restClient()); + this.ecsSimulatorController.addJob(consumerJobInfo(null, Duration.ZERO, 0, 1), JOB_ID2, restClient()); + + await().untilAsserted(() -> assertThat(this.jobs.size()).isEqualTo(2)); + + var dataToSend = Flux.range(1, 1000000).map(i -> senderRecord("Message_", i)); // Message_1, Message_2 etc. 
+        sendDataToStream(dataToSend); // this should overflow
+
+        KafkaJobDataConsumer consumer = kafkaTopicConsumers.getConsumers().values().iterator().next();
+        await().untilAsserted(() -> assertThat(consumer.isRunning()).isFalse());
+        this.consumerController.testResults.reset();
+
+        kafkaTopicConsumers.restartNonRunningTasks();
+        this.ecsSimulatorController.deleteJob(JOB_ID2, restClient()); // Delete one job
+        Thread.sleep(1000); // Restarting the input seems to take some async time
+
+        dataToSend = Flux.range(1, 1).map(i -> senderRecord("Howdy_", i));
+        sendDataToStream(dataToSend);
+
+        verifiedReceivedByConsumer("Howdy_1");
     }
 }
diff --git a/dmaap-mediator-producer/Dockerfile b/dmaap-mediator-producer/Dockerfile
index bc09fdc2..1c7f45cb 100644
--- a/dmaap-mediator-producer/Dockerfile
+++ b/dmaap-mediator-producer/Dockerfile
@@ -20,7 +20,7 @@
 ##
 ## Build
 ##
-FROM golang:1.17-bullseye AS build
+FROM nexus3.o-ran-sc.org:10001/golang:1.17-bullseye AS build
 WORKDIR /app
 COPY go.mod .
 COPY go.sum .
diff --git a/dmaap-mediator-producer/README.md b/dmaap-mediator-producer/README.md
index 90f84714..2fd7194f 100644
--- a/dmaap-mediator-producer/README.md
+++ b/dmaap-mediator-producer/README.md
@@ -36,7 +36,7 @@ The configured public key and certificate shall be PEM-encoded. A self signed cer
 At start up the producer will register the configured job types in ICS and also register itself as a producer supporting these types. If ICS is unavailable, the producer will retry to connect indefinitely. The same goes for MR.
 
-Once the initial registration is done, the producer will constantly poll MR for all configured job types. When receiving messages for a type, it will distribute these messages to all jobs registered for the type. If no jobs for that type are registered, the messages will be discarded. If a consumer is unavailable for distribution, the messages will be discarded for that consumer.
+Once the initial registration is done, the producer will constantly poll MR for all configured job types. When receiving messages for a type, it will distribute these messages to all jobs registered for the type. If no jobs for that type are registered, the messages will be discarded. If a consumer is unavailable for distribution, the messages will be discarded for that consumer until it is available again.
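
The discard behaviour described above can be sketched in a few lines of Go. This is a minimal illustration with hypothetical names (`job`, `distribute`); the commit's actual implementation is the buffered `messagesChannel` handling in `dmaap-mediator-producer/internal/jobs/jobs.go`. The key idea: each job owns a small buffered channel, and the poll loop never blocks on a slow consumer.

```go
package main

import "fmt"

// job mirrors the per-job structure in internal/jobs/jobs.go: every job
// owns a small buffered channel that decouples it from the MR poll loop.
type job struct {
	id       string
	messages chan []byte
}

// distribute performs a non-blocking send to every registered job. When a
// consumer cannot keep up and its buffer is full, the message is discarded
// for that job only (simplified: the commit's handler instead drains the
// job's whole backlog when the buffer is full).
func distribute(jobs []job, msg []byte) {
	for _, j := range jobs {
		select {
		case j.messages <- msg: // consumer keeps up: message queued
		default: // buffer full: discard for this consumer only
			fmt.Printf("discarding message for job %s\n", j.id)
		}
	}
}

func main() {
	j := job{id: "job1", messages: make(chan []byte, 10)}
	distribute([]job{j}, []byte(`[{"message": {"data": "data"}}]`))
	fmt.Println("queued messages:", len(j.messages))
}
```

Either way the observable effect is the same: an unavailable consumer loses its own messages without stalling distribution to the other jobs.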
## Development diff --git a/dmaap-mediator-producer/internal/jobs/jobs.go b/dmaap-mediator-producer/internal/jobs/jobs.go index 1c429424..6dad5fd9 100644 --- a/dmaap-mediator-producer/internal/jobs/jobs.go +++ b/dmaap-mediator-producer/internal/jobs/jobs.go @@ -34,7 +34,7 @@ import ( type TypeData struct { TypeId string `json:"id"` DMaaPTopicURL string `json:"dmaapTopicUrl"` - jobHandler *jobHandler + jobsHandler *jobsHandler } type JobInfo struct { @@ -52,8 +52,8 @@ type JobTypesManager interface { } type JobsManager interface { - AddJob(JobInfo) error - DeleteJob(jobId string) + AddJobFromRESTCall(JobInfo) error + DeleteJobFromRESTCall(jobId string) } type JobsManagerImpl struct { @@ -64,17 +64,6 @@ type JobsManagerImpl struct { distributeClient restclient.HTTPClient } -type jobHandler struct { - mu sync.Mutex - typeId string - topicUrl string - jobs map[string]JobInfo - addJobCh chan JobInfo - deleteJobCh chan string - pollClient restclient.HTTPClient - distributeClient restclient.HTTPClient -} - func NewJobsManagerImpl(typeConfigFilePath string, pollClient restclient.HTTPClient, mrAddr string, distributeClient restclient.HTTPClient) *JobsManagerImpl { return &JobsManagerImpl{ configFile: typeConfigFilePath, @@ -85,10 +74,10 @@ func NewJobsManagerImpl(typeConfigFilePath string, pollClient restclient.HTTPCli } } -func (jm *JobsManagerImpl) AddJob(ji JobInfo) error { +func (jm *JobsManagerImpl) AddJobFromRESTCall(ji JobInfo) error { if err := jm.validateJobInfo(ji); err == nil { typeData := jm.allTypes[ji.InfoTypeIdentity] - typeData.jobHandler.addJobCh <- ji + typeData.jobsHandler.addJobCh <- ji log.Debug("Added job: ", ji) return nil } else { @@ -96,10 +85,10 @@ func (jm *JobsManagerImpl) AddJob(ji JobInfo) error { } } -func (jm *JobsManagerImpl) DeleteJob(jobId string) { +func (jm *JobsManagerImpl) DeleteJobFromRESTCall(jobId string) { for _, typeData := range jm.allTypes { log.Debugf("Deleting job %v from type %v", jobId, typeData.TypeId) - typeData.jobHandler.deleteJobCh <- jobId + typeData.jobsHandler.deleteJobCh <- jobId } log.Debug("Deleted job: ", jobId) } @@ -131,21 +120,10 @@ func (jm *JobsManagerImpl) LoadTypesFromConfiguration() ([]config.TypeDefinition return nil, err } for _, typeDef := range typeDefs.Types { - addCh := make(chan JobInfo) - deleteCh := make(chan string) - jh := jobHandler{ - typeId: typeDef.Id, - topicUrl: typeDef.DmaapTopicURL, - jobs: make(map[string]JobInfo), - addJobCh: addCh, - deleteJobCh: deleteCh, - pollClient: jm.pollClient, - distributeClient: jm.distributeClient, - } jm.allTypes[typeDef.Id] = TypeData{ TypeId: typeDef.Id, DMaaPTopicURL: typeDef.DmaapTopicURL, - jobHandler: &jh, + jobsHandler: newJobsHandler(typeDef.Id, typeDef.DmaapTopicURL, jm.pollClient, jm.distributeClient), } } return typeDefs.Types, nil @@ -159,15 +137,38 @@ func (jm *JobsManagerImpl) GetSupportedTypes() []string { return supportedTypes } -func (jm *JobsManagerImpl) StartJobs() { +func (jm *JobsManagerImpl) StartJobsForAllTypes() { for _, jobType := range jm.allTypes { - go jobType.jobHandler.start(jm.mrAddress) + go jobType.jobsHandler.startPollingAndDistribution(jm.mrAddress) + + } +} + +type jobsHandler struct { + mu sync.Mutex + typeId string + topicUrl string + jobs map[string]job + addJobCh chan JobInfo + deleteJobCh chan string + pollClient restclient.HTTPClient + distributeClient restclient.HTTPClient +} +func newJobsHandler(typeId string, topicURL string, pollClient restclient.HTTPClient, distributeClient restclient.HTTPClient) *jobsHandler { + return &jobsHandler{ 
+ typeId: typeId, + topicUrl: topicURL, + jobs: make(map[string]job), + addJobCh: make(chan JobInfo), + deleteJobCh: make(chan string), + pollClient: pollClient, + distributeClient: distributeClient, } } -func (jh *jobHandler) start(mRAddress string) { +func (jh *jobsHandler) startPollingAndDistribution(mRAddress string) { go func() { for { jh.pollAndDistributeMessages(mRAddress) @@ -181,45 +182,104 @@ func (jh *jobHandler) start(mRAddress string) { }() } -func (jh *jobHandler) pollAndDistributeMessages(mRAddress string) { +func (jh *jobsHandler) pollAndDistributeMessages(mRAddress string) { log.Debugf("Processing jobs for type: %v", jh.typeId) messagesBody, error := restclient.Get(mRAddress+jh.topicUrl, jh.pollClient) if error != nil { - log.Warnf("Error getting data from MR. Cause: %v", error) + log.Warn("Error getting data from MR. Cause: ", error) } - log.Debugf("Received messages: %v", string(messagesBody)) + log.Debug("Received messages: ", string(messagesBody)) jh.distributeMessages(messagesBody) } -func (jh *jobHandler) distributeMessages(messages []byte) { +func (jh *jobsHandler) distributeMessages(messages []byte) { if len(messages) > 2 { jh.mu.Lock() defer jh.mu.Unlock() - for _, jobInfo := range jh.jobs { - go jh.sendMessagesToConsumer(messages, jobInfo) + for _, job := range jh.jobs { + if len(job.messagesChannel) < cap(job.messagesChannel) { + job.messagesChannel <- messages + } else { + jh.emptyMessagesBuffer(job) + } } } } -func (jh *jobHandler) sendMessagesToConsumer(messages []byte, jobInfo JobInfo) { - log.Debugf("Processing job: %v", jobInfo.InfoJobIdentity) - if postErr := restclient.Post(jobInfo.TargetUri, messages, jh.distributeClient); postErr != nil { - log.Warnf("Error posting data for job: %v. Cause: %v", jobInfo, postErr) +func (jh *jobsHandler) emptyMessagesBuffer(job job) { + log.Debug("Emptying message queue for job: ", job.jobInfo.InfoJobIdentity) +out: + for { + select { + case <-job.messagesChannel: + default: + break out + } } - log.Debugf("Messages distributed to consumer: %v.", jobInfo.Owner) } -func (jh *jobHandler) monitorManagementChannels() { +func (jh *jobsHandler) monitorManagementChannels() { select { case addedJob := <-jh.addJobCh: - jh.mu.Lock() - log.Debugf("received %v from addJobCh\n", addedJob) - jh.jobs[addedJob.InfoJobIdentity] = addedJob - jh.mu.Unlock() + jh.addJob(addedJob) case deletedJob := <-jh.deleteJobCh: - jh.mu.Lock() - log.Debugf("received %v from deleteJobCh\n", deletedJob) + jh.deleteJob(deletedJob) + } +} + +func (jh *jobsHandler) addJob(addedJob JobInfo) { + jh.mu.Lock() + log.Debug("Add job: ", addedJob) + newJob := newJob(addedJob, jh.distributeClient) + go newJob.start() + jh.jobs[addedJob.InfoJobIdentity] = newJob + jh.mu.Unlock() +} + +func (jh *jobsHandler) deleteJob(deletedJob string) { + jh.mu.Lock() + log.Debug("Delete job: ", deletedJob) + j, exist := jh.jobs[deletedJob] + if exist { + j.controlChannel <- struct{}{} delete(jh.jobs, deletedJob) - jh.mu.Unlock() } + jh.mu.Unlock() +} + +type job struct { + jobInfo JobInfo + client restclient.HTTPClient + messagesChannel chan []byte + controlChannel chan struct{} +} + +func newJob(j JobInfo, c restclient.HTTPClient) job { + return job{ + jobInfo: j, + client: c, + messagesChannel: make(chan []byte, 10), + controlChannel: make(chan struct{}), + } +} + +func (j *job) start() { +out: + for { + select { + case <-j.controlChannel: + log.Debug("Stop distribution for job: ", j.jobInfo.InfoJobIdentity) + break out + case msg := <-j.messagesChannel: + 
j.sendMessagesToConsumer(msg) + } + } +} + +func (j *job) sendMessagesToConsumer(messages []byte) { + log.Debug("Processing job: ", j.jobInfo.InfoJobIdentity) + if postErr := restclient.Post(j.jobInfo.TargetUri, messages, j.client); postErr != nil { + log.Warnf("Error posting data for job: %v. Cause: %v", j.jobInfo, postErr) + } + log.Debugf("Messages for job: %v distributed to consumer: %v", j.jobInfo.InfoJobIdentity, j.jobInfo.Owner) } diff --git a/dmaap-mediator-producer/internal/jobs/jobs_test.go b/dmaap-mediator-producer/internal/jobs/jobs_test.go index 3651a136..552b5fa1 100644 --- a/dmaap-mediator-producer/internal/jobs/jobs_test.go +++ b/dmaap-mediator-producer/internal/jobs/jobs_test.go @@ -36,7 +36,7 @@ import ( const typeDefinition = `{"types": [{"id": "type1", "dmaapTopicUrl": "events/unauthenticated.SEC_FAULT_OUTPUT/dmaapmediatorproducer/type1"}]}` -func TestGetTypes_filesOkShouldReturnSliceOfTypesAndProvideSupportedTypes(t *testing.T) { +func TestJobsManagerGetTypes_filesOkShouldReturnSliceOfTypesAndProvideSupportedTypes(t *testing.T) { assertions := require.New(t) typesDir, err := os.MkdirTemp("", "configs") if err != nil { @@ -63,7 +63,7 @@ func TestGetTypes_filesOkShouldReturnSliceOfTypesAndProvideSupportedTypes(t *tes assertions.EqualValues([]string{"type1"}, supportedTypes) } -func TestManagerAddJobWhenTypeIsSupported_shouldAddJobToChannel(t *testing.T) { +func TestJobsManagerAddJobWhenTypeIsSupported_shouldAddJobToChannel(t *testing.T) { assertions := require.New(t) managerUnderTest := NewJobsManagerImpl("", nil, "", nil) wantedJob := JobInfo{ @@ -74,36 +74,36 @@ func TestManagerAddJobWhenTypeIsSupported_shouldAddJobToChannel(t *testing.T) { InfoJobData: "{}", InfoTypeIdentity: "type1", } - jobHandler := jobHandler{ + jobsHandler := jobsHandler{ addJobCh: make(chan JobInfo)} managerUnderTest.allTypes["type1"] = TypeData{ - TypeId: "type1", - jobHandler: &jobHandler, + TypeId: "type1", + jobsHandler: &jobsHandler, } var err error go func() { - err = managerUnderTest.AddJob(wantedJob) + err = managerUnderTest.AddJobFromRESTCall(wantedJob) }() assertions.Nil(err) - addedJob := <-jobHandler.addJobCh + addedJob := <-jobsHandler.addJobCh assertions.Equal(wantedJob, addedJob) } -func TestManagerAddJobWhenTypeIsNotSupported_shouldReturnError(t *testing.T) { +func TestJobsManagerAddJobWhenTypeIsNotSupported_shouldReturnError(t *testing.T) { assertions := require.New(t) managerUnderTest := NewJobsManagerImpl("", nil, "", nil) jobInfo := JobInfo{ InfoTypeIdentity: "type1", } - err := managerUnderTest.AddJob(jobInfo) + err := managerUnderTest.AddJobFromRESTCall(jobInfo) assertions.NotNil(err) assertions.Equal("type not supported: type1", err.Error()) } -func TestManagerAddJobWhenJobIdMissing_shouldReturnError(t *testing.T) { +func TestJobsManagerAddJobWhenJobIdMissing_shouldReturnError(t *testing.T) { assertions := require.New(t) managerUnderTest := NewJobsManagerImpl("", nil, "", nil) managerUnderTest.allTypes["type1"] = TypeData{ @@ -113,12 +113,12 @@ func TestManagerAddJobWhenJobIdMissing_shouldReturnError(t *testing.T) { jobInfo := JobInfo{ InfoTypeIdentity: "type1", } - err := managerUnderTest.AddJob(jobInfo) + err := managerUnderTest.AddJobFromRESTCall(jobInfo) assertions.NotNil(err) assertions.Equal("missing required job identity: { type1}", err.Error()) } -func TestManagerAddJobWhenTargetUriMissing_shouldReturnError(t *testing.T) { +func TestJobsManagerAddJobWhenTargetUriMissing_shouldReturnError(t *testing.T) { assertions := require.New(t) managerUnderTest := 
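Worth pausing on the jobs.go refactor completed above: each job now gets its own goroutine, a buffered message channel (capacity 10) and a control channel, and distributeMessages never blocks the MR poller: when a consumer falls behind, the backlog is dropped instead. A minimal, runnable sketch of that pattern (naming loosely follows the patch; this is an illustration, not the project's code):

    package main

    import "fmt"

    type job struct {
        messages chan []byte   // buffered: decouples polling from a slow consumer
        control  chan struct{} // signalled by deleteJob to stop the worker
    }

    func newJob() job {
        return job{messages: make(chan []byte, 10), control: make(chan struct{})}
    }

    // start mirrors job.start in the patch: deliver until told to stop.
    func (j job) start(done chan<- struct{}) {
        defer close(done)
        for {
            select {
            case <-j.control:
                return
            case msg := <-j.messages:
                fmt.Println("deliver:", string(msg))
            }
        }
    }

    // offer mirrors the distributeMessages policy: never block the poller;
    // if the job's buffer is full, drop the backlog (and this message) rather than stall.
    func offer(j job, msg []byte) {
        select {
        case j.messages <- msg:
        default:
            for {
                select {
                case <-j.messages:
                default:
                    return
                }
            }
        }
    }

    func main() {
        j := newJob()
        done := make(chan struct{})
        go j.start(done)
        offer(j, []byte(`{"msg":"msg-1"}`))
        j.control <- struct{}{} // stop, as jobsHandler.deleteJob does
        <-done
    }

The design point is that a slow consumer now costs messages for its own job only, instead of stalling distribution to every other job of the same type.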
NewJobsManagerImpl("", nil, "", nil) managerUnderTest.allTypes["type1"] = TypeData{ @@ -129,38 +129,42 @@ func TestManagerAddJobWhenTargetUriMissing_shouldReturnError(t *testing.T) { InfoTypeIdentity: "type1", InfoJobIdentity: "job1", } - err := managerUnderTest.AddJob(jobInfo) + err := managerUnderTest.AddJobFromRESTCall(jobInfo) assertions.NotNil(err) assertions.Equal("missing required target URI: { job1 type1}", err.Error()) } -func TestManagerDeleteJob(t *testing.T) { +func TestJobsManagerDeleteJob_shouldSendDeleteToChannel(t *testing.T) { assertions := require.New(t) managerUnderTest := NewJobsManagerImpl("", nil, "", nil) - jobHandler := jobHandler{ + jobsHandler := jobsHandler{ deleteJobCh: make(chan string)} managerUnderTest.allTypes["type1"] = TypeData{ - TypeId: "type1", - jobHandler: &jobHandler, + TypeId: "type1", + jobsHandler: &jobsHandler, } - go managerUnderTest.DeleteJob("job2") + go managerUnderTest.DeleteJobFromRESTCall("job2") - assertions.Equal("job2", <-jobHandler.deleteJobCh) + assertions.Equal("job2", <-jobsHandler.deleteJobCh) } -func TestHandlerPollAndDistributeMessages(t *testing.T) { +func TestAddJobToJobsManager_shouldStartPollAndDistributeMessages(t *testing.T) { assertions := require.New(t) - wg := sync.WaitGroup{} + called := false messages := `[{"message": {"data": "data"}}]` pollClientMock := NewTestClient(func(req *http.Request) *http.Response { if req.URL.String() == "http://mrAddr/topicUrl" { assertions.Equal(req.Method, "GET") - wg.Done() // Signal that the poll call has been made + body := "[]" + if !called { + called = true + body = messages + } return &http.Response{ StatusCode: 200, - Body: ioutil.NopCloser(bytes.NewReader([]byte(messages))), + Body: ioutil.NopCloser(bytes.NewReader([]byte(body))), Header: make(http.Header), // Must be set to non-nil value or it panics } } @@ -168,12 +172,14 @@ func TestHandlerPollAndDistributeMessages(t *testing.T) { t.Fail() return nil }) + + wg := sync.WaitGroup{} distributeClientMock := NewTestClient(func(req *http.Request) *http.Response { if req.URL.String() == "http://consumerHost/target" { assertions.Equal(req.Method, "POST") - assertions.Equal(messages, getBodyAsString(req)) + assertions.Equal(messages, getBodyAsString(req, t)) assertions.Equal("application/json", req.Header.Get("Content-Type")) - wg.Done() // Signal that the distribution call has been made + wg.Done() return &http.Response{ StatusCode: 200, Body: ioutil.NopCloser(bytes.NewBufferString(`OK`)), @@ -184,73 +190,73 @@ func TestHandlerPollAndDistributeMessages(t *testing.T) { t.Fail() return nil }) + jobsHandler := newJobsHandler("type1", "/topicUrl", pollClientMock, distributeClientMock) + + jobsManager := NewJobsManagerImpl("", pollClientMock, "http://mrAddr", distributeClientMock) + jobsManager.allTypes["type1"] = TypeData{ + DMaaPTopicURL: "/topicUrl", + TypeId: "type1", + jobsHandler: jobsHandler, + } + + jobsManager.StartJobsForAllTypes() jobInfo := JobInfo{ InfoTypeIdentity: "type1", InfoJobIdentity: "job1", TargetUri: "http://consumerHost/target", } - handlerUnderTest := jobHandler{ - topicUrl: "/topicUrl", - jobs: map[string]JobInfo{jobInfo.InfoJobIdentity: jobInfo}, - pollClient: pollClientMock, - distributeClient: distributeClientMock, - } - wg.Add(2) // Two calls should be made to the server, one to poll and one to distribute - handlerUnderTest.pollAndDistributeMessages("http://mrAddr") + wg.Add(1) // Wait till the distribution has happened + err := jobsManager.AddJobFromRESTCall(jobInfo) + assertions.Nil(err) - if waitTimeout(&wg, 
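The pollClientMock and distributeClientMock above come from NewTestClient, built on the RoundTripFunc type defined near the end of jobs_test.go: any function with the right signature can act as an http.RoundTripper, so the tests fake both MR and the consumer without opening a socket. A standalone sketch of the mechanism (the response payload is invented for illustration):

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "strings"
    )

    // RoundTripFunc lets a plain function act as an http.RoundTripper.
    type RoundTripFunc func(req *http.Request) *http.Response

    func (f RoundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
        return f(req), nil
    }

    // NewTestClient returns an *http.Client that calls fn instead of the network.
    func NewTestClient(fn RoundTripFunc) *http.Client {
        return &http.Client{Transport: fn}
    }

    func main() {
        client := NewTestClient(func(req *http.Request) *http.Response {
            return &http.Response{
                StatusCode: 200,
                Body:       io.NopCloser(strings.NewReader(`[{"message": {"data": "data"}}]`)),
                Header:     make(http.Header), // must be non-nil or the client panics
            }
        })
        resp, _ := client.Get("http://mrAddr/topicUrl")
        body, _ := io.ReadAll(resp.Body)
        fmt.Println(resp.StatusCode, string(body))
    }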
100*time.Millisecond) { + if waitTimeout(&wg, 2*time.Second) { t.Error("Not all calls to server were made") t.Fail() } } -func TestHandlerAddJob_shouldAddJobToJobsMap(t *testing.T) { - assertions := require.New(t) +func TestJobsHandlerDeleteJob_shouldDeleteJobFromJobsMap(t *testing.T) { + jobToDelete := newJob(JobInfo{}, nil) + go jobToDelete.start() + jobsHandler := newJobsHandler("type1", "/topicUrl", nil, nil) + jobsHandler.jobs["job1"] = jobToDelete - jobInfo := JobInfo{ - InfoTypeIdentity: "type1", - InfoJobIdentity: "job1", - TargetUri: "http://consumerHost/target", - } + go jobsHandler.monitorManagementChannels() - addCh := make(chan JobInfo) - handlerUnderTest := jobHandler{ - mu: sync.Mutex{}, - jobs: map[string]JobInfo{}, - addJobCh: addCh, - } + jobsHandler.deleteJobCh <- "job1" - go func() { - addCh <- jobInfo - }() - - handlerUnderTest.monitorManagementChannels() - - assertions.Len(handlerUnderTest.jobs, 1) - assertions.Equal(jobInfo, handlerUnderTest.jobs["job1"]) + deleted := false + for i := 0; i < 100; i++ { + if len(jobsHandler.jobs) == 0 { + deleted = true + break + } + time.Sleep(time.Microsecond) // Need to drop control to let the job's goroutine do the job + } + require.New(t).True(deleted, "Job not deleted") } -func TestHandlerDeleteJob_shouldDeleteJobFromJobsMap(t *testing.T) { - assertions := require.New(t) +func TestJobsHandlerEmptyJobMessageBufferWhenItIsFull(t *testing.T) { + job := newJob(JobInfo{ + InfoJobIdentity: "job", + }, nil) - deleteCh := make(chan string) - handlerUnderTest := jobHandler{ - mu: sync.Mutex{}, - jobs: map[string]JobInfo{"job1": { - InfoJobIdentity: "job1", - }}, - deleteJobCh: deleteCh, - } + jobsHandler := newJobsHandler("type1", "/topicUrl", nil, nil) + jobsHandler.jobs["job1"] = job - go func() { - deleteCh <- "job1" - }() + fillMessagesBuffer(job.messagesChannel) - handlerUnderTest.monitorManagementChannels() + jobsHandler.distributeMessages([]byte("sent msg")) - assertions.Len(handlerUnderTest.jobs, 0) + require.New(t).Len(job.messagesChannel, 0) +} + +func fillMessagesBuffer(mc chan []byte) { + for i := 0; i < cap(mc); i++ { + mc <- []byte("msg") + } } type RoundTripFunc func(req *http.Request) *http.Response @@ -282,8 +288,10 @@ func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool { } } -func getBodyAsString(req *http.Request) string { +func getBodyAsString(req *http.Request, t *testing.T) string { buf := new(bytes.Buffer) - buf.ReadFrom(req.Body) + if _, err := buf.ReadFrom(req.Body); err != nil { + t.Fail() + } return buf.String() } diff --git a/dmaap-mediator-producer/internal/server/server.go b/dmaap-mediator-producer/internal/server/server.go index 8bed1f91..79646c29 100644 --- a/dmaap-mediator-producer/internal/server/server.go +++ b/dmaap-mediator-producer/internal/server/server.go @@ -71,7 +71,7 @@ func (h *ProducerCallbackHandler) addInfoJobHandler(w http.ResponseWriter, r *ht http.Error(w, fmt.Sprintf("Invalid json body. Cause: %v", unmarshalErr), http.StatusBadRequest) return } - if err := h.jobsManager.AddJob(jobInfo); err != nil { + if err := h.jobsManager.AddJobFromRESTCall(jobInfo); err != nil { http.Error(w, fmt.Sprintf("Invalid job info. 
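waitTimeout, used above with a 2 second budget, is the usual workaround for sync.WaitGroup having no timed wait. A sketch of how such a helper is commonly written (illustrative; the real one lives in jobs_test.go):

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    // waitTimeout returns true if the timeout fired before the WaitGroup finished.
    func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
        done := make(chan struct{})
        go func() {
            defer close(done)
            wg.Wait()
        }()
        select {
        case <-done:
            return false // all goroutines finished in time
        case <-time.After(timeout):
            return true // gave up waiting
        }
    }

    func main() {
        var wg sync.WaitGroup
        wg.Add(1)
        go func() {
            defer wg.Done()
            time.Sleep(50 * time.Millisecond)
        }()
        fmt.Println("timed out:", waitTimeout(&wg, 2*time.Second)) // timed out: false
    }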
Cause: %v", err), http.StatusBadRequest) } } @@ -84,7 +84,7 @@ func (h *ProducerCallbackHandler) deleteInfoJobHandler(w http.ResponseWriter, r return } - h.jobsManager.DeleteJob(id) + h.jobsManager.DeleteJobFromRESTCall(id) } type notFoundHandler struct{} diff --git a/dmaap-mediator-producer/internal/server/server_test.go b/dmaap-mediator-producer/internal/server/server_test.go index 5c2027aa..1d458c98 100644 --- a/dmaap-mediator-producer/internal/server/server_test.go +++ b/dmaap-mediator-producer/internal/server/server_test.go @@ -136,7 +136,7 @@ func TestAddInfoJobHandler(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { jobHandlerMock := jobhandler.JobHandler{} - jobHandlerMock.On("AddJob", tt.args.job).Return(tt.args.mockReturn) + jobHandlerMock.On("AddJobFromRESTCall", tt.args.job).Return(tt.args.mockReturn) callbackHandlerUnderTest := NewProducerCallbackHandler(&jobHandlerMock) @@ -148,7 +148,7 @@ func TestAddInfoJobHandler(t *testing.T) { assertions.Equal(tt.wantedStatus, responseRecorder.Code, tt.name) assertions.Contains(responseRecorder.Body.String(), tt.wantedBody, tt.name) - jobHandlerMock.AssertCalled(t, "AddJob", tt.args.job) + jobHandlerMock.AssertCalled(t, "AddJobFromRESTCall", tt.args.job) }) } } @@ -156,7 +156,7 @@ func TestAddInfoJobHandler(t *testing.T) { func TestDeleteJob(t *testing.T) { assertions := require.New(t) jobHandlerMock := jobhandler.JobHandler{} - jobHandlerMock.On("DeleteJob", mock.Anything).Return(nil) + jobHandlerMock.On("DeleteJobFromRESTCall", mock.Anything).Return(nil) callbackHandlerUnderTest := NewProducerCallbackHandler(&jobHandlerMock) @@ -168,7 +168,7 @@ func TestDeleteJob(t *testing.T) { assertions.Equal("", responseRecorder.Body.String()) - jobHandlerMock.AssertCalled(t, "DeleteJob", "job1") + jobHandlerMock.AssertCalled(t, "DeleteJobFromRESTCall", "job1") } func newRequest(method string, url string, jobInfo *jobs.JobInfo, t *testing.T) *http.Request { diff --git a/dmaap-mediator-producer/main.go b/dmaap-mediator-producer/main.go index 74f4edfb..194ed750 100644 --- a/dmaap-mediator-producer/main.go +++ b/dmaap-mediator-producer/main.go @@ -60,7 +60,7 @@ func main() { if err := registerTypesAndProducer(jobsManager, configuration.InfoCoordinatorAddress, callbackAddress, retryClient); err != nil { log.Fatalf("Stopping producer due to: %v", err) } - jobsManager.StartJobs() + jobsManager.StartJobsForAllTypes() log.Debug("Starting DMaaP Mediator Producer") go func() { diff --git a/dmaap-mediator-producer/mocks/jobhandler/JobHandler.go b/dmaap-mediator-producer/mocks/jobhandler/JobHandler.go index 8e30b1c2..ad20752c 100644 --- a/dmaap-mediator-producer/mocks/jobhandler/JobHandler.go +++ b/dmaap-mediator-producer/mocks/jobhandler/JobHandler.go @@ -13,7 +13,7 @@ type JobHandler struct { } // AddJob provides a mock function with given fields: _a0 -func (_m *JobHandler) AddJob(_a0 jobs.JobInfo) error { +func (_m *JobHandler) AddJobFromRESTCall(_a0 jobs.JobInfo) error { ret := _m.Called(_a0) var r0 error @@ -27,6 +27,6 @@ func (_m *JobHandler) AddJob(_a0 jobs.JobInfo) error { } // DeleteJob provides a mock function with given fields: jobId -func (_m *JobHandler) DeleteJob(jobId string) { +func (_m *JobHandler) DeleteJobFromRESTCall(jobId string) { _m.Called(jobId) } diff --git a/dmaap-mediator-producer/stub/consumer/consumerstub.go b/dmaap-mediator-producer/stub/consumer/consumerstub.go index 03e67c02..5cbcaeab 100644 --- a/dmaap-mediator-producer/stub/consumer/consumerstub.go +++ 
b/dmaap-mediator-producer/stub/consumer/consumerstub.go @@ -44,7 +44,7 @@ func main() { registerJob(*port) fmt.Print("Starting consumer on port: ", *port) - http.ListenAndServe(fmt.Sprintf(":%v", *port), nil) + fmt.Println(http.ListenAndServe(fmt.Sprintf(":%v", *port), nil)) } func registerJob(port int) { diff --git a/dmaap-mediator-producer/stub/dmaap/mrstub.go b/dmaap-mediator-producer/stub/dmaap/mrstub.go index 82ae08d5..36ffa396 100644 --- a/dmaap-mediator-producer/stub/dmaap/mrstub.go +++ b/dmaap-mediator-producer/stub/dmaap/mrstub.go @@ -57,7 +57,7 @@ func main() { http.HandleFunc("/events/unauthenticated.SEC_FAULT_OUTPUT/dmaapmediatorproducer/STD_Fault_Messages", handleData) fmt.Print("Starting mr on port: ", *port) - http.ListenAndServeTLS(fmt.Sprintf(":%v", *port), "../../security/producer.crt", "../../security/producer.key", nil) + fmt.Println(http.ListenAndServeTLS(fmt.Sprintf(":%v", *port), "../../security/producer.crt", "../../security/producer.key", nil)) } diff --git a/docker-compose/.env b/docker-compose/.env new file mode 100644 index 00000000..6fc3528d --- /dev/null +++ b/docker-compose/.env @@ -0,0 +1,64 @@ +# ============LICENSE_START=============================================== +# Copyright (C) 2021 Nordix Foundation. All rights reserved. +# ======================================================================== +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
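A small but useful fix above: the consumer and MR stubs now print the value returned by http.ListenAndServe / http.ListenAndServeTLS instead of discarding it, so a failed bind or a bad certificate path is no longer silent. Since these calls block and only return on failure, another common idiom (a sketch, not what the stubs use) is to exit through the logger:

    package main

    import (
        "log"
        "net/http"
    )

    func main() {
        http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte("OK"))
        })
        // ListenAndServe only returns on failure (e.g. port already in use),
        // so surfacing its error is never noise.
        log.Fatal(http.ListenAndServe(":8095", nil))
    }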
+# ============LICENSE_END================================================= +# + +#PMS +PMS_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-policy-agent" +PMS_IMAGE_TAG="2.2.0" + +#A1_SIM +A1_SIM_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/a1-simulator" +A1_SIM_IMAGE_TAG="2.1.0" + +#RAPP +RAPP_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-r-app-catalogue" +RAPP_IMAGE_TAG="1.0.0" + +#CONTROL_PANEL +CONTROL_PANEL_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-controlpanel" +CONTROL_PANEL_IMAGE_TAG="2.2.0" + +#GATEWAY +NONRTRIC_GATEWAY_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-gateway" +NONRTRIC_GATEWAY_IMAGE_TAG="1.0.0" + +#ECS +ECS_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-enrichment-coordinator-service" +ECS_IMAGE_TAG="1.1.0" + +#CONSUMER +CONSUMER_IMAGE_BASE="eexit/mirror-http-server" +CONSUMER_IMAGE_TAG="latest" + +#ORU +ORU_APP_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-o-ru-closed-loop-recovery" +ORU_APP_IMAGE_TAG="1.0.0" + +#DB +DB_IMAGE_BASE="mysql/mysql-server" +DB_IMAGE_TAG="5.6" + +#A1CONTROLLER +A1CONTROLLER_IMAGE_BASE="nexus3.onap.org:10002/onap/sdnc-image" +A1CONTROLLER_IMAGE_TAG="2.1.2" + +#DMAAP_MEDIATOR_GO +DMAAP_MEDIATOR_GO_BASE="nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-dmaap-mediator-producer" +DMAAP_MEDIATOR_GO_TAG="1.0.0" + +#DMAAP_MEDIATOR_JAVA +DMAAP_MEDIATOR_JAVA_BASE="nexus3.o-ran-sc.org:10003/o-ran-sc/nonrtric-dmaap-adaptor" +DMAAP_MEDIATOR_JAVA_TAG="1.0.0-SNAPSHOT" \ No newline at end of file diff --git a/docker-compose/a1-sim/docker-compose.yaml b/docker-compose/a1-sim/docker-compose.yaml index 9366ff1b..84679468 100644 --- a/docker-compose/a1-sim/docker-compose.yaml +++ b/docker-compose/a1-sim/docker-compose.yaml @@ -22,7 +22,7 @@ networks: services: a1-sim-OSC: - image: nexus3.o-ran-sc.org:10002/o-ran-sc/a1-simulator:2.1.0 + image: "${A1_SIM_IMAGE_BASE}:${A1_SIM_IMAGE_TAG}" container_name: a1-sim-OSC networks: - default @@ -35,7 +35,7 @@ services: - ALLOW_HTTP=true a1-sim-STD: - image: nexus3.o-ran-sc.org:10002/o-ran-sc/a1-simulator:2.1.0 + image: "${A1_SIM_IMAGE_BASE}:${A1_SIM_IMAGE_TAG}" container_name: a1-sim-STD networks: - default @@ -48,7 +48,7 @@ services: - ALLOW_HTTP=true a1-sim-STD-v2: - image: nexus3.o-ran-sc.org:10002/o-ran-sc/a1-simulator:2.1.0 + image: "${A1_SIM_IMAGE_BASE}:${A1_SIM_IMAGE_TAG}" container_name: a1-sim-STD-v2 networks: - default diff --git a/docker-compose/dmaap-mediator-go/docker-compose.yaml b/docker-compose/dmaap-mediator-go/docker-compose.yaml index 340d1588..4efdf57e 100644 --- a/docker-compose/dmaap-mediator-go/docker-compose.yaml +++ b/docker-compose/dmaap-mediator-go/docker-compose.yaml @@ -22,18 +22,15 @@ networks: services: dmaap-mediator-go: - image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-dmaap-mediator-producer:1.0.0 + image: "${DMAAP_MEDIATOR_GO_BASE}:${DMAAP_MEDIATOR_GO_TAG}" container_name: dmaap-mediator-go environment: - INFO_PRODUCER_HOST=http://consumer - - LOG_LEVEL=Debug - INFO_PRODUCER_PORT=8088 - INFO_COORD_ADDR=http://ecs:8083 - - MR_HOST=http://dmaap-mr - - MR_PORT=3904 - - INFO_PRODUCER_SUPERVISION_CALLBACK_HOST=http://consumer - - INFO_PRODUCER_SUPERVISION_CALLBACK_PORT=8088 - - INFO_JOB_CALLBACK_HOST=http://consumer - - INFO_JOB_CALLBACK_PORT=8088 + - DMAAP_MR_ADDR=http://dmaap-mr:3904 + - PRODUCER_CERT_PATH=security/producer.crt + - PRODUCER_KEY_PATH=security/producer.key + - LOG_LEVEL=Debug networks: - default \ No newline at end of file diff --git a/docker-compose/dmaap-mediator-java/docker-compose.yaml
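The new docker-compose/.env above centralizes every image as a BASE/TAG pair, and the compose files switch to image: "${X_IMAGE_BASE}:${X_IMAGE_TAG}". Compose resolves ${VAR} against the environment and the .env file; the same expansion expressed in Go, purely for illustration:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        // Values copied from the .env file above.
        os.Setenv("A1_SIM_IMAGE_BASE", "nexus3.o-ran-sc.org:10002/o-ran-sc/a1-simulator")
        os.Setenv("A1_SIM_IMAGE_TAG", "2.1.0")

        image := os.Expand("${A1_SIM_IMAGE_BASE}:${A1_SIM_IMAGE_TAG}", os.Getenv)
        fmt.Println(image) // nexus3.o-ran-sc.org:10002/o-ran-sc/a1-simulator:2.1.0
    }

The payoff is that bumping a component version is now a one-line .env change instead of an edit in every compose file.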
b/docker-compose/dmaap-mediator-java/docker-compose.yaml index 1d53de43..5cfe8098 100644 --- a/docker-compose/dmaap-mediator-java/docker-compose.yaml +++ b/docker-compose/dmaap-mediator-java/docker-compose.yaml @@ -22,7 +22,7 @@ networks: services: dmaap-mediator-java: - image: nexus3.o-ran-sc.org:10003/o-ran-sc/nonrtric-dmaap-adaptor:1.0.0-SNAPSHOT + image: "${DMAAP_MEDIATOR_JAVA_BASE}:${DMAAP_MEDIATOR_JAVA_TAG}" container_name: dmaap-mediator-java networks: - default diff --git a/docker-compose/ecs/docker-compose.yaml b/docker-compose/ecs/docker-compose.yaml index 376f734c..6de293f4 100644 --- a/docker-compose/ecs/docker-compose.yaml +++ b/docker-compose/ecs/docker-compose.yaml @@ -22,7 +22,7 @@ networks: services: ecs: - image: nexus3.o-ran-sc.org:10003/o-ran-sc/nonrtric-enrichment-coordinator-service:1.2.0-SNAPSHOT + image: "${ECS_IMAGE_BASE}:${ECS_IMAGE_TAG}" container_name: ecs networks: default: @@ -32,7 +32,7 @@ services: - 8083:8083 - 8434:8434 consumer: - image: eexit/mirror-http-server + image: "${CONSUMER_IMAGE_BASE}:${CONSUMER_IMAGE_TAG}" container_name: consumer networks: - default diff --git a/docker-compose/policy-service/docker-compose.yaml b/docker-compose/policy-service/docker-compose.yaml index a593e2e8..2dfc38c5 100644 --- a/docker-compose/policy-service/docker-compose.yaml +++ b/docker-compose/policy-service/docker-compose.yaml @@ -22,7 +22,7 @@ networks: services: policy-agent: - image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-policy-agent:2.3.0 + image: "${PMS_IMAGE_BASE}:${PMS_IMAGE_TAG}" container_name: policy-agent networks: default: diff --git a/docker-compose/rapp/docker-compose.yaml b/docker-compose/rapp/docker-compose.yaml index ade37f71..54775886 100644 --- a/docker-compose/rapp/docker-compose.yaml +++ b/docker-compose/rapp/docker-compose.yaml @@ -22,7 +22,7 @@ networks: services: r-app: - image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-r-app-catalogue:1.1.0 + image: "${RAPP_IMAGE_BASE}:${RAPP_IMAGE_TAG}" container_name: r-app networks: default: diff --git a/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/clients/AsyncRestClient.java b/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/clients/AsyncRestClient.java index 1b8e0643..b7f23b1f 100644 --- a/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/clients/AsyncRestClient.java +++ b/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/clients/AsyncRestClient.java @@ -67,96 +67,85 @@ public class AsyncRestClient { logger.debug("{} POST uri = '{}{}''", traceTag, baseUrl, uri); logger.trace("{} POST body: {}", traceTag, body); Mono bodyProducer = body != null ? 
Mono.just(body) : Mono.empty(); - return getWebClient() // - .flatMap(client -> { - RequestHeadersSpec request = client.post() // - .uri(uri) // - .contentType(MediaType.APPLICATION_JSON) // - .body(bodyProducer, String.class); - return retrieve(traceTag, request); - }); + + RequestHeadersSpec request = getWebClient() // + .post() // + .uri(uri) // + .contentType(MediaType.APPLICATION_JSON) // + .body(bodyProducer, String.class); + return retrieve(traceTag, request); } public Mono post(String uri, @Nullable String body) { return postForEntity(uri, body) // - .flatMap(this::toBody); + .map(this::toBody); } public Mono postWithAuthHeader(String uri, String body, String username, String password) { Object traceTag = createTraceTag(); logger.debug("{} POST (auth) uri = '{}{}''", traceTag, baseUrl, uri); logger.trace("{} POST body: {}", traceTag, body); - return getWebClient() // - .flatMap(client -> { - RequestHeadersSpec request = client.post() // - .uri(uri) // - .headers(headers -> headers.setBasicAuth(username, password)) // - .contentType(MediaType.APPLICATION_JSON) // - .bodyValue(body); - return retrieve(traceTag, request) // - .flatMap(this::toBody); - }); + + RequestHeadersSpec request = getWebClient() // + .post() // + .uri(uri) // + .headers(headers -> headers.setBasicAuth(username, password)) // + .contentType(MediaType.APPLICATION_JSON) // + .bodyValue(body); + return retrieve(traceTag, request) // + .map(this::toBody); } public Mono> putForEntity(String uri, String body) { Object traceTag = createTraceTag(); logger.debug("{} PUT uri = '{}{}''", traceTag, baseUrl, uri); logger.trace("{} PUT body: {}", traceTag, body); - return getWebClient() // - .flatMap(client -> { - RequestHeadersSpec request = client.put() // - .uri(uri) // - .contentType(MediaType.APPLICATION_JSON) // - .bodyValue(body); - return retrieve(traceTag, request); - }); + + RequestHeadersSpec request = getWebClient() // + .put() // + .uri(uri) // + .contentType(MediaType.APPLICATION_JSON) // + .bodyValue(body); + return retrieve(traceTag, request); } public Mono> putForEntity(String uri) { Object traceTag = createTraceTag(); logger.debug("{} PUT uri = '{}{}''", traceTag, baseUrl, uri); logger.trace("{} PUT body: ", traceTag); - return getWebClient() // - .flatMap(client -> { - RequestHeadersSpec request = client.put() // - .uri(uri); - return retrieve(traceTag, request); - }); + RequestHeadersSpec request = getWebClient() // + .put() // + .uri(uri); + return retrieve(traceTag, request); } public Mono put(String uri, String body) { return putForEntity(uri, body) // - .flatMap(this::toBody); + .map(this::toBody); } public Mono> getForEntity(String uri) { Object traceTag = createTraceTag(); logger.debug("{} GET uri = '{}{}''", traceTag, baseUrl, uri); - return getWebClient() // - .flatMap(client -> { - RequestHeadersSpec request = client.get().uri(uri); - return retrieve(traceTag, request); - }); + RequestHeadersSpec request = getWebClient().get().uri(uri); + return retrieve(traceTag, request); } public Mono get(String uri) { return getForEntity(uri) // - .flatMap(this::toBody); + .map(this::toBody); } public Mono> deleteForEntity(String uri) { Object traceTag = createTraceTag(); logger.debug("{} DELETE uri = '{}{}''", traceTag, baseUrl, uri); - return getWebClient() // - .flatMap(client -> { - RequestHeadersSpec request = client.delete().uri(uri); - return retrieve(traceTag, request); - }); + RequestHeadersSpec request = getWebClient().delete().uri(uri); + return retrieve(traceTag, request); } public Mono 
delete(String uri) { return deleteForEntity(uri) // - .flatMap(this::toBody); + .map(this::toBody); } private Mono> retrieve(Object traceTag, RequestHeadersSpec request) { @@ -185,11 +174,11 @@ public class AsyncRestClient { } } - private Mono toBody(ResponseEntity entity) { + private String toBody(ResponseEntity entity) { if (entity.getBody() == null) { - return Mono.just(""); + return ""; } else { - return Mono.just(entity.getBody()); + return entity.getBody(); } } @@ -229,11 +218,10 @@ public class AsyncRestClient { .build(); } - private Mono getWebClient() { + private WebClient getWebClient() { if (this.webClient == null) { this.webClient = buildWebClient(baseUrl); } - return Mono.just(buildWebClient(baseUrl)); + return this.webClient; } - } diff --git a/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/controllers/a1e/A1eController.java b/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/controllers/a1e/A1eController.java index 9609e276..8c056fc6 100644 --- a/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/controllers/a1e/A1eController.java +++ b/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/controllers/a1e/A1eController.java @@ -298,7 +298,7 @@ public class A1eController { return validatePutEiJob(eiJobId, eiJobObject) // .flatMap(this::startEiJob) // .doOnNext(newEiJob -> this.eiJobs.put(newEiJob)) // - .flatMap(newEiJob -> Mono.just(new ResponseEntity<>(isNewJob ? HttpStatus.CREATED : HttpStatus.OK))) + .map(newEiJob -> new ResponseEntity<>(isNewJob ? HttpStatus.CREATED : HttpStatus.OK)) // .onErrorResume(throwable -> Mono.just(ErrorResponse.create(throwable, HttpStatus.INTERNAL_SERVER_ERROR))); } @@ -306,7 +306,7 @@ public class A1eController { return this.producerCallbacks.startInfoSubscriptionJob(newEiJob, infoProducers) // .doOnNext(noOfAcceptingProducers -> this.logger.debug( "Started EI job {}, number of activated producers: {}", newEiJob.getId(), noOfAcceptingProducers)) // - .flatMap(noOfAcceptingProducers -> Mono.just(newEiJob)); + .map(noOfAcceptingProducers -> newEiJob); } private Mono validatePutEiJob(String eiJobId, A1eEiJobInfo eiJobInfo) { diff --git a/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/controllers/r1consumer/ConsumerController.java b/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/controllers/r1consumer/ConsumerController.java index 47a4a2ec..b108380b 100644 --- a/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/controllers/r1consumer/ConsumerController.java +++ b/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/controllers/r1consumer/ConsumerController.java @@ -308,7 +308,7 @@ public class ConsumerController { return validatePutInfoJob(jobId, informationJobObject, performTypeCheck) // .flatMap(this::startInfoSubscriptionJob) // .doOnNext(this.infoJobs::put) // - .flatMap(newEiJob -> Mono.just(new ResponseEntity<>(isNewJob ? HttpStatus.CREATED : HttpStatus.OK))) + .map(newEiJob -> new ResponseEntity<>(isNewJob ? 
HttpStatus.CREATED : HttpStatus.OK)) // .onErrorResume(throwable -> Mono.just(ErrorResponse.create(throwable, HttpStatus.NOT_FOUND))); } @@ -441,7 +441,7 @@ public class ConsumerController { return this.producerCallbacks.startInfoSubscriptionJob(newInfoJob, infoProducers) // .doOnNext(noOfAcceptingProducers -> this.logger.debug("Started job {}, number of activated producers: {}", newInfoJob.getId(), noOfAcceptingProducers)) // - .flatMap(noOfAcceptingProducers -> Mono.just(newInfoJob)); + .map(noOfAcceptingProducers -> newInfoJob); } private Mono validatePutInfoJob(String jobId, ConsumerJobInfo jobInfo, boolean performTypeCheck) { diff --git a/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/controllers/r1producer/ProducerCallbacks.java b/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/controllers/r1producer/ProducerCallbacks.java index a97bdf66..558ae799 100644 --- a/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/controllers/r1producer/ProducerCallbacks.java +++ b/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/controllers/r1producer/ProducerCallbacks.java @@ -84,7 +84,7 @@ public class ProducerCallbacks { return Flux.fromIterable(getProducersForJob(infoJob, infoProducers)) // .flatMap(infoProducer -> startInfoJob(infoProducer, infoJob, retrySpec)) // .collectList() // - .flatMap(okResponses -> Mono.just(Integer.valueOf(okResponses.size()))); // + .map(okResponses -> Integer.valueOf(okResponses.size())); // } /** diff --git a/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/repository/InfoTypeSubscriptions.java b/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/repository/InfoTypeSubscriptions.java index 65978e15..533199ff 100644 --- a/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/repository/InfoTypeSubscriptions.java +++ b/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/repository/InfoTypeSubscriptions.java @@ -222,8 +222,7 @@ public class InfoTypeSubscriptions { private Mono notifySubscriber(Function> notifyFunc, SubscriptionInfo subscriptionInfo) { Retry retrySpec = Retry.backoff(3, Duration.ofSeconds(1)); - return Mono.just(1) // - .flatMap(notUsed -> notifyFunc.apply(subscriptionInfo)) // + return notifyFunc.apply(subscriptionInfo) // .retryWhen(retrySpec) // .onErrorResume(throwable -> { logger.warn("Consumer callback failed {}, removing subscription {}", throwable.getMessage(), diff --git a/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/tasks/ProducerSupervision.java b/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/tasks/ProducerSupervision.java index db7c29ba..08c5fc85 100644 --- a/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/tasks/ProducerSupervision.java +++ b/enrichment-coordinator-service/src/main/java/org/oransc/enrichment/tasks/ProducerSupervision.java @@ -80,7 +80,7 @@ public class ProducerSupervision { })// .doOnNext(response -> handleRespondingProducer(response, producer)) .flatMap(response -> checkProducerJobs(producer)) // - .flatMap(responses -> Mono.just(producer)); + .map(responses -> producer); } private Mono checkProducerJobs(InfoProducer producer) { diff --git a/enrichment-coordinator-service/src/test/java/org/oransc/enrichment/ApplicationTest.java b/enrichment-coordinator-service/src/test/java/org/oransc/enrichment/ApplicationTest.java index 44184296..8c8ce5f1 100644 --- 
a/enrichment-coordinator-service/src/test/java/org/oransc/enrichment/ApplicationTest.java +++ b/enrichment-coordinator-service/src/test/java/org/oransc/enrichment/ApplicationTest.java @@ -1028,7 +1028,7 @@ class ApplicationTest { // Test that subscriptions are removed for a unresponsive consumer // PUT a subscription with a junk callback - final ConsumerTypeSubscriptionInfo info = new ConsumerTypeSubscriptionInfo(baseUrl() + "JUNK", "owner"); + final ConsumerTypeSubscriptionInfo info = new ConsumerTypeSubscriptionInfo(baseUrl() + "/JUNK", "owner"); String body = gson.toJson(info); restClient().putForEntity(typeSubscriptionUrl() + "/subscriptionId", body).block(); assertThat(this.infoTypeSubscriptions.size()).isEqualTo(1); diff --git a/onap/oran b/onap/oran index 3b916e4d..558d6d2d 160000 --- a/onap/oran +++ b/onap/oran @@ -1 +1 @@ -Subproject commit 3b916e4dc5777863cb4ee873b41ee460fb9aec27 +Subproject commit 558d6d2de33bb8cf4b16df980a0cdf3b1747a8e2 diff --git a/test/auto-test/FTC1.sh b/test/auto-test/FTC1.sh index 5d718b0e..e4ffe75b 100755 --- a/test/auto-test/FTC1.sh +++ b/test/auto-test/FTC1.sh @@ -24,7 +24,7 @@ TC_ONELINE_DESCR="Sanity test, create service and then create,update and delete DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR DMAAPMR PA RICSIM SDNC NGW KUBEPROXY" #App names to include in the test when running kubernetes, space separated list -KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW" +KUBE_INCLUDED_IMAGES="CP CR MR DMAAPMR PA RICSIM SDNC NGW KUBEPROXY " #Prestarted app (not started by script) to include in the test when running kubernetes, space separated list KUBE_PRESTARTED_IMAGES="" @@ -119,7 +119,8 @@ for __httpx in $TESTED_PROTOCOLS ; do start_ric_simulators ricsim_g3 1 STD_2.0.0 fi - start_mr + start_mr "$MR_READ_TOPIC" "/events" "users/policy-agent" \ + "$MR_WRITE_TOPIC" "/events" "users/mr-stub" start_cr diff --git a/test/auto-test/FTC3000.sh b/test/auto-test/FTC3000.sh index da4bf1e1..4c261b4c 100755 --- a/test/auto-test/FTC3000.sh +++ b/test/auto-test/FTC3000.sh @@ -20,10 +20,10 @@ TC_ONELINE_DESCR="App test DMAAP Meditor and DMAAP Adapter" #App names to include in the test when running docker, space separated list -DOCKER_INCLUDED_IMAGES="ECS DMAAPMED DMAAPADP KUBEPROXY MR CR" +DOCKER_INCLUDED_IMAGES="ECS DMAAPMED DMAAPADP KUBEPROXY MR DMAAPMR CR" #App names to include in the test when running kubernetes, space separated list -KUBE_INCLUDED_IMAGES=" ECS DMAAPMED DMAAPADP KUBEPROXY MR CR" +KUBE_INCLUDED_IMAGES=" ECS DMAAPMED DMAAPADP KUBEPROXY MR DMAAPMR CR" #Prestarted app (not started by script) to include in the test when running kubernetes, space separated list KUBE_PRESTARTED_IMAGES="" @@ -81,7 +81,9 @@ start_ecs NOPROXY $SIM_GROUP/$ECS_COMPOSE_DIR/$ECS_CONFIG_FILE set_ecs_trace -start_mr +start_mr "unauthenticated.dmaapmed.json" "/events" "dmaapmediatorproducer/STD_Fault_Messages" \ + "unauthenticated.dmaapadp.json" "/events" "dmaapadapterproducer/msgs" \ + "unauthenticated.dmaapadp_kafka.text" "/events" "dmaapadapterproducer/msgs" start_dmaapadp NOPROXY $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_CONFIG_FILE $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_DATA_FILE @@ -93,23 +95,33 @@ ecs_equal json:data-producer/v1/info-producers 2 60 # Check producers ecs_api_idc_get_job_ids 200 NOTYPE NOWNER EMPTY -ecs_api_idc_get_type_ids 200 ExampleInformationType STD_Fault_Messages +ecs_api_idc_get_type_ids 200 ExampleInformationType STD_Fault_Messages ExampleInformationTypeKafka ecs_api_edp_get_producer_ids_2 200 NOTYPE DmaapGenericInfoProducer 
DMaaP_Mediator_Producer -# Create jobs for adapter +# Create jobs for adapter - CR stores data as MD5 hash start_timer "Create adapter jobs: $NUM_JOBS" for ((i=1; i<=$NUM_JOBS; i++)) do - ecs_api_idc_put_job 201 job-adp-$i ExampleInformationType $CR_SERVICE_MR_PATH/job-adp-data$i info-owner-adp-$i $CR_SERVICE_MR_PATH/job_status_info-owner-adp-$i testdata/dmaap-adapter/job-template.json + ecs_api_idc_put_job 201 job-adp-$i ExampleInformationType $CR_SERVICE_MR_PATH/job-adp-data$i"?storeas=md5" info-owner-adp-$i $CR_SERVICE_APP_PATH/job_status_info-owner-adp-$i testdata/dmaap-adapter/job-template.json + done print_timer "Create adapter jobs: $NUM_JOBS" -# Create jobs for mediator +# Create jobs for adapter kafka - CR stores data as MD5 hash +start_timer "Create adapter (kafka) jobs: $NUM_JOBS" +for ((i=1; i<=$NUM_JOBS; i++)) +do + ecs_api_idc_put_job 201 job-adp-kafka-$i ExampleInformationTypeKafka $CR_SERVICE_TEXT_PATH/job-adp-kafka-data$i"?storeas=md5" info-owner-adp-kafka-$i $CR_SERVICE_APP_PATH/job_status_info-owner-adp-kafka-$i testdata/dmaap-adapter/job-template-1-kafka.json + +done +print_timer "Create adapter (kafka) jobs: $NUM_JOBS" + +# Create jobs for mediator - CR stores data as MD5 hash start_timer "Create mediator jobs: $NUM_JOBS" for ((i=1; i<=$NUM_JOBS; i++)) do - ecs_api_idc_put_job 201 job-med-$i STD_Fault_Messages $CR_SERVICE_MR_PATH/job-med-data$i info-owner-med-$i $CR_SERVICE_MR_PATH/job_status_info-owner-med-$i testdata/dmaap-adapter/job-template.json + ecs_api_idc_put_job 201 job-med-$i STD_Fault_Messages $CR_SERVICE_MR_PATH/job-med-data$i"?storeas=md5" info-owner-med-$i $CR_SERVICE_APP_PATH/job_status_info-owner-med-$i testdata/dmaap-adapter/job-template.json done print_timer "Create mediator jobs: $NUM_JOBS" @@ -118,11 +130,117 @@ for ((i=1; i<=$NUM_JOBS; i++)) do ecs_api_a1_get_job_status 200 job-med-$i ENABLED 30 ecs_api_a1_get_job_status 200 job-adp-$i ENABLED 30 + ecs_api_a1_get_job_status 200 job-adp-kafka-$i ENABLED 30 done + EXPECTED_DATA_DELIV=0 -# Send data to adapter via mr +mr_api_generate_json_payload_file 1 ./tmp/data_for_dmaap_test.json +mr_api_generate_text_payload_file 1 ./tmp/data_for_dmaap_test.txt + +## Send json file via message-router to adapter + +EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV)) + +mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json +cr_equal received_callbacks $EXPECTED_DATA_DELIV 200 + +EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV)) +mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json +cr_equal received_callbacks $EXPECTED_DATA_DELIV 200 + +EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV)) +mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json +cr_equal received_callbacks $EXPECTED_DATA_DELIV 200 + +EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV)) +mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json +cr_equal received_callbacks $EXPECTED_DATA_DELIV 200 + +EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV)) +mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json +cr_equal received_callbacks $EXPECTED_DATA_DELIV 200 + +# Check received data callbacks from adapter +for ((i=1; i<=$NUM_JOBS; i++)) +do + cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json + cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json + 
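Note the "?storeas=md5" suffix on the job target URLs above: per the comments, the callback receiver stores an MD5 hash of each delivered payload instead of the payload itself, so bulk deliveries can be verified cheaply by the cr_api_check_single_genric_event_md5* functions. Assuming that semantic (this is a sketch, not the CR's code), the check reduces to comparing digests:

    package main

    import (
        "crypto/md5"
        "encoding/hex"
        "fmt"
    )

    func md5Hex(payload []byte) string {
        sum := md5.Sum(payload)
        return hex.EncodeToString(sum[:])
    }

    func main() {
        delivered := []byte(`{"msg":"msg-1"}`)
        stored := md5Hex(delivered) // what the receiver would keep under ?storeas=md5
        expected := md5Hex([]byte(`{"msg":"msg-1"}`))
        fmt.Println(stored == expected) // true: the job delivered the expected data
    }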
cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json + cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json + cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json +done + + +## Send text file via message-router to adapter kafka + +EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV)) + +mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt +cr_equal received_callbacks $EXPECTED_DATA_DELIV 200 + +EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV)) +mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt +cr_equal received_callbacks $EXPECTED_DATA_DELIV 200 + +EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV)) +mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt +cr_equal received_callbacks $EXPECTED_DATA_DELIV 200 + +EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV)) +mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt +cr_equal received_callbacks $EXPECTED_DATA_DELIV 200 + +EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV)) +mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt +cr_equal received_callbacks $EXPECTED_DATA_DELIV 200 + +# Check received data callbacks from adapter kafka +for ((i=1; i<=$NUM_JOBS; i++)) +do + cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt + cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt + cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt + cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt + cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt +done + +## Send json file via message-router to mediator + +EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV)) + +mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json +cr_equal received_callbacks $EXPECTED_DATA_DELIV 200 + +EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV)) +mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json +cr_equal received_callbacks $EXPECTED_DATA_DELIV 200 + +EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV)) +mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json +cr_equal received_callbacks $EXPECTED_DATA_DELIV 200 + +EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV)) +mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json +cr_equal received_callbacks $EXPECTED_DATA_DELIV 200 + +EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV)) +mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json +cr_equal received_callbacks $EXPECTED_DATA_DELIV 200 + +# Check received data callbacks from mediator +for ((i=1; i<=$NUM_JOBS; i++)) +do + cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json + cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json + cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json + cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json + 
cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json +done + + +# Send small json via message-router to adapter mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-1"}' mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-3"}' @@ -131,9 +249,18 @@ EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV)) start_timer "Data delivery adapter, 2 json per job" cr_equal received_callbacks $EXPECTED_DATA_DELIV 100 print_timer "Data delivery adapter, 2 json per job" -EXPECTED_DATA_DELIV=$(cr_read received_callbacks) -# Send data to mediator +# Send small text via message-routere to adapter +mr_api_send_text "/events/unauthenticated.dmaapadp_kafka.text" 'Message-------1' +mr_api_send_text "/events/unauthenticated.dmaapadp_kafka.text" 'Message-------3' + +# Wait for data recetption, adapter kafka +EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV)) +start_timer "Data delivery adapte kafkar, 2 strings per job" +cr_equal received_callbacks $EXPECTED_DATA_DELIV 100 +print_timer "Data delivery adapte kafkar, 2 strings per job" + +# Send small json via message-router to mediator mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-0"}' mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-2"}' @@ -142,73 +269,85 @@ EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV)) start_timer "Data delivery mediator, 2 json per job" cr_equal received_callbacks $EXPECTED_DATA_DELIV 100 print_timer "Data delivery mediator, 2 json per job" -EXPECTED_DATA_DELIV=$(cr_read received_callbacks) # Check received number of messages for mediator and adapter callbacks for ((i=1; i<=$NUM_JOBS; i++)) do - cr_equal received_callbacks?id=job-med-data$i 2 - cr_equal received_callbacks?id=job-adp-data$i 2 + cr_equal received_callbacks?id=job-med-data$i 7 + cr_equal received_callbacks?id=job-adp-data$i 7 + cr_equal received_callbacks?id=job-adp-kafka-data$i 7 done # Check received data and order for mediator and adapter callbacks for ((i=1; i<=$NUM_JOBS; i++)) do - cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-0"}' - cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-2"}' - cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-1"}' - cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-3"}' + cr_api_check_single_genric_event_md5 200 job-med-data$i '{"msg":"msg-0"}' + cr_api_check_single_genric_event_md5 200 job-med-data$i '{"msg":"msg-2"}' + cr_api_check_single_genric_event_md5 200 job-adp-data$i '{"msg":"msg-1"}' + cr_api_check_single_genric_event_md5 200 job-adp-data$i '{"msg":"msg-3"}' + cr_api_check_single_genric_event_md5 200 job-adp-kafka-data$i 'Message-------1' + cr_api_check_single_genric_event_md5 200 job-adp-kafka-data$i 'Message-------3' done # Set delay in the callback receiver to slow down callbacks -SEC_DELAY=5 +SEC_DELAY=2 cr_delay_callback 200 $SEC_DELAY -# Send data to adapter via mr +# Send small json via message-router to adapter mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-5"}' mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-7"}' # Wait for data recetption, adapter EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV)) -start_timer "Data delivery adapter with $SEC_DELAY seconds delay, 2 json per job" +start_timer "Data delivery adapter with $SEC_DELAY seconds delay in consumer, 2 json per job" cr_equal received_callbacks $EXPECTED_DATA_DELIV $(($NUM_JOBS+300)) -print_timer 
"Data delivery adapter with $SEC_DELAY seconds delay, 2 json per job" -EXPECTED_DATA_DELIV=$(cr_read received_callbacks) +print_timer "Data delivery adapter with $SEC_DELAY seconds delay in consumer, 2 json per job" + +# Send small text via message-router to adapter kafka +mr_api_send_text "/events/unauthenticated.dmaapadp_kafka.text" 'Message-------5' +mr_api_send_text "/events/unauthenticated.dmaapadp_kafka.text" 'Message-------7' -# Send data to mediator +# Wait for data recetption, adapter kafka +EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV)) +start_timer "Data delivery adapter kafka with $SEC_DELAY seconds delay in consumer, 2 strings per job" +cr_equal received_callbacks $EXPECTED_DATA_DELIV $(($NUM_JOBS+300)) +print_timer "Data delivery adapter with kafka $SEC_DELAY seconds delay in consumer, 2 strings per job" + + +# Send small json via message-router to mediator mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-4"}' mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-6"}' # Wait for data reception, mediator EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV)) -start_timer "Data delivery mediator with $SEC_DELAY seconds delay, 2 json per job" +start_timer "Data delivery mediator with $SEC_DELAY seconds delay in consumer, 2 json per job" cr_equal received_callbacks $EXPECTED_DATA_DELIV 1000 -print_timer "Data delivery mediator with $SEC_DELAY seconds delay, 2 json per job" -EXPECTED_DATA_DELIV=$(cr_read received_callbacks) +print_timer "Data delivery mediator with $SEC_DELAY seconds delay in consumer, 2 json per job" # Check received number of messages for mediator and adapter callbacks for ((i=1; i<=$NUM_JOBS; i++)) do - cr_equal received_callbacks?id=job-med-data$i 4 - cr_equal received_callbacks?id=job-adp-data$i 4 + cr_equal received_callbacks?id=job-med-data$i 9 + cr_equal received_callbacks?id=job-adp-data$i 9 + cr_equal received_callbacks?id=job-adp-kafka-data$i 9 done # Check received data and order for mediator and adapter callbacks for ((i=1; i<=$NUM_JOBS; i++)) do - cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-4"}' - cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-6"}' - cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-5"}' - cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-7"}' + cr_api_check_single_genric_event_md5 200 job-med-data$i '{"msg":"msg-4"}' + cr_api_check_single_genric_event_md5 200 job-med-data$i '{"msg":"msg-6"}' + cr_api_check_single_genric_event_md5 200 job-adp-data$i '{"msg":"msg-5"}' + cr_api_check_single_genric_event_md5 200 job-adp-data$i '{"msg":"msg-7"}' + cr_api_check_single_genric_event_md5 200 job-adp-kafka-data$i 'Message-------5' + cr_api_check_single_genric_event_md5 200 job-adp-kafka-data$i 'Message-------7' done - - #### TEST COMPLETE #### store_logs END print_result -auto_clean_environment \ No newline at end of file +auto_clean_environment diff --git a/test/auto-test/ONAP_UC.sh b/test/auto-test/ONAP_UC.sh index 15b5c5bb..03697bcd 100755 --- a/test/auto-test/ONAP_UC.sh +++ b/test/auto-test/ONAP_UC.sh @@ -23,7 +23,7 @@ TC_ONELINE_DESCR="ONAP Use case REQ-626" DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR DMAAPMR PA RICSIM SDNC NGW KUBEPROXY" #App names to include in the test when running kubernetes, space separated list -KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW" +KUBE_INCLUDED_IMAGES="CP CR MR DMAAPMR PA RICSIM SDNC KUBEPROXY NGW" #Prestarted app (not started by script) to include in 
the test when running kubernetes, space separated list KUBE_PRESTARTED_IMAGES="" @@ -99,7 +99,8 @@ for interface in $TESTED_VARIANTS ; do start_ric_simulators $RIC_SIM_PREFIX"_g3" $STD_NUM_RICS STD_2.0.0 - start_mr + start_mr "$MR_READ_TOPIC" "/events" "users/policy-agent" \ + "$MR_WRITE_TOPIC" "/events" "users/mr-stub" start_control_panel $SIM_GROUP/$CONTROL_PANEL_COMPOSE_DIR/$CONTROL_PANEL_CONFIG_FILE diff --git a/test/auto-test/startMR.sh b/test/auto-test/startMR.sh index 47b45149..27bdb4e5 100755 --- a/test/auto-test/startMR.sh +++ b/test/auto-test/startMR.sh @@ -56,7 +56,11 @@ setup_testenvironment clean_environment start_kube_proxy -start_mr +start_mr "$MR_READ_TOPIC" "/events" "users/policy-agent" \ + "$MR_WRITE_TOPIC" "/events" "users/mr-stub" \ + "unauthenticated.dmaapadp.json" "/events" "dmaapadapterproducer/msgs" \ + "unauthenticated.dmaapmed.json" "/events" "dmaapmediatorproducer/STD_Fault_Messages" + if [ $RUNMODE == "KUBE" ]; then : else diff --git a/test/auto-test/testdata/dmaap-adapter/job-schema-1-kafka b/test/auto-test/testdata/dmaap-adapter/job-schema-1-kafka new file mode 100644 index 00000000..290b70ae --- /dev/null +++ b/test/auto-test/testdata/dmaap-adapter/job-schema-1-kafka @@ -0,0 +1,28 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "properties": { + "filter": { + "type": "string" + }, + "maxConcurrency": { + "type": "integer" + }, + "bufferTimeout": { + "type": "object", + "properties": { + "maxSize": { + "type": "integer" + }, + "maxTimeMiliseconds": { + "type": "integer" + } + }, + "required": [ + "maxSize", + "maxTimeMiliseconds" + ] + } + }, + "required": [] +} \ No newline at end of file diff --git a/test/auto-test/testdata/dmaap-adapter/job-template-1-kafka.json b/test/auto-test/testdata/dmaap-adapter/job-template-1-kafka.json new file mode 100644 index 00000000..d549397a --- /dev/null +++ b/test/auto-test/testdata/dmaap-adapter/job-template-1-kafka.json @@ -0,0 +1,7 @@ +{ + "maxConcurrency": 1, + "bufferTimeout": { + "maxSize": 1, + "maxTimeMiliseconds": 0 + } +} \ No newline at end of file diff --git a/test/common/README.md b/test/common/README.md index 18b96566..3577cfa6 100644 --- a/test/common/README.md +++ b/test/common/README.md @@ -153,6 +153,7 @@ The script can be started with these arguments | `--print-stats` | Prints the number of tests, failed tests, failed configuration and deviations after each individual test or config | | `--override ` | Override setting from the file supplied by --env-file | | `--pre-clean` | Clean kube resources when running docker and vice versa | +| `--gen-stats` | Collect container/pod runtime statistics | | `help` | Print this info along with the test script description and the list of app short names supported | ## Function: setup_testenvironment ## diff --git a/test/common/agent_api_functions.sh b/test/common/agent_api_functions.sh index a1fd6577..4cedad1c 100644 --- a/test/common/agent_api_functions.sh +++ b/test/common/agent_api_functions.sh @@ -91,6 +91,19 @@ __PA_initial_setup() { use_agent_rest_http } +# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers +# For docker, the namespace shall be excluded +# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: - +__PA_statisics_setup() { + if [ $RUNMODE == "KUBE" ]; then + echo "PA $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE" + else + echo "PA $POLICY_AGENT_APP_NAME" + fi +} + + ####################################################### ########################### diff --git a/test/common/api_curl.sh b/test/common/api_curl.sh index 17f80a58..f2777ebf 100644 --- a/test/common/api_curl.sh +++ b/test/common/api_curl.sh @@ -23,7 +23,8 @@ # one for sending the requests and one for receiving the response # but only when using the DMAAP interface # REST or DMAAP is controlled of the base url of $XX_ADAPTER -# arg: (PA|ECS|CR|RC GET|PUT|POST|DELETE|GET_BATCH|PUT_BATCH|POST_BATCH|DELETE_BATCH | []) | (PA|ECS RESPONSE ) +# arg: (PA|ECS|CR|RC GET|PUT|POST|DELETE|GET_BATCH|PUT_BATCH|POST_BATCH|DELETE_BATCH | [ [mime-type]]) | (PA|ECS RESPONSE ) +# Default mime type for file is application/json unless specified in parameter mime-type # (Not for test scripts) __do_curl_to_api() { TIMESTAMP=$(date "+%Y-%m-%d %H:%M:%S") @@ -39,6 +40,7 @@ __do_curl_to_api() { paramError=0 input_url=$3 + fname=$4 if [ $# -gt 0 ]; then if [ $1 == "PA" ]; then __ADAPTER=$PA_ADAPTER @@ -75,17 +77,21 @@ __do_curl_to_api() { __ADAPTER=$MR_STUB_ADAPTER __ADAPTER_TYPE=$MR_STUB_ADAPTER_TYPE __RETRY_CODES="" - else + elif [ $1 == "DMAAPMR" ]; then + __ADAPTER=$MR_DMAAP_ADAPTER_HTTP + __ADAPTER_TYPE=$MR_DMAAP_ADAPTER_TYPE + __RETRY_CODES="" + else paramError=1 fi - if [ $__ADAPTER_TYPE == "MR-HTTP" ]; then + if [ "$__ADAPTER_TYPE" == "MR-HTTP" ]; then __ADAPTER=$MR_ADAPTER_HTTP fi - if [ $__ADAPTER_TYPE == "MR-HTTPS" ]; then + if [ "$__ADAPTER_TYPE" == "MR-HTTPS" ]; then __ADAPTER=$MR_ADAPTER_HTTPS fi fi - if [ $# -lt 3 ] || [ $# -gt 4 ]; then + if [ $# -lt 3 ] || [ $# -gt 5 ]; then paramError=1 else timeout="" @@ -100,6 +106,10 @@ __do_curl_to_api() { fi if [ $# -gt 3 ]; then content=" -H Content-Type:application/json" + fname=$4 + if [ $# -gt 4 ]; then + content=" -H Content-Type:"$5 + fi fi if [ $2 == "GET" ] || [ $2 == "GET_BATCH" ]; then oper="GET" @@ -108,15 +118,15 @@ __do_curl_to_api() { fi elif [ $2 == "PUT" ] || [ $2 == "PUT_BATCH" ]; then oper="PUT" - if [ $# -eq 4 ]; then - file=" --data-binary @$4" + if [ $# -gt 3 ]; then + file=" --data-binary @$fname" fi accept=" -H accept:application/json" elif [ $2 == "POST" ] || [ $2 == "POST_BATCH" ]; then oper="POST" accept=" -H accept:*/*" - if [ $# -eq 4 ]; then - file=" --data-binary @$4" + if [ $# -gt 3 ]; then + file=" --data-binary @$fname" accept=" -H accept:application/json" fi elif [ $2 == "DELETE" ] || [ $2 == "DELETE_BATCH" ]; then @@ -153,8 +163,8 @@ __do_curl_to_api() { oper=" -X "$oper curlString="curl -k $proxyflag "${oper}${timeout}${httpcode}${accept}${content}${url}${file} echo " CMD: "$curlString >> $HTTPLOG - if [ $# -eq 4 ]; then - echo " FILE: $(<$4)" >> $HTTPLOG + if [ $# -gt 3 ]; then + echo " FILE: $(<$fname)" >> $HTTPLOG fi # Do retry for configured response codes, otherwise only one attempt @@ -190,12 +200,12 @@ __do_curl_to_api() { else if [ $oper != "RESPONSE" ]; then requestUrl=$input_url - if [ $2 == "PUT" ] && [ $# -eq 4 ]; then - payload="$(cat $4 | tr -d '\n' | tr -d ' ' )" + if [ $2 == "PUT" ] && [ $# -gt 3 ]; then + payload="$(cat $fname | tr -d '\n' | tr -d ' ' )" echo "payload: "$payload >> $HTTPLOG file=" --data-binary "$payload - elif [ $# -eq 4 ]; then - echo " FILE: $(cat $4)" >> $HTTPLOG + elif [ $# -gt 3 ]; then + echo " FILE: $(cat $fname)" >> $HTTPLOG fi #urlencode the request url since it will be carried by send-request url 
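__do_curl_to_api above gains an optional mime-type argument for file payloads, keeping application/json as the default, presumably so the new text topics can be posted as text/plain. The same default-then-override logic in Go (endpoint and payload are illustrative):

    package main

    import (
        "fmt"
        "net/http"
        "strings"
    )

    func post(url, body, contentType string) (*http.Response, error) {
        if contentType == "" {
            contentType = "application/json" // the same default api_curl.sh applies
        }
        return http.Post(url, contentType, strings.NewReader(body))
    }

    func main() {
        // text/plain for a kafka text topic; JSON topics keep the default.
        resp, err := post("http://localhost:3904/events/unauthenticated.dmaapadp_kafka.text",
            "Message-------1", "text/plain")
        if err != nil {
            fmt.Println("post failed:", err)
            return
        }
        defer resp.Body.Close()
        fmt.Println("status:", resp.StatusCode)
    }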
requestUrl=$(python3 -c "from __future__ import print_function; import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1]))" "$input_url") diff --git a/test/common/consul_cbs_functions.sh b/test/common/consul_cbs_functions.sh index 747eaaba..cd1b16c0 100644 --- a/test/common/consul_cbs_functions.sh +++ b/test/common/consul_cbs_functions.sh @@ -165,6 +165,21 @@ __CBS_initial_setup() { CBS_SERVICE_PATH="http://"$CBS_APP_NAME":"$CBS_INTERNAL_PORT } +# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers +# For docker, the namespace shall be excluded +# This function is called for apps managed by the test script as well as for prestarted apps. +# args: - +__CONSUL_statisics_setup() { + echo "" +} + +# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers +# For docker, the namespace shall be excluded +# This function is called for apps managed by the test script as well as for prestarted apps. +# args: - +__CBS_statisics_setup() { + echo "" +} ####################################################### diff --git a/test/common/control_panel_api_functions.sh b/test/common/control_panel_api_functions.sh index eda6fe3a..295e16ab 100644 --- a/test/common/control_panel_api_functions.sh +++ b/test/common/control_panel_api_functions.sh @@ -91,6 +91,19 @@ __CP_store_docker_logs() { __CP_initial_setup() { use_control_panel_http } + +# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers +# For docker, the namespace shall be excluded +# This function is called for apps managed by the test script as well as for prestarted apps. +# args: - +__CP_statisics_setup() { + if [ $RUNMODE == "KUBE" ]; then + echo "CP $CONTROL_PANEL_APP_NAME $KUBE_NONRTRIC_NAMESPACE" + else + echo "CP $CONTROL_PANEL_APP_NAME" + fi +} + ####################################################### diff --git a/test/common/controller_api_functions.sh b/test/common/controller_api_functions.sh index 4027f30f..b3ef07b9 100644 --- a/test/common/controller_api_functions.sh +++ b/test/common/controller_api_functions.sh @@ -73,7 +73,7 @@ __SDNC_image_data() { # All resources shall be ordered to be scaled to 0, if relevant. If not relevant to scale, then do no action. # This function is called for apps fully managed by the test script __SDNC_kube_scale_zero() { - __kube_scale_all_resources $KUBE_SNDC_NAMESPACE autotest SDNC + __kube_scale_all_resources $KUBE_SDNC_NAMESPACE autotest SDNC } # Scale kubernetes resources to zero and wait until this has been accomplished, if relevant. If not relevant to scale, then do no action. @@ -85,7 +85,7 @@ __SDNC_kube_scale_zero_and_wait() { # Delete all kube resources for the app # This function is called for apps managed by the test script.
__SDNC_kube_delete_all() { - __kube_delete_all_resources $KUBE_SNDC_NAMESPACE autotest SDNC + __kube_delete_all_resources $KUBE_SDNC_NAMESPACE autotest SDNC } # Store docker logs @@ -93,9 +93,9 @@ __SDNC_kube_delete_all() { # args: __SDNC_store_docker_logs() { if [ $RUNMODE == "KUBE" ]; then - kubectl logs -l "autotest=SDNC" -n $KUBE_SNDC_NAMESPACE --tail=-1 > $1$2_SDNC.log 2>&1 - podname=$(kubectl get pods -n $KUBE_SNDC_NAMESPACE -l "autotest=SDNC" -o custom-columns=":metadata.name") - kubectl exec -t -n $KUBE_SNDC_NAMESPACE $podname -- cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1 + kubectl logs -l "autotest=SDNC" -n $KUBE_SDNC_NAMESPACE --tail=-1 > $1$2_SDNC.log 2>&1 + podname=$(kubectl get pods -n $KUBE_SDNC_NAMESPACE -l "autotest=SDNC" -o custom-columns=":metadata.name") + kubectl exec -t -n $KUBE_SDNC_NAMESPACE $podname -- cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1 else docker exec -t $SDNC_APP_NAME cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1 fi @@ -108,6 +108,18 @@ __SDNC_initial_setup() { use_sdnc_http } +# Set app short-name, app name and namespace for logging runtime statistics of kubernets pods or docker containers +# For docker, the namespace shall be excluded +# This function is called for apps managed by the test script as well as for prestarted apps. +# args: - +__SDNC_statisics_setup() { + if [ $RUNMODE == "KUBE" ]; then + echo "SDNC $SDNC_APP_NAME $KUBE_SDNC_NAMESPACE" + else + echo "SDNC $SDNC_APP_NAME" + fi +} + ####################################################### # Set http as the protocol to use for all communication to SDNC @@ -135,8 +147,8 @@ __sdnc_set_protocoll() { SDNC_SERVICE_PATH=$1"://"$SDNC_APP_NAME":"$2 # docker access, container->container and script->container via proxy SDNC_SERVICE_API_PATH=$1"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_APP_NAME":"$1$SDNC_API_URL if [ $RUNMODE == "KUBE" ]; then - SDNC_SERVICE_PATH=$1"://"$SDNC_APP_NAME.$KUBE_SNDC_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy - SDNC_SERVICE_API_PATH=$1"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_APP_NAME.KUBE_SNDC_NAMESPACE":"$1$SDNC_API_URL + SDNC_SERVICE_PATH=$1"://"$SDNC_APP_NAME.$KUBE_SDNC_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy + SDNC_SERVICE_API_PATH=$1"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_APP_NAME.KUBE_SDNC_NAMESPACE":"$1$SDNC_API_URL fi echo "" @@ -145,7 +157,7 @@ __sdnc_set_protocoll() { # Export env vars for config files, docker compose and kube resources # args: __sdnc_export_vars() { - export KUBE_SNDC_NAMESPACE + export KUBE_SDNC_NAMESPACE export DOCKER_SIM_NWNAME export SDNC_APP_NAME @@ -199,7 +211,7 @@ start_sdnc() { if [ $retcode_p -eq 0 ]; then echo -e " Using existing $SDNC_APP_NAME deployment and service" echo " Setting SDNC replicas=1" - __kube_scale deployment $SDNC_APP_NAME $KUBE_SNDC_NAMESPACE 1 + __kube_scale deployment $SDNC_APP_NAME $KUBE_SDNC_NAMESPACE 1 fi # Check if app shall be fully managed by the test script @@ -208,7 +220,7 @@ start_sdnc() { echo -e " Creating $SDNC_APP_NAME app and expose service" #Check if namespace exists, if not create it - __kube_create_namespace $KUBE_SNDC_NAMESPACE + __kube_create_namespace $KUBE_SDNC_NAMESPACE __sdnc_export_vars diff --git a/test/common/cr_api_functions.sh b/test/common/cr_api_functions.sh index ba465101..a537bc85 100644 --- a/test/common/cr_api_functions.sh +++ b/test/common/cr_api_functions.sh @@ -107,6 +107,18 @@ __CR_initial_setup() { use_cr_http } +# Set app short-name, app name and namespace for logging runtime statistics of kubernets pods or docker containers +# For docker, the 
namespace shall be excluded +# This function is called for apps managed by the test script as well as for prestarted apps. +# args: - +__CR_statisics_setup() { + if [ $RUNMODE == "KUBE" ]; then + echo "CR $CR_APP_NAME $KUBE_SIM_NAMESPACE" + else + echo "CR $CR_APP_NAME" + fi +} + ####################################################### ################ @@ -142,6 +154,7 @@ __cr_set_protocoll() { fi # Service paths are used in test script to provide callbacck urls to app CR_SERVICE_MR_PATH=$CR_SERVICE_PATH$CR_APP_CALLBACK_MR #Only for messages from dmaap adapter/mediator + CR_SERVICE_TEXT_PATH=$CR_SERVICE_PATH$CR_APP_CALLBACK_TEXT #Callbacks for text payload CR_SERVICE_APP_PATH=$CR_SERVICE_PATH$CR_APP_CALLBACK #For general callbacks from apps # CR_ADAPTER used for switching between REST and DMAAP (only REST supported currently) @@ -573,6 +586,10 @@ cr_api_check_single_genric_json_event() { body=${res:0:${#res}-3} targetJson=$3 + if [ $targetJson == "EMPTY" ] && [ ${#body} -ne 0 ]; then + __log_test_fail_body + return 1 + fi echo " TARGET JSON: $targetJson" >> $HTTPLOG res=$(python3 ../common/compare_json.py "$targetJson" "$body") @@ -581,6 +598,126 @@ cr_api_check_single_genric_json_event() { return 1 fi + __log_test_pass + return 0 +} + +# CR API: Check a single (oldest) json in md5 format (or none if empty) for path. +# Note that if a json message is given, it shall be compact, no ws except inside string. +# The MD5 will generate different hash if ws is present or not in otherwise equivalent json +# arg: (EMPTY | ) +# (Function for test scripts) +cr_api_check_single_genric_event_md5() { + __log_test_start $@ + + if [ $# -ne 3 ]; then + __print_err " (EMPTY | )" $@ + return 1 + fi + + query="/get-event/"$2 + res="$(__do_curl_to_api CR GET $query)" + status=${res:${#res}-3} + + if [ $status -ne $1 ]; then + __log_test_fail_status_code $1 $status + return 1 + fi + body=${res:0:${#res}-3} + if [ $3 == "EMPTY" ]; then + if [ ${#body} -ne 0 ]; then + __log_test_fail_body + return 1 + else + __log_test_pass + return 0 + fi + fi + command -v md5 > /dev/null # Mac + if [ $? -eq 0 ]; then + targetMd5=$(echo -n "$3" | md5) + else + command -v md5sum > /dev/null # Linux + if [ $? -eq 0 ]; then + targetMd5=$(echo -n "$3" | md5sum | cut -d' ' -f 1) # Need to cut additional info printed by cmd + else + __log_test_fail_general "Command md5 nor md5sum is available" + return 1 + fi + fi + targetMd5="\""$targetMd5"\"" #Quotes needed + + echo " TARGET MD5 hash: $targetMd5" >> $HTTPLOG + + if [ "$body" != "$targetMd5" ]; then + __log_test_fail_body + return 1 + fi + + __log_test_pass + return 0 +} + +# CR API: Check a single (oldest) event in md5 format (or none if empty) for path. +# Note that if a file with json message is given, the json shall be compact, no ws except inside string and not newlines. +# The MD5 will generate different hash if ws/newlines is present or not in otherwise equivalent json +# arg: (EMPTY | ) +# (Function for test scripts) +cr_api_check_single_genric_event_md5_file() { + __log_test_start $@ + + if [ $# -ne 3 ]; then + __print_err " (EMPTY | )" $@ + return 1 + fi + + query="/get-event/"$2 + res="$(__do_curl_to_api CR GET $query)" + status=${res:${#res}-3} + + if [ $status -ne $1 ]; then + __log_test_fail_status_code $1 $status + return 1 + fi + body=${res:0:${#res}-3} + if [ $3 == "EMPTY" ]; then + if [ ${#body} -ne 0 ]; then + __log_test_fail_body + return 1 + else + __log_test_pass + return 0 + fi + fi + + if [ ! 
-f $3 ]; then + __log_test_fail_general "File $3 does not exist" + return 1 + fi + + filedata=$(cat $3) + + command -v md5 > /dev/null # Mac + if [ $? -eq 0 ]; then + targetMd5=$(echo -n "$filedata" | md5) + else + command -v md5sum > /dev/null # Linux + if [ $? -eq 0 ]; then + targetMd5=$(echo -n "$filedata" | md5sum | cut -d' ' -f 1) # Need to cut additional info printed by cmd + else + __log_test_fail_general "Command md5 nor md5sum is available" + return 1 + fi + fi + targetMd5="\""$targetMd5"\"" #Quotes needed + + echo " TARGET MD5 hash: $targetMd5" >> $HTTPLOG + + if [ "$body" != "$targetMd5" ]; then + __log_test_fail_body + return 1 + fi + __log_test_pass return 0 } \ No newline at end of file diff --git a/test/common/dmaapadp_api_functions.sh b/test/common/dmaapadp_api_functions.sh index 26da2d08..9b7571f3 100644 --- a/test/common/dmaapadp_api_functions.sh +++ b/test/common/dmaapadp_api_functions.sh @@ -92,6 +92,18 @@ __DMAAPADP_initial_setup() { use_dmaapadp_http } +# Set app short-name, app name and namespace for logging runtime statistics of kubernets pods or docker containers +# For docker, the namespace shall be excluded +# This function is called for apps managed by the test script as well as for prestarted apps. +# args: - +__DMAAPADP_statisics_setup() { + if [ $RUNMODE == "KUBE" ]; then + echo "DMAAPADP $DMAAP_ADP_APP_NAME $KUBE_NONRTRIC_NAMESPACE" + else + echo "DMAAPADP $DMAAP_ADP_APP_NAME" + fi +} + ####################################################### # Set http as the protocol to use for all communication to the Dmaap adapter diff --git a/test/common/dmaapmed_api_functions.sh b/test/common/dmaapmed_api_functions.sh index 16e1ad70..5188a454 100644 --- a/test/common/dmaapmed_api_functions.sh +++ b/test/common/dmaapmed_api_functions.sh @@ -92,6 +92,18 @@ __DMAAPMED_initial_setup() { use_dmaapmed_http } +# Set app short-name, app name and namespace for logging runtime statistics of kubernets pods or docker containers +# For docker, the namespace shall be excluded +# This function is called for apps managed by the test script as well as for prestarted apps. +# args: - +__DMAAPMED_statisics_setup() { + if [ $RUNMODE == "KUBE" ]; then + echo "DMAAPMED $DMAAP_MED_APP_NAME $KUBE_NONRTRIC_NAMESPACE" + else + echo "DMAAPMED $DMAAP_MED_APP_NAME" + fi +} + ####################################################### # Set http as the protocol to use for all communication to the Dmaap mediator diff --git a/test/common/ecs_api_functions.sh b/test/common/ecs_api_functions.sh index 2b434f19..b28c061c 100644 --- a/test/common/ecs_api_functions.sh +++ b/test/common/ecs_api_functions.sh @@ -91,6 +91,18 @@ __ECS_initial_setup() { use_ecs_rest_http } +# Set app short-name, app name and namespace for logging runtime statistics of kubernets pods or docker containers +# For docker, the namespace shall be excluded +# This function is called for apps managed by the test script as well as for prestarted apps. 
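The two md5 check functions above hash the expected message the same way on both Mac (md5) and Linux (md5sum) and compare against the CR body, which carries the hash wrapped in double quotes. A sketch of deriving an expected value by hand (Linux md5sum assumed, payload illustrative):

    # Sketch: compute the quoted md5 the CR is expected to return for a compact json event.
    msg='{"event":"example"}'                          # compact json - no ws outside strings, no newline
    hash=$(echo -n "$msg" | md5sum | cut -d' ' -f1)    # md5sum prints "<hash>  -"; keep only the hash
    expected="\"$hash\""                               # the CR body carries the hash in double quotes
    echo "expected CR body: $expected"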
+# args: -
+__ECS_statisics_setup() {
+    if [ $RUNMODE == "KUBE" ]; then
+        echo "ECS $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+    else
+        echo "ECS $ECS_APP_NAME"
+    fi
+}
+
 #######################################################
diff --git a/test/common/gateway_api_functions.sh b/test/common/gateway_api_functions.sh
index ee617ef3..d8f1707b 100644
--- a/test/common/gateway_api_functions.sh
+++ b/test/common/gateway_api_functions.sh
@@ -92,6 +92,18 @@ __NGW_initial_setup() {
     use_gateway_http
 }

+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__NGW_statisics_setup() {
+    if [ $RUNMODE == "KUBE" ]; then
+        echo "NGW $NRT_GATEWAY_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+    else
+        echo "NGW $NRT_GATEWAY_APP_NAME"
+    fi
+}
+
 #######################################################
diff --git a/test/common/genstat.sh b/test/common/genstat.sh
new file mode 100755
index 00000000..3c329d9b
--- /dev/null
+++ b/test/common/genstat.sh
@@ -0,0 +1,135 @@
+#!/bin/bash
+
+# ============LICENSE_START===============================================
+# Copyright (C) 2020 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+# This script collects container statistics to a file. Data is separated with semicolon.
+# Works for both docker containers and kubernetes pods.
+# Relies on 'docker stats' so will not work for other container runtimes.
+# Used by the test env.
+
+# args: docker <start-time-seconds> <log-file> [ <app-short-name> <app-name> ]*
+# or
+# args: kube <start-time-seconds> <log-file> [ <app-short-name> <app-name> <namespace> ]*
+
+print_usage() {
+    echo "Usage: genstat.sh DOCKER <start-time-seconds> <log-file> [ <app-short-name> <app-name> ]*"
+    echo "or"
+    echo "Usage: genstat.sh KUBE <start-time-seconds> <log-file> [ <app-short-name> <app-name> <namespace> ]*"
+}
+
+STARTTIME=-1
+
+if [ $# -lt 4 ]; then
+    print_usage
+    exit 1
+fi
+if [ $1 == "DOCKER" ]; then
+    STAT_TYPE=$1
+    shift
+    STARTTIME=$1
+    shift
+    LOGFILE=$1
+    shift
+    if [ $(($#%2)) -ne 0 ]; then
+        print_usage
+        exit 1
+    fi
+elif [ $1 == "KUBE" ]; then
+    STAT_TYPE=$1
+    shift
+    STARTTIME=$1
+    shift
+    LOGFILE=$1
+    shift
+    if [ $(($#%3)) -ne 0 ]; then
+        print_usage
+        exit 1
+    fi
+else
+    print_usage
+    exit 1
+fi
+
+
+echo "Time;Name;PIDS;CPU perc;Mem perc" > $LOGFILE
+
+if [ "$STARTTIME" -ne -1 ]; then
+    STARTTIME=$(($SECONDS-$STARTTIME))
+fi
+
+while [ true ]; do
+    docker stats --no-stream --format "table {{.Name}};{{.PIDs}};{{.CPUPerc}};{{.MemPerc}}" > tmp/.tmp_stat_out.txt
+    if [ "$STARTTIME" -eq -1 ]; then
+        STARTTIME=$SECONDS
+    fi
+    CTIME=$(($SECONDS-$STARTTIME))
+
+    TMP_APPS=""
+
+    while read -r line; do
+        APP_LIST=(${@})
+        if [ $STAT_TYPE == "DOCKER" ]; then
+            for ((i=0; i<$#; i=i+2)); do
+                SAPP=${APP_LIST[$i]}
+                APP=${APP_LIST[$i+1]}
+                d=$(echo $line | grep -v "k8s" | grep $APP)
+                if [ ! -z "$d" ]; then
+                    d=$(echo $d | cut -d';' -f 2- | sed -e 's/%//g' | sed 's/\./,/g')
+                    echo "$SAPP;$CTIME;$d" >> $LOGFILE
+                    TMP_APPS=$TMP_APPS" $SAPP "
+                fi
+            done
+        else
+            for ((i=0; i<$#; i=i+3)); do
+                SAPP=${APP_LIST[$i]}
+                APP=${APP_LIST[$i+1]}
+                NS=${APP_LIST[$i+2]}
+                d=$(echo "$line" | grep -v "k8s_POD" | grep "k8s" | grep $APP | grep $NS)
+                if [ ! -z "$d" ]; then
+                    d=$(echo "$d" | cut -d';' -f 2- | sed -e 's/%//g' | sed 's/\./,/g')
+                    data="$SAPP-$NS;$CTIME;$d"
+                    echo $data >> $LOGFILE
+                    TMP_APPS=$TMP_APPS" $SAPP-$NS "
+                fi
+            done
+        fi
+    done < tmp/.tmp_stat_out.txt
+
+    APP_LIST=(${@})
+    if [ $STAT_TYPE == "DOCKER" ]; then
+        for ((i=0; i<$#; i=i+2)); do
+            SAPP=${APP_LIST[$i]}
+            APP=${APP_LIST[$i+1]}
+            if [[ $TMP_APPS != *" $SAPP "* ]]; then
+                data="$SAPP;$CTIME;0;0,00;0,00"
+                echo $data >> $LOGFILE
+            fi
+        done
+    else
+        for ((i=0; i<$#; i=i+3)); do
+            SAPP=${APP_LIST[$i]}
+            APP=${APP_LIST[$i+1]}
+            NS=${APP_LIST[$i+2]}
+            if [[ $TMP_APPS != *" $SAPP-$NS "* ]]; then
+                data="$SAPP-$NS;$CTIME;0;0,00;0,00"
+                echo $data >> $LOGFILE
+            fi
+        done
+    fi
+    sleep 1
+done
diff --git a/test/common/http_proxy_api_functions.sh b/test/common/http_proxy_api_functions.sh
index 56ce6d43..3378a1dd 100644
--- a/test/common/http_proxy_api_functions.sh
+++ b/test/common/http_proxy_api_functions.sh
@@ -106,6 +106,18 @@ __HTTPPROXY_initial_setup() {
     :
 }

+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__HTTPPROXY_statisics_setup() {
+    if [ $RUNMODE == "KUBE" ]; then
+        echo "HTTPPROXY $HTTP_PROXY_APP_NAME $KUBE_SIM_NAMESPACE"
+    else
+        echo "HTTPPROXY $HTTP_PROXY_APP_NAME"
+    fi
+}
+
 #######################################################
diff --git a/test/common/kube_proxy_api_functions.sh b/test/common/kube_proxy_api_functions.sh
index dcaaf802..eb4600cc 100644
--- a/test/common/kube_proxy_api_functions.sh
+++ b/test/common/kube_proxy_api_functions.sh
@@ -107,6 +107,18 @@ __KUBEPROXY_initial_setup() {
     use_kube_proxy_http
 }

+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__KUBEPROXY_statisics_setup() {
+    if [ $RUNMODE == "KUBE" ]; then
+        echo "KUBEPROXY $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE"
+    else
+        echo "KUBEPROXY $KUBE_PROXY_APP_NAME"
+    fi
+}
+
 #######################################################
 ## Access to Kube http proxy
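The statistics collector and the per-app __<APP>_statisics_setup functions fit together as follows: each setup function prints the tokens genstat.sh consumes, pairs in docker mode and triples in kube mode. A minimal sketch of a manual invocation (app names, namespace, and log file are illustrative, not taken from a test run):

    # Sketch only - docker mode takes pairs of <app-short-name> <app-name>.
    ./genstat.sh DOCKER 0 tmp/container_stats.log PA policy-agent CR callback-receiver &
    STAT_PID=$!
    # Kube mode instead takes triples of <app-short-name> <app-name> <namespace>:
    # ./genstat.sh KUBE 0 tmp/pod_stats.log PA policy-agent nonrtric CR callback-receiver nonrtric-ft &
    # The collector loops once per second until stopped:
    # kill $STAT_PID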
diff --git a/test/common/mr_api_functions.sh b/test/common/mr_api_functions.sh
index c6a5a2c7..da3e34db 100755
--- a/test/common/mr_api_functions.sh
+++ b/test/common/mr_api_functions.sh
@@ -193,19 +193,84 @@ __DMAAPMR_initial_setup() {
     : # handled by __MR_initial_setup
 }

+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__MR_statisics_setup() {
+    if [ $RUNMODE == "KUBE" ]; then
+        echo "MR $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE"
+    else
+        echo "MR $MR_STUB_APP_NAME"
+    fi
+}
+
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__DMAAPMR_statisics_setup() {
+    if [ $RUNMODE == "KUBE" ]; then
+        echo ""
+    else
+        echo ""
+    fi
+}

 #######################################################

+# Description of port mappings when running MR-STUB only or MR-STUB + MESSAGE-ROUTER
+#
+# 'MR-STUB only' is started when only 'MR' is included in the test script. Both the test scripts and the apps will then use MR-STUB as a message-router simulator.
+#
+# 'MR-STUB + MESSAGE-ROUTER' is started when 'MR' and 'DMAAPMR' are included in the test scripts. DMAAPMR is the real message router, including kafka and zookeeper.
+# In this configuration, MR-STUB is used by the test script as a frontend to the message-router while the apps use the real message-router.
+#
+#                 DOCKER                                                     KUBE
+# ---------------------------------------------------------------------------------------------------------------------------------------------------
+#
+#                 MR-STUB                                                    MR-STUB
+#                 +++++++                                                    +++++++
+# localhost                            container                             service                            pod
+# ==============================================================================================================================================
+# 10 MR_STUB_LOCALHOST_PORT        ->  13 MR_INTERNAL_PORT                   15 MR_EXTERNAL_PORT            ->  17 MR_INTERNAL_PORT
+# 12 MR_STUB_LOCALHOST_SECURE_PORT ->  14 MR_INTERNAL_SECURE_PORT            16 MR_EXTERNAL_SECURE_PORT     ->  18 MR_INTERNAL_SECURE_PORT
+#
+#
+#                 MESSAGE-ROUTER                                             MESSAGE-ROUTER
+#                 ++++++++++++++                                             ++++++++++++++
+# localhost                             container                            service                            pod
+# ===================================================================================================================================================
+# 20 MR_DMAAP_LOCALHOST_PORT        ->  23 MR_INTERNAL_PORT                  25 MR_EXTERNAL_PORT           ->   27 MR_INTERNAL_PORT
+# 22 MR_DMAAP_LOCALHOST_SECURE_PORT ->  24 MR_INTERNAL_SECURE_PORT           26 MR_EXTERNAL_SECURE_PORT    ->   28 MR_INTERNAL_SECURE_PORT
+
+
+# Running only the MR-STUB - apps using MR-STUB
+# DOCKER                                  KUBE
+# localhost: 10 and 12                    -
+# via proxy (script): 13 and 14           via proxy (script): 15 and 16
+# apps: 13 and 14                         apps: 15 and 16

+# Running MR-STUB (as frontend for test script) and MESSAGE-ROUTER - apps using MESSAGE-ROUTER
+# DOCKER                                  KUBE
+# localhost: 10 and 12                    -
+# via proxy (script): 13 and 14           via proxy (script): 15 and 16
+# apps: 23 and 24                         apps: 25 and 26
+#

 use_mr_http() {
-    __mr_set_protocoll "http" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXT_SECURE_PORT
+    __mr_set_protocoll "http" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXTERNAL_SECURE_PORT
 }

 use_mr_https() {
-    __mr_set_protocoll "https" $MR_INTERNAL_SECURE_PORT $MR_EXTERNAL_SECURE_PORT
+    __mr_set_protocoll "https" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXTERNAL_SECURE_PORT
 }

 # Setup paths to svc/container for internal and external access
-# args:
+# args: <protocol> <internal-port> <external-port> <internal-secure-port> <external-secure-port>
 __mr_set_protocoll() {
     echo -e $BOLD"$MR_STUB_DISPLAY_NAME and $MR_DMAAP_DISPLAY_NAME protocol setting"$EBOLD
     echo -e " Using $BOLD http $EBOLD towards $MR_STUB_DISPLAY_NAME and $MR_DMAAP_DISPLAY_NAME"
@@ -214,39 +279,60 @@ __mr_set_protocoll() {

     MR_HTTPX=$1

+    if [ $MR_HTTPX == "http" ]; then
+        INT_PORT=$2
+
EXT_PORT=$3 + else + INT_PORT=$4 + EXT_PORT=$5 + fi + # Access via test script - MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME":"$2 # access from script via proxy, docker - MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME":"$2 # access from script via proxy, docker + MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME":"$INT_PORT # access from script via proxy, docker + MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME":"$INT_PORT # access from script via proxy, docker + MR_DMAAP_ADAPTER_HTTP="" # Access to dmaap mr via proyx - set only if app is included MR_SERVICE_PATH=$MR_STUB_PATH # access container->container, docker - access pod->svc, kube + MR_KAFKA_SERVICE_PATH="" __check_included_image "DMAAPMR" if [ $? -eq 0 ]; then MR_SERVICE_PATH=$MR_DMAAP_PATH # access container->container, docker - access pod->svc, kube + MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH + + MR_KAFKA_SERVICE_PATH=$MR_KAFKA_APP_NAME":"$MR_KAFKA_PORT fi # For directing calls from script to e.g.PMS via message rounter - # Theses case shall always go though the mr-stub - MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$4 - MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$6 + # These cases shall always go though the mr-stub + MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$2 + MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$4 + + MR_DMAAP_ADAPTER_TYPE="REST" + + if [ $RUNMODE == "KUBE" ]; then - MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 # access from script via proxy, kube - MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 # access from script via proxy, kube + MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$EXT_PORT # access from script via proxy, kube + MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE":"$EXT_PORT # access from script via proxy, kube MR_SERVICE_PATH=$MR_STUB_PATH __check_included_image "DMAAPMR" if [ $? -eq 0 ]; then MR_SERVICE_PATH=$MR_DMAAP_PATH + MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH + MR_KAFKA_SERVICE_PATH=$MR_KAFKA_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_KAFKA_PORT fi __check_prestarted_image "DMAAPMR" if [ $? 
-eq 0 ]; then MR_SERVICE_PATH=$MR_DMAAP_PATH + MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH + MR_KAFKA_SERVICE_PATH=$MR_KAFKA_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_KAFKA_PORT fi # For directing calls from script to e.g.PMS, via message rounter # These calls shall always go though the mr-stub - MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$5 - MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$7 + MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 + MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$5 fi # For calls from script to the mr-stub @@ -254,8 +340,77 @@ __mr_set_protocoll() { MR_STUB_ADAPTER_TYPE="REST" echo "" + } + +# use_mr_http() { 2 3 4 5 6 7 +# __mr_set_protocoll "http" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXT_SECURE_PORT +# } + +# use_mr_https() { +# __mr_set_protocoll "https" $MR_INTERNAL_SECURE_PORT $MR_EXTERNAL_SECURE_PORT +# } + +# # Setup paths to svc/container for internal and external access +# # args: +# __mr_set_protocoll() { +# echo -e $BOLD"$MR_STUB_DISPLAY_NAME and $MR_DMAAP_DISPLAY_NAME protocol setting"$EBOLD +# echo -e " Using $BOLD http $EBOLD towards $MR_STUB_DISPLAY_NAME and $MR_DMAAP_DISPLAY_NAME" + +# ## Access to Dmaap mediator + +# MR_HTTPX=$1 + +# # Access via test script +# MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME":"$2 # access from script via proxy, docker +# MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME":"$2 # access from script via proxy, docker +# MR_DMAAP_ADAPTER_HTTP="" # Access to dmaap mr via proyx - set only if app is included + +# MR_SERVICE_PATH=$MR_STUB_PATH # access container->container, docker - access pod->svc, kube +# __check_included_image "DMAAPMR" +# if [ $? -eq 0 ]; then +# MR_SERVICE_PATH=$MR_DMAAP_PATH # access container->container, docker - access pod->svc, kube +# MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH +# fi + +# # For directing calls from script to e.g.PMS via message rounter +# # These cases shall always go though the mr-stub +# MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$4 +# MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$6 + +# MR_DMAAP_ADAPTER_TYPE="REST" + +# if [ $RUNMODE == "KUBE" ]; then +# MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 # access from script via proxy, kube +# MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 # access from script via proxy, kube + +# MR_SERVICE_PATH=$MR_STUB_PATH +# __check_included_image "DMAAPMR" +# if [ $? -eq 0 ]; then +# MR_SERVICE_PATH=$MR_DMAAP_PATH +# MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH +# fi +# __check_prestarted_image "DMAAPMR" +# if [ $? 
-eq 0 ]; then +# MR_SERVICE_PATH=$MR_DMAAP_PATH +# MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH +# fi + +# # For directing calls from script to e.g.PMS, via message rounter +# # These calls shall always go though the mr-stub +# MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$5 +# MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$7 +# fi + +# # For calls from script to the mr-stub +# MR_STUB_ADAPTER=$MR_STUB_PATH +# MR_STUB_ADAPTER_TYPE="REST" + +# echo "" + +# } + # Export env vars for config files, docker compose and kube resources # args: - __dmaapmr_export_vars() { @@ -272,6 +427,14 @@ __dmaapmr_export_vars() { export MR_DMAAP_LOCALHOST_SECURE_PORT export MR_INTERNAL_SECURE_PORT export MR_DMAAP_HOST_MNT_DIR + + export KUBE_ONAP_NAMESPACE + export MR_EXTERNAL_PORT + export MR_EXTERNAL_SECURE_PORT + export MR_KAFKA_PORT + export MR_ZOOKEEPER_PORT + + export MR_KAFKA_SERVICE_PATH } # Export env vars for config files, docker compose and kube resources @@ -283,10 +446,17 @@ __mr_export_vars() { export MRSTUB_IMAGE export MR_INTERNAL_PORT export MR_INTERNAL_SECURE_PORT + export MR_EXTERNAL_PORT + export MR_EXTERNAL_SECURE_PORT export MR_STUB_LOCALHOST_PORT export MR_STUB_LOCALHOST_SECURE_PORT export MR_STUB_CERT_MOUNT_DIR export MR_STUB_DISPLAY_NAME + + export KUBE_ONAP_NAMESPACE + export MR_EXTERNAL_PORT + + export MR_KAFKA_SERVICE_PATH } @@ -358,53 +528,33 @@ start_mr() { __dmaapmr_export_vars - #export MR_DMAAP_APP_NAME - export MR_DMAAP_KUBE_APP_NAME=message-router - MR_DMAAP_APP_NAME=$MR_DMAAP_KUBE_APP_NAME - export KUBE_ONAP_NAMESPACE - export MR_EXTERNAL_PORT - export MR_INTERNAL_PORT - export MR_EXTERNAL_SECURE_PORT - export MR_INTERNAL_SECURE_PORT - export ONAP_DMAAPMR_IMAGE - - export MR_KAFKA_BWDS_NAME=akfak-bwds - export MR_KAFKA_BWDS_NAME=kaka - export KUBE_ONAP_NAMESPACE - - export MR_ZOOKEEPER_APP_NAME - export ONAP_ZOOKEEPER_IMAGE - #Check if onap namespace exists, if not create it __kube_create_namespace $KUBE_ONAP_NAMESPACE - # TODO - Fix domain name substitution in the prop file - # Create config maps - dmaapmr app - configfile=$PWD/tmp/MsgRtrApi.properties - cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/mr/KUBE-MsgRtrApi.properties $configfile + # copy config files + MR_MNT_CONFIG_BASEPATH=$SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR$MR_DMAAP_HOST_MNT_DIR + cp -r $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR$MR_DMAAP_HOST_CONFIG_DIR/* $MR_MNT_CONFIG_BASEPATH + # Create config maps - dmaapmr app + configfile=$MR_MNT_CONFIG_BASEPATH/mr/MsgRtrApi.properties output_yaml=$PWD/tmp/dmaapmr_msgrtrapi_cfc.yaml __kube_create_configmap dmaapmr-msgrtrapi.properties $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml - configfile=$PWD/tmp/logback.xml - cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/mr/logback.xml $configfile + configfile=$MR_MNT_CONFIG_BASEPATH/mr/logback.xml output_yaml=$PWD/tmp/dmaapmr_logback_cfc.yaml __kube_create_configmap dmaapmr-logback.xml $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml - configfile=$PWD/tmp/cadi.properties - cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/mr/cadi.properties $configfile + configfile=$MR_MNT_CONFIG_BASEPATH/mr/cadi.properties output_yaml=$PWD/tmp/dmaapmr_cadi_cfc.yaml __kube_create_configmap dmaapmr-cadi.properties $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml # Create config maps - kafka app - configfile=$PWD/tmp/zk_client_jaas.conf - cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/kafka/zk_client_jaas.conf $configfile + 
configfile=$MR_MNT_CONFIG_BASEPATH/kafka/zk_client_jaas.conf output_yaml=$PWD/tmp/dmaapmr_zk_client_cfc.yaml __kube_create_configmap dmaapmr-zk-client-jaas.conf $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml # Create config maps - zookeeper app - configfile=$PWD/tmp/zk_server_jaas.conf - cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/zk/zk_server_jaas.conf $configfile + configfile=$MR_MNT_CONFIG_BASEPATH/zk/zk_server_jaas.conf output_yaml=$PWD/tmp/dmaapmr_zk_server_cfc.yaml __kube_create_configmap dmaapmr-zk-server-jaas.conf $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml @@ -419,42 +569,69 @@ start_mr() { __kube_create_instance app $MR_DMAAP_APP_NAME $input_yaml $output_yaml - echo " Retrieving host and ports for service..." - MR_DMAAP_HOST_NAME=$(__kube_get_service_host $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE) + # echo " Retrieving host and ports for service..." + # MR_DMAAP_HOST_NAME=$(__kube_get_service_host $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE) - MR_EXT_PORT=$(__kube_get_service_port $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE "http") - MR_EXT_SECURE_PORT=$(__kube_get_service_port $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE "https") + # MR_EXT_PORT=$(__kube_get_service_port $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE "http") + # MR_EXT_SECURE_PORT=$(__kube_get_service_port $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE "https") - echo " Host IP, http port, https port: $MR_DMAAP_APP_NAME $MR_EXT_PORT $MR_EXT_SECURE_PORT" - MR_SERVICE_PATH="" - if [ $MR_HTTPX == "http" ]; then - MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_EXT_PORT - MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_PORT - else - MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_EXT_SECURE_PORT - MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_SECURE_PORT + # echo " Host IP, http port, https port: $MR_DMAAP_APP_NAME $MR_EXT_PORT $MR_EXT_SECURE_PORT" + # MR_SERVICE_PATH="" + # if [ $MR_HTTPX == "http" ]; then + # MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_EXT_PORT + # MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_PORT + # else + # MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_EXT_SECURE_PORT + # MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_SECURE_PORT + # fi + + __check_service_start $MR_DMAAP_APP_NAME $MR_DMAAP_PATH$MR_DMAAP_ALIVE_URL + + # Cannot create topics, returns 400 forever.....topics will be created during pipeclean below + #__create_topic $MR_READ_TOPIC "Topic for reading policy messages" + + #__create_topic $MR_WRITE_TOPIC "Topic for writing policy messages" + +# __dmaap_pipeclean $MR_READ_TOPIC "/events/$MR_READ_TOPIC" "/events/$MR_READ_TOPIC/users/policy-agent?timeout=1000&limit=100" +# +# __dmaap_pipeclean $MR_WRITE_TOPIC "/events/$MR_WRITE_TOPIC" "/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=1000&limit=100" + + + #__dmaap_pipeclean "unauthenticated.dmaapmed.json" "/events/unauthenticated.dmaapmed.json" "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages?timeout=1000&limit=100" + #__dmaap_pipeclean "unauthenticated.dmaapadp.json" "/events/unauthenticated.dmaapadp.json" "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs?timeout=1000&limit=100" + + if [ $# -gt 0 ]; then + if [ $(($#%3)) -eq 0 ]; then + while [ $# -gt 0 ]; do + __dmaap_pipeclean "$1" "$2/$1" "$2/$1/$3?timeout=1000&limit=100" + shift; shift; shift; + done + else + echo -e $RED" args: start_mr [ ]*"$ERED 
+ echo -e $RED" Got: $@"$ERED + exit 1 + fi fi - __check_service_start $MR_DMAAP_APP_NAME $MR_DMAAP_PATH$MR_DMAAP_ALIVE_URL + echo " Current topics:" + curlString="$MR_DMAAP_PATH/topics" + result=$(__do_curl "$curlString") + echo $result | indent2 fi if [ $retcode_included_mr -eq 0 ]; then - #exporting needed var for deployment - export MR_STUB_APP_NAME - export KUBE_ONAP_NAMESPACE - export MRSTUB_IMAGE - export MR_INTERNAL_PORT - export MR_INTERNAL_SECURE_PORT - export MR_EXTERNAL_PORT - export MR_EXTERNAL_SECURE_PORT + + __mr_export_vars if [ $retcode_prestarted_dmaapmr -eq 0 ] || [ $retcode_included_dmaapmr -eq 0 ]; then # Set topics for dmaap export TOPIC_READ="http://$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE:$MR_INTERNAL_PORT/events/$MR_READ_TOPIC" export TOPIC_WRITE="http://$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE:$MR_INTERNAL_PORT/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=15000&limit=100" + export GENERIC_TOPICS_UPLOAD_BASEURL="http://$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE:$MR_INTERNAL_PORT" else export TOPIC_READ="" export TOPIC_WRITE="" + export GENERIC_TOPICS_UPLOAD_BASEURL="" fi #Check if onap namespace exists, if not create it @@ -473,30 +650,29 @@ start_mr() { fi - - echo " Retrieving host and ports for service..." - MR_STUB_HOST_NAME=$(__kube_get_service_host $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE) - - MR_EXT_PORT=$(__kube_get_service_port $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE "http") - MR_EXT_SECURE_PORT=$(__kube_get_service_port $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE "https") - - echo " Host IP, http port, https port: $MR_STUB_APP_NAME $MR_EXT_PORT $MR_EXT_SECURE_PORT" - if [ $MR_HTTPX == "http" ]; then - MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT - if [ -z "$MR_SERVICE_PATH" ]; then - MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_PORT - fi - else - MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT - if [ -z "$MR_SERVICE_PATH" ]; then - MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_SECURE_PORT - fi - fi - MR_ADAPTER_HTTP="http://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT - MR_ADAPTER_HTTPS="https://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT - - MR_STUB_ADAPTER=$MR_STUB_PATH - MR_STUB_ADAPTER_TYPE="REST" + # echo " Retrieving host and ports for service..." 
+ # MR_STUB_HOST_NAME=$(__kube_get_service_host $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE) + + # MR_EXT_PORT=$(__kube_get_service_port $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE "http") + # MR_EXT_SECURE_PORT=$(__kube_get_service_port $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE "https") + + # echo " Host IP, http port, https port: $MR_STUB_APP_NAME $MR_EXT_PORT $MR_EXT_SECURE_PORT" + # if [ $MR_HTTPX == "http" ]; then + # MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT + # if [ -z "$MR_SERVICE_PATH" ]; then + # MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_PORT + # fi + # else + # MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT + # if [ -z "$MR_SERVICE_PATH" ]; then + # MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_SECURE_PORT + # fi + # fi + # MR_ADAPTER_HTTP="http://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT + # MR_ADAPTER_HTTPS="https://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT + + # MR_STUB_ADAPTER=$MR_STUB_PATH + # MR_STUB_ADAPTER_TYPE="REST" __check_service_start $MR_STUB_APP_NAME $MR_STUB_PATH$MR_STUB_ALIVE_URL @@ -532,26 +708,55 @@ start_mr() { export TOPIC_READ="" export TOPIC_WRITE="" + export GENERIC_TOPICS_UPLOAD_BASEURL="" if [ $retcode_dmaapmr -eq 0 ]; then # Set topics for dmaap export TOPIC_READ="http://$MR_DMAAP_APP_NAME:$MR_INTERNAL_PORT/events/$MR_READ_TOPIC" export TOPIC_WRITE="http://$MR_DMAAP_APP_NAME:$MR_INTERNAL_PORT/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=15000&limit=100" + export GENERIC_TOPICS_UPLOAD_BASEURL="http://$MR_DMAAP_APP_NAME:$MR_INTERNAL_PORT" fi __dmaapmr_export_vars if [ $retcode_dmaapmr -eq 0 ]; then + + # copy config files + MR_MNT_CONFIG_BASEPATH=$SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR$MR_DMAAP_HOST_MNT_DIR + cp -r $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR$MR_DMAAP_HOST_CONFIG_DIR/* $MR_MNT_CONFIG_BASEPATH + + # substitute vars + configfile=$MR_MNT_CONFIG_BASEPATH/mr/MsgRtrApi.properties + cp $configfile $configfile"_tmp" + envsubst < $configfile"_tmp" > $configfile + __start_container $MR_DMAAP_COMPOSE_DIR "" NODOCKERARGS 1 $MR_DMAAP_APP_NAME __check_service_start $MR_DMAAP_APP_NAME $MR_DMAAP_PATH$MR_DMAAP_ALIVE_URL - __create_topic $MR_READ_TOPIC "Topic for reading policy messages" + # Cannot create topics, returns 400 forever.....topics will be created during pipeclean below + #__create_topic $MR_READ_TOPIC "Topic for reading policy messages" + + #__create_topic $MR_WRITE_TOPIC "Topic for writing policy messages" + + #__dmaap_pipeclean $MR_READ_TOPIC "/events/$MR_READ_TOPIC" "/events/$MR_READ_TOPIC/users/policy-agent?timeout=1000&limit=100" - __create_topic $MR_WRITE_TOPIC "Topic for writing policy messages" + #__dmaap_pipeclean $MR_WRITE_TOPIC "/events/$MR_WRITE_TOPIC" "/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=1000&limit=100" - __dmaap_pipeclean $MR_READ_TOPIC "/events/$MR_READ_TOPIC" "/events/$MR_READ_TOPIC/users/policy-agent?timeout=1000&limit=100" + if [ $# -gt 0 ]; then + if [ $(($#%3)) -eq 0 ]; then + while [ $# -gt 0 ]; do + __dmaap_pipeclean "$1" "$2/$1" "$2/$1/$3?timeout=1000&limit=100" + shift; shift; shift; + done + else + echo -e $RED" args: start_mr [ ]*"$ERED + echo -e $RED" Got: $@"$ERED + exit 1 + fi + fi - __dmaap_pipeclean $MR_WRITE_TOPIC "/events/$MR_WRITE_TOPIC" "/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=1000&limit=100" + #__dmaap_pipeclean "unauthenticated.dmaapmed.json" "/events/unauthenticated.dmaapmed.json" "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages?timeout=1000&limit=100" + #__dmaap_pipeclean 
"unauthenticated.dmaapadp.json" "/events/unauthenticated.dmaapadp.json" "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs?timeout=1000&limit=100" echo " Current topics:" curlString="$MR_DMAAP_PATH/topics" @@ -575,23 +780,25 @@ start_mr() { # Create a dmaap mr topic # args: __create_topic() { - echo -ne " Creating read topic: $1"$SAMELINE + echo -ne " Creating topic: $1"$SAMELINE json_topic="{\"topicName\":\"$1\",\"partitionCount\":\"2\", \"replicationCount\":\"3\", \"transactionEnabled\":\"false\",\"topicDescription\":\"$2\"}" - echo $json_topic > ./tmp/$1.json + fname="./tmp/$1.json" + echo $json_topic > $fname - curlString="$MR_DMAAP_PATH/topics/create -X POST -H Content-Type:application/json -d@./tmp/$1.json" - topic_retries=5 + query="/topics/create" + topic_retries=10 while [ $topic_retries -gt 0 ]; do let topic_retries=topic_retries-1 - result=$(__do_curl "$curlString") - if [ $? -eq 0 ]; then + res="$(__do_curl_to_api DMAAPMR POST $query $fname)" + status=${res:${#res}-3} + + if [[ $status == "2"* ]]; then topic_retries=0 - echo -e " Creating read topic: $1 $GREEN OK $EGREEN" - fi - if [ $? -ne 0 ]; then + echo -e " Creating topic: $1 $GREEN OK $EGREEN" + else if [ $topic_retries -eq 0 ]; then - echo -e " Creating read topic: $1 $RED Failed $ERED" + echo -e " Creating topic: $1 $RED Failed $ERED" ((RES_CONF_FAIL++)) return 1 else @@ -599,18 +806,27 @@ __create_topic() { fi fi done + echo return 0 } # Do a pipeclean of a topic - to overcome dmaap mr bug... -# args: +# args: [] __dmaap_pipeclean() { pipeclean_retries=50 + if [ $# -eq 4 ]; then + pipeclean_retries=$4 + fi echo -ne " Doing dmaap-mr pipe cleaning on topic: $1"$SAMELINE while [ $pipeclean_retries -gt 0 ]; do - echo "{\"pipeclean-$1\":$pipeclean_retries}" > ./tmp/pipeclean.json + if [[ $1 == *".text" ]]; then + echo "pipeclean-$1:$pipeclean_retries" > ./tmp/__dmaap_pipeclean.txt + curlString="$MR_DMAAP_PATH$2 -X POST -H Content-Type:text/plain -d@./tmp/__dmaap_pipeclean.txt" + else + echo "{\"pipeclean-$1\":$pipeclean_retries}" > ./tmp/__dmaap_pipeclean.json + curlString="$MR_DMAAP_PATH$2 -X POST -H Content-Type:application/json -d@./tmp/__dmaap_pipeclean.json" + fi let pipeclean_retries=pipeclean_retries-1 - curlString="$MR_DMAAP_PATH$2 -X POST -H Content-Type:application/json -d@./tmp/pipeclean.json" result=$(__do_curl "$curlString") if [ $? -ne 0 ]; then sleep 1 @@ -688,7 +904,7 @@ mr_print() { # arg: # (Function for test scripts) mr_api_send_json() { - __log_test_start $@ + __log_conf_start $@ if [ $# -ne 2 ]; then __print_err " " $@ return 1 @@ -700,10 +916,139 @@ mr_api_send_json() { status=${res:${#res}-3} if [ $status -ne 200 ]; then - __log_test_fail_status_code 200 $status + __log_conf_fail_status_code 200 $status + return 1 + fi + + __log_conf_ok + return 0 +} + +# Send text to topic in mr-stub. +# arg: +# (Function for test scripts) +mr_api_send_text() { + __log_conf_start $@ + if [ $# -ne 2 ]; then + __print_err " " $@ + return 1 + fi + query=$1 + fname=$PWD/tmp/text_payload_to_mr.txt + echo $2 > $fname + res="$(__do_curl_to_api MRSTUB POST $query $fname text/plain)" + + status=${res:${#res}-3} + if [ $status -ne 200 ]; then + __log_conf_fail_status_code 200 $status + return 1 + fi + + __log_conf_ok + return 0 +} + +# Send json file to topic in mr-stub. +# arg: +# (Function for test scripts) +mr_api_send_json_file() { + __log_conf_start $@ + if [ $# -ne 2 ]; then + __print_err " " $@ + return 1 + fi + query=$1 + if [ ! 
-f $2 ]; then
+        __log_test_fail_general "File $2 does not exist"
+        return 1
+    fi
+    #Create json array for mr
+    datafile="tmp/mr_api_send_json_file.json"
+    { echo -n "[" ; cat $2 ; echo -n "]" ;} > $datafile
+
+    res="$(__do_curl_to_api MRSTUB POST $query $datafile)"
+
+    status=${res:${#res}-3}
+    if [ $status -ne 200 ]; then
+        __log_conf_fail_status_code 200 $status
+        return 1
+    fi
+
+    __log_conf_ok
+    return 0
+}
+
+# Send text file to topic in mr-stub.
+# arg: <topic-url> <file-name>
+# (Function for test scripts)
+mr_api_send_text_file() {
+    __log_conf_start $@
+    if [ $# -ne 2 ]; then
+        __print_err "<topic-url> <file-name>" $@
+        return 1
+    fi
+    query=$1
+    if [ ! -f $2 ]; then
+        __log_test_fail_general "File $2 does not exist"
+        return 1
+    fi
+
+    res="$(__do_curl_to_api MRSTUB POST $query $2 text/plain)"
+
+    status=${res:${#res}-3}
+    if [ $status -ne 200 ]; then
+        __log_conf_fail_status_code 200 $status
+        return 1
+    fi
+
+    __log_conf_ok
+    return 0
+}
+
+# Create json file for payload
+# arg: <size-in-kb> <file-name>
+mr_api_generate_json_payload_file() {
+    __log_conf_start $@
+    if [ $# -ne 2 ]; then
+        __print_err "<size-in-kb> <file-name>" $@
+        return 1
+    fi
+    if [ $1 -lt 1 ] || [ $1 -gt 10000 ]; then
+        __log_conf_fail_general "Only size between 1k and 10000k supported"
+        return 1
+    fi
+    echo -n "{\"a\":[" > $2
+    LEN=$(($1*150))
+    echo -n "\"a0\"" >> $2
+    for ((idx=1; idx<$LEN; idx++))
+    do
+        echo -n ",\"a$idx\"" >> $2
+    done
+    echo -n "]}" >> $2
+
+    __log_conf_ok
+    return 0
+}
+
+# Create text file for payload
+# arg: <size-in-kb> <file-name>
+mr_api_generate_text_payload_file() {
+    __log_conf_start $@
+    if [ $# -ne 2 ]; then
+        __print_err "<size-in-kb> <file-name>" $@
+        return 1
+    fi
+    if [ $1 -lt 1 ] || [ $1 -gt 10000 ]; then
+        __log_conf_fail_general "Only size between 1k and 10000k supported"
+        return 1
+    fi
+    echo -n "" > $2
+    LEN=$(($1*100))
+    for ((idx=0; idx<$LEN; idx++))
+    do
+        echo -n "ABCDEFGHIJ" >> $2
+    done
+
+    __log_conf_ok
+    return 0
+}
diff --git a/test/common/prodstub_api_functions.sh b/test/common/prodstub_api_functions.sh
index bb4ccf59..6c3ce234 100644
--- a/test/common/prodstub_api_functions.sh
+++ b/test/common/prodstub_api_functions.sh
@@ -107,6 +107,18 @@ __PRODSTUB_initial_setup() {
     use_prod_stub_http
 }

+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__PRODSTUB_statisics_setup() {
+    if [ $RUNMODE == "KUBE" ]; then
+        echo "PRODSTUB $PROD_STUB_APP_NAME $KUBE_SIM_NAMESPACE"
+    else
+        echo "PRODSTUB $PROD_STUB_APP_NAME"
+    fi
+}
+
 #######################################################

 # Set http as the protocol to use for all communication to the Prod stub sim
diff --git a/test/common/pvccleaner_api_functions.sh b/test/common/pvccleaner_api_functions.sh
index 62c2d43d..5d37bd0d 100644
--- a/test/common/pvccleaner_api_functions.sh
+++ b/test/common/pvccleaner_api_functions.sh
@@ -90,6 +90,14 @@ __PVCCLEANER_initial_setup() {
     :
 }

+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
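Taken together, the payload helpers shown above let a test generate a payload of a target size and push it to a topic; one size unit maps to roughly 1 kB (150 json array elements, or 100 ten-character chunks). A sketch, with topic urls and file names purely illustrative:

    # Sketch only - generate ~10 kB payloads and post them via the mr-stub helpers.
    mr_api_generate_json_payload_file 10 tmp/json_payload_10kb.json
    mr_api_send_json_file "/send-standalone/unauthenticated.dmaapadp.json" tmp/json_payload_10kb.json
    mr_api_generate_text_payload_file 10 tmp/text_payload_10kb.txt
    mr_api_send_text_file "/send-standalone/unauthenticated.dmaapadp.text" tmp/text_payload_10kb.txt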
+# args: - +__PVCCLEANER_statisics_setup() { + echo "" +} + ####################################################### # This is a system app, all usage in testcase_common.sh \ No newline at end of file diff --git a/test/common/rapp_catalogue_api_functions.sh b/test/common/rapp_catalogue_api_functions.sh index 52416d34..537bc0c2 100644 --- a/test/common/rapp_catalogue_api_functions.sh +++ b/test/common/rapp_catalogue_api_functions.sh @@ -84,6 +84,18 @@ __RC_initial_setup() { use_rapp_catalogue_http } +# Set app short-name, app name and namespace for logging runtime statistics of kubernets pods or docker containers +# For docker, the namespace shall be excluded +# This function is called for apps managed by the test script as well as for prestarted apps. +# args: - +__RC_statisics_setup() { + if [ $RUNMODE == "KUBE" ]; then + echo "RC $RAPP_CAT_APP_NAME $KUBE_NONRTRIC_NAMESPACE" + else + echo "RC $RAPP_CAT_APP_NAME" + fi +} + ####################################################### # Set http as the protocol to use for all communication to the Rapp catalogue diff --git a/test/common/ricsimulator_api_functions.sh b/test/common/ricsimulator_api_functions.sh index f7603130..695b5358 100644 --- a/test/common/ricsimulator_api_functions.sh +++ b/test/common/ricsimulator_api_functions.sh @@ -91,6 +91,18 @@ __RICSIM_initial_setup() { use_simulator_http } +# Set app short-name, app name and namespace for logging runtime statistics of kubernets pods or docker containers +# For docker, the namespace shall be excluded +# This function is called for apps managed by the test script as well as for prestarted apps. +# args: - +__RICSIM_statisics_setup() { + if [ $RUNMODE == "KUBE" ]; then + echo "" + else + echo "" + fi +} + ####################################################### diff --git a/test/common/test_env-onap-guilin.sh b/test/common/test_env-onap-guilin.sh index 8344f38c..6cb18f50 100755 --- a/test/common/test_env-onap-guilin.sh +++ b/test/common/test_env-onap-guilin.sh @@ -161,9 +161,9 @@ DOCKER_SIM_NWNAME="nonrtric-docker-net" # Name of docker privat KUBE_NONRTRIC_NAMESPACE="nonrtric" # Namespace for all nonrtric components KUBE_SIM_NAMESPACE="nonrtric-ft" # Namespace for simulators (except MR and RICSIM) -KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM) +KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM) KUBE_ONAP_NAMESPACE="onap" # Namespace for onap (only message router) -KUBE_SNDC_NAMESPACE="onap" # Namespace for sdnc +KUBE_SDNC_NAMESPACE="onap" # Namespace for sdnc POLICY_AGENT_EXTERNAL_PORT=8081 # Policy Agent container external port (host -> container) POLICY_AGENT_INTERNAL_PORT=8081 # Policy Agent container internal port (container -> container) @@ -189,7 +189,7 @@ POLICY_AGENT_CONFIG_FILE="application.yaml" # Container config file POLICY_AGENT_DATA_FILE="application_configuration.json" # Container data file name POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container -MR_DMAAP_APP_NAME="dmaap-mr" # Name for the Dmaap MR +MR_DMAAP_APP_NAME="message-router" # Name for the Dmaap MR MR_STUB_APP_NAME="mr-stub" # Name of the MR stub MR_DMAAP_DISPLAY_NAME="DMAAP Message Router" MR_STUB_DISPLAY_NAME="Message Router stub" @@ -210,10 +210,12 @@ MR_STUB_ALIVE_URL="/" # Base path for mr stub MR_DMAAP_ALIVE_URL="/topics" # Base path for dmaap-mr alive check MR_DMAAP_COMPOSE_DIR="dmaapmr" # Dir in simulator_group for dmaap mr for - docker-compose MR_STUB_COMPOSE_DIR="mrstub" # Dir in simulator_group for mr stub for - 
docker-compose -MR_KAFKA_APP_NAME="kafka" # Kafka app name +MR_KAFKA_APP_NAME="message-router-kafka" # Kafka app name, if just named "kafka" the image will not start... +MR_KAFKA_PORT=9092 # Kafka port number MR_ZOOKEEPER_APP_NAME="zookeeper" # Zookeeper app name -MR_DMAAP_HOST_MNT_DIR="/mnt" # Config files dir on localhost - +MR_ZOOKEEPER_PORT="2181" # Zookeeper port number +MR_DMAAP_HOST_MNT_DIR="/mnt" # Basedir localhost for mounted files +MR_DMAAP_HOST_CONFIG_DIR="/configs" # Config files dir on localhost CR_APP_NAME="callback-receiver" # Name for the Callback receiver CR_DISPLAY_NAME="Callback Reciever" @@ -222,6 +224,8 @@ CR_INTERNAL_PORT=8090 # Callback receiver con CR_EXTERNAL_SECURE_PORT=8091 # Callback receiver container external secure port (host -> container) CR_INTERNAL_SECURE_PORT=8091 # Callback receiver container internal secure port (container -> container) CR_APP_CALLBACK="/callbacks" # Url for callbacks +CR_APP_CALLBACK_MR="/callbacks-mr" # Url for callbacks (data from mr which contains string encoded jsons in a json arr) +CR_APP_CALLBACK_TEXT="/callbacks-text" # Url for callbacks (data containing text data) CR_ALIVE_URL="/" # Base path for alive check CR_COMPOSE_DIR="cr" # Dir in simulator_group for docker-compose @@ -310,6 +314,12 @@ KUBE_PROXY_WEB_EXTERNAL_PORT=8731 # Kube Http Proxy conta KUBE_PROXY_WEB_INTERNAL_PORT=8081 # Kube Http Proxy container internal port (container -> container) KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783 # Kube Proxy container external secure port (host -> container) KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434 # Kube Proxy container internal secure port (container -> container + +KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732 # Kube Http Proxy container external port, doocker (host -> container) +KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784 # Kube Proxy container external secure port, doocker (host -> container) +KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733 # Kube Http Proxy container external port, doocker (host -> container) +KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785 # Kube Proxy container external secure port, doocker (host -> container) + KUBE_PROXY_PATH="" # Proxy url path, will be set if proxy is started KUBE_PROXY_ALIVE_URL="/" # Base path for alive check KUBE_PROXY_COMPOSE_DIR="kubeproxy" # Dir in simulator_group for docker-compose diff --git a/test/common/test_env-onap-honolulu.sh b/test/common/test_env-onap-honolulu.sh index 00e5d4bb..c2934206 100755 --- a/test/common/test_env-onap-honolulu.sh +++ b/test/common/test_env-onap-honolulu.sh @@ -185,9 +185,9 @@ DOCKER_SIM_NWNAME="nonrtric-docker-net" # Name of docker privat KUBE_NONRTRIC_NAMESPACE="nonrtric" # Namespace for all nonrtric components KUBE_SIM_NAMESPACE="nonrtric-ft" # Namespace for simulators (except MR and RICSIM) -KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM) +KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM) KUBE_ONAP_NAMESPACE="onap" # Namespace for onap (only message router) -KUBE_SNDC_NAMESPACE="onap" # Namespace for sdnc +KUBE_SDNC_NAMESPACE="onap" # Namespace for sdnc POLICY_AGENT_EXTERNAL_PORT=8081 # Policy Agent container external port (host -> container) POLICY_AGENT_INTERNAL_PORT=8081 # Policy Agent container internal port (container -> container) @@ -233,7 +233,7 @@ ECS_CONFIG_FILE=application.yaml # Config file name ECS_VERSION="V1-2" # Version where the types are added in the producer registration ECS_FEATURE_LEVEL="" # Space separated list of features -MR_DMAAP_APP_NAME="dmaap-mr" # Name for the Dmaap MR 
+MR_DMAAP_APP_NAME="message-router" # Name for the Dmaap MR MR_STUB_APP_NAME="mr-stub" # Name of the MR stub MR_DMAAP_DISPLAY_NAME="DMAAP Message Router" MR_STUB_DISPLAY_NAME="Message Router stub" @@ -254,9 +254,12 @@ MR_STUB_ALIVE_URL="/" # Base path for mr stub MR_DMAAP_ALIVE_URL="/topics" # Base path for dmaap-mr alive check MR_DMAAP_COMPOSE_DIR="dmaapmr" # Dir in simulator_group for dmaap mr for - docker-compose MR_STUB_COMPOSE_DIR="mrstub" # Dir in simulator_group for mr stub for - docker-compose -MR_KAFKA_APP_NAME="kafka" # Kafka app name +MR_KAFKA_APP_NAME="message-router-kafka" # Kafka app name, if just named "kafka" the image will not start... +MR_KAFKA_PORT=9092 # Kafka port number MR_ZOOKEEPER_APP_NAME="zookeeper" # Zookeeper app name -MR_DMAAP_HOST_MNT_DIR="/mnt" # Config files dir on localhost +MR_ZOOKEEPER_PORT="2181" # Zookeeper port number +MR_DMAAP_HOST_MNT_DIR="/mnt" # Basedir localhost for mounted files +MR_DMAAP_HOST_CONFIG_DIR="/configs" # Config files dir on localhost CR_APP_NAME="callback-receiver" # Name for the Callback receiver CR_DISPLAY_NAME="Callback Reciever" @@ -266,6 +269,8 @@ CR_EXTERNAL_SECURE_PORT=8091 # Callback receiver con CR_INTERNAL_SECURE_PORT=8091 # Callback receiver container internal secure port (container -> container) CR_APP_NAME="callback-receiver" # Name for the Callback receiver CR_APP_CALLBACK="/callbacks" # Url for callbacks +CR_APP_CALLBACK_MR="/callbacks-mr" # Url for callbacks (data from mr which contains string encoded jsons in a json arr) +CR_APP_CALLBACK_TEXT="/callbacks-text" # Url for callbacks (data containing text data) CR_ALIVE_URL="/" # Base path for alive check CR_COMPOSE_DIR="cr" # Dir in simulator_group for docker-compose @@ -378,6 +383,12 @@ KUBE_PROXY_WEB_EXTERNAL_PORT=8731 # Kube Http Proxy conta KUBE_PROXY_WEB_INTERNAL_PORT=8081 # Kube Http Proxy container internal port (container -> container) KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783 # Kube Proxy container external secure port (host -> container) KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434 # Kube Proxy container internal secure port (container -> container + +KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732 # Kube Http Proxy container external port, doocker (host -> container) +KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784 # Kube Proxy container external secure port, doocker (host -> container) +KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733 # Kube Http Proxy container external port, doocker (host -> container) +KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785 # Kube Proxy container external secure port, doocker (host -> container) + KUBE_PROXY_PATH="" # Proxy url path, will be set if proxy is started KUBE_PROXY_ALIVE_URL="/" # Base path for alive check KUBE_PROXY_COMPOSE_DIR="kubeproxy" # Dir in simulator_group for docker-compose diff --git a/test/common/test_env-onap-istanbul.sh b/test/common/test_env-onap-istanbul.sh index f8c411f1..5b111371 100644 --- a/test/common/test_env-onap-istanbul.sh +++ b/test/common/test_env-onap-istanbul.sh @@ -69,10 +69,10 @@ NEXUS_RELEASE_REPO_ONAP=$NEXUS_RELEASE_REPO # Policy Agent image and tags POLICY_AGENT_IMAGE_BASE="onap/ccsdk-oran-a1policymanagementservice" -POLICY_AGENT_IMAGE_TAG_LOCAL="1.3.0-SNAPSHOT" -POLICY_AGENT_IMAGE_TAG_REMOTE_SNAPSHOT="1.3.0-SNAPSHOT" -POLICY_AGENT_IMAGE_TAG_REMOTE="1.3.0-STAGING-latest" #Will use snapshot repo -POLICY_AGENT_IMAGE_TAG_REMOTE_RELEASE="1.3.0" +POLICY_AGENT_IMAGE_TAG_LOCAL="1.2.4-SNAPSHOT" +POLICY_AGENT_IMAGE_TAG_REMOTE_SNAPSHOT="1.2.4-SNAPSHOT" +POLICY_AGENT_IMAGE_TAG_REMOTE="1.2.4-STAGING-latest" #Will use 
snapshot repo +POLICY_AGENT_IMAGE_TAG_REMOTE_RELEASE="1.2.3" # SDNC A1 Controller remote image and tag SDNC_A1_CONTROLLER_IMAGE_BASE="onap/sdnc-image" @@ -146,17 +146,17 @@ HTTP_PROXY_IMAGE_TAG_LOCAL="latest" #ONAP Zookeeper remote image and tag ONAP_ZOOKEEPER_IMAGE_BASE="onap/dmaap/zookeeper" -ONAP_ZOOKEEPER_IMAGE_TAG_REMOTE_RELEASE_ONAP="6.0.3" +ONAP_ZOOKEEPER_IMAGE_TAG_REMOTE_RELEASE_ONAP="6.1.0" #No local image for ONAP Zookeeper, remote image always used #ONAP Kafka remote image and tag ONAP_KAFKA_IMAGE_BASE="onap/dmaap/kafka111" -ONAP_KAFKA_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.0.4" +ONAP_KAFKA_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.1.1" #No local image for ONAP Kafka, remote image always used #ONAP DMAAP-MR remote image and tag ONAP_DMAAPMR_IMAGE_BASE="onap/dmaap/dmaap-mr" -ONAP_DMAAPMR_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.1.18" +ONAP_DMAAPMR_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.3.0" #No local image for ONAP DMAAP-MR, remote image always used #Kube proxy remote image and tag @@ -188,9 +188,9 @@ DOCKER_SIM_NWNAME="nonrtric-docker-net" # Name of docker privat KUBE_NONRTRIC_NAMESPACE="nonrtric" # Namespace for all nonrtric components KUBE_SIM_NAMESPACE="nonrtric-ft" # Namespace for simulators (except MR and RICSIM) -KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM) +KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM) KUBE_ONAP_NAMESPACE="onap" # Namespace for onap (only message router) -KUBE_SNDC_NAMESPACE="onap" # Namespace for sdnc +KUBE_SDNC_NAMESPACE="onap" # Namespace for sdnc POLICY_AGENT_EXTERNAL_PORT=8081 # Policy Agent container external port (host -> container) POLICY_AGENT_INTERNAL_PORT=8081 # Policy Agent container internal port (container -> container) @@ -236,7 +236,7 @@ ECS_CONFIG_FILE=application.yaml # Config file name ECS_VERSION="V1-2" # Version where the types are added in the producer registration ECS_FEATURE_LEVEL="INFO-TYPES" # Space separated list of features -MR_DMAAP_APP_NAME="dmaap-mr" # Name for the Dmaap MR +MR_DMAAP_APP_NAME="message-router" # Name for the Dmaap MR MR_STUB_APP_NAME="mr-stub" # Name of the MR stub MR_DMAAP_DISPLAY_NAME="DMAAP Message Router" MR_STUB_DISPLAY_NAME="Message Router stub" @@ -257,9 +257,12 @@ MR_STUB_ALIVE_URL="/" # Base path for mr stub MR_DMAAP_ALIVE_URL="/topics" # Base path for dmaap-mr alive check MR_DMAAP_COMPOSE_DIR="dmaapmr" # Dir in simulator_group for dmaap mr for - docker-compose MR_STUB_COMPOSE_DIR="mrstub" # Dir in simulator_group for mr stub for - docker-compose -MR_KAFKA_APP_NAME="kafka" # Kafka app name +MR_KAFKA_APP_NAME="message-router-kafka" # Kafka app name, if just named "kafka" the image will not start... 
+MR_KAFKA_PORT=9092 # Kafka port number MR_ZOOKEEPER_APP_NAME="zookeeper" # Zookeeper app name -MR_DMAAP_HOST_MNT_DIR="/mnt2" # Config files dir on localhost +MR_ZOOKEEPER_PORT="2181" # Zookeeper port number +MR_DMAAP_HOST_MNT_DIR="/mnt" # Basedir localhost for mounted files +MR_DMAAP_HOST_CONFIG_DIR="/configs" # Config files dir on localhost CR_APP_NAME="callback-receiver" # Name for the Callback receiver CR_DISPLAY_NAME="Callback Reciever" @@ -269,6 +272,8 @@ CR_EXTERNAL_SECURE_PORT=8091 # Callback receiver con CR_INTERNAL_SECURE_PORT=8091 # Callback receiver container internal secure port (container -> container) CR_APP_NAME="callback-receiver" # Name for the Callback receiver CR_APP_CALLBACK="/callbacks" # Url for callbacks +CR_APP_CALLBACK_MR="/callbacks-mr" # Url for callbacks (data from mr which contains string encoded jsons in a json arr) +CR_APP_CALLBACK_TEXT="/callbacks-text" # Url for callbacks (data containing text data) CR_ALIVE_URL="/" # Base path for alive check CR_COMPOSE_DIR="cr" # Dir in simulator_group for docker-compose @@ -397,6 +402,12 @@ KUBE_PROXY_WEB_EXTERNAL_PORT=8731 # Kube Http Proxy conta KUBE_PROXY_WEB_INTERNAL_PORT=8081 # Kube Http Proxy container internal port (container -> container) KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783 # Kube Proxy container external secure port (host -> container) KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434 # Kube Proxy container internal secure port (container -> container) + +KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732 # Kube Http Proxy container external port, docker (host -> container) +KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784 # Kube Proxy container external secure port, docker (host -> container) +KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733 # Kube Http Proxy container external port, docker (host -> container) +KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785 # Kube Proxy container external secure port, docker (host -> container) + KUBE_PROXY_PATH="" # Proxy url path, will be set if proxy is started KUBE_PROXY_ALIVE_URL="/" # Base path for alive check KUBE_PROXY_COMPOSE_DIR="kubeproxy" # Dir in simulator_group for docker-compose diff --git a/test/common/test_env-oran-cherry.sh b/test/common/test_env-oran-cherry.sh index 43077eac..641aabeb 100755 --- a/test/common/test_env-oran-cherry.sh +++ b/test/common/test_env-oran-cherry.sh @@ -188,9 +188,9 @@ DOCKER_SIM_NWNAME="nonrtric-docker-net" # Name of docker privat KUBE_NONRTRIC_NAMESPACE="nonrtric" # Namespace for all nonrtric components KUBE_SIM_NAMESPACE="nonrtric-ft" # Namespace for simulators (except MR and RICSIM) -KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM) +KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM) KUBE_ONAP_NAMESPACE="onap" # Namespace for onap (only message router) -KUBE_SNDC_NAMESPACE="onap" # Namespace for sdnc +KUBE_SDNC_NAMESPACE="onap" # Namespace for sdnc POLICY_AGENT_EXTERNAL_PORT=8081 # Policy Agent container external port (host -> container) POLICY_AGENT_INTERNAL_PORT=8081 # Policy Agent container internal port (container -> container) @@ -236,7 +236,7 @@ ECS_CONFIG_FILE=application.yaml # Config file name ECS_VERSION="V1-2" # Version where the types are added in the producer registration ECS_FEATURE_LEVEL="" # Space separated list of features -MR_DMAAP_APP_NAME="dmaap-mr" # Name for the Dmaap MR +MR_DMAAP_APP_NAME="message-router" # Name for the Dmaap MR MR_STUB_APP_NAME="mr-stub" # Name of the MR stub MR_DMAAP_DISPLAY_NAME="DMAAP Message Router" MR_STUB_DISPLAY_NAME="Message Router stub" @@ -257,10 +257,12 @@
MR_STUB_ALIVE_URL="/" # Base path for mr stub MR_DMAAP_ALIVE_URL="/topics" # Base path for dmaap-mr alive check MR_DMAAP_COMPOSE_DIR="dmaapmr" # Dir in simulator_group for dmaap mr for - docker-compose MR_STUB_COMPOSE_DIR="mrstub" # Dir in simulator_group for mr stub for - docker-compose -MR_KAFKA_APP_NAME="kafka" # Kafka app name +MR_KAFKA_APP_NAME="message-router-kafka" # Kafka app name, if just named "kafka" the image will not start... +MR_KAFKA_PORT=9092 # Kafka port number MR_ZOOKEEPER_APP_NAME="zookeeper" # Zookeeper app name -MR_DMAAP_HOST_MNT_DIR="/mnt" # Config files dir on localhost - +MR_ZOOKEEPER_PORT="2181" # Zookeeper port number +MR_DMAAP_HOST_MNT_DIR="/mnt" # Basedir localhost for mounted files +MR_DMAAP_HOST_CONFIG_DIR="/configs" # Config files dir on localhost CR_APP_NAME="callback-receiver" # Name for the Callback receiver CR_DISPLAY_NAME="Callback Reciever" @@ -269,6 +271,8 @@ CR_INTERNAL_PORT=8090 # Callback receiver con CR_EXTERNAL_SECURE_PORT=8091 # Callback receiver container external secure port (host -> container) CR_INTERNAL_SECURE_PORT=8091 # Callback receiver container internal secure port (container -> container) CR_APP_CALLBACK="/callbacks" # Url for callbacks +CR_APP_CALLBACK_MR="/callbacks-mr" # Url for callbacks (data from mr which contains string encoded jsons in a json arr) +CR_APP_CALLBACK_TEXT="/callbacks-text" # Url for callbacks (data containing text data) CR_ALIVE_URL="/" # Base path for alive check CR_COMPOSE_DIR="cr" # Dir in simulator_group for docker-compose @@ -378,6 +382,12 @@ KUBE_PROXY_WEB_EXTERNAL_PORT=8731 # Kube Http Proxy conta KUBE_PROXY_WEB_INTERNAL_PORT=8081 # Kube Http Proxy container internal port (container -> container) KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783 # Kube Proxy container external secure port (host -> container) KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434 # Kube Proxy container internal secure port (container -> container) + +KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732 # Kube Http Proxy container external port, docker (host -> container) +KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784 # Kube Proxy container external secure port, docker (host -> container) +KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733 # Kube Http Proxy container external port, docker (host -> container) +KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785 # Kube Proxy container external secure port, docker (host -> container) + KUBE_PROXY_PATH="" # Proxy url path, will be set if proxy is started KUBE_PROXY_ALIVE_URL="/" # Base path for alive check KUBE_PROXY_COMPOSE_DIR="kubeproxy" # Dir in simulator_group for docker-compose diff --git a/test/common/test_env-oran-d-release.sh b/test/common/test_env-oran-d-release.sh index cc510d5a..18f7e177 100755 --- a/test/common/test_env-oran-d-release.sh +++ b/test/common/test_env-oran-d-release.sh @@ -207,9 +207,9 @@ DOCKER_SIM_NWNAME="nonrtric-docker-net" # Name of docker privat KUBE_NONRTRIC_NAMESPACE="nonrtric" # Namespace for all nonrtric components KUBE_SIM_NAMESPACE="nonrtric-ft" # Namespace for simulators (except MR and RICSIM) -KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM) +KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM) KUBE_ONAP_NAMESPACE="onap" # Namespace for onap (only message router) -KUBE_SNDC_NAMESPACE="onap" # Namespace for sdnc +KUBE_SDNC_NAMESPACE="onap" # Namespace for sdnc POLICY_AGENT_EXTERNAL_PORT=8081 # Policy Agent container external port (host -> container) POLICY_AGENT_INTERNAL_PORT=8081 # Policy Agent container internal port (container -> container) @@ -255,7
+255,7 @@ ECS_CONFIG_FILE=application.yaml # Config file name ECS_VERSION="V1-2" # Version where the types are decoupled from the producer registration ECS_FEATURE_LEVEL="INFO-TYPES" # Space separated list of features -MR_DMAAP_APP_NAME="dmaap-mr" # Name for the Dmaap MR +MR_DMAAP_APP_NAME="message-router" # Name for the Dmaap MR MR_STUB_APP_NAME="mr-stub" # Name of the MR stub MR_DMAAP_DISPLAY_NAME="DMAAP Message Router" MR_STUB_DISPLAY_NAME="Message Router stub" @@ -276,10 +276,12 @@ MR_STUB_ALIVE_URL="/" # Base path for mr stub MR_DMAAP_ALIVE_URL="/topics" # Base path for dmaap-mr alive check MR_DMAAP_COMPOSE_DIR="dmaapmr" # Dir in simulator_group for dmaap mr for - docker-compose MR_STUB_COMPOSE_DIR="mrstub" # Dir in simulator_group for mr stub for - docker-compose -MR_KAFKA_APP_NAME="kafka" # Kafka app name +MR_KAFKA_APP_NAME="message-router-kafka" # Kafka app name, if just named "kafka" the image will not start... +MR_KAFKA_PORT=9092 # Kafka port number MR_ZOOKEEPER_APP_NAME="zookeeper" # Zookeeper app name -MR_DMAAP_HOST_MNT_DIR="/mnt" # Config files dir on localhost - +MR_ZOOKEEPER_PORT="2181" # Zookeeper port number +MR_DMAAP_HOST_MNT_DIR="/mnt" # Basedir localhost for mounted files +MR_DMAAP_HOST_CONFIG_DIR="/configs" # Config files dir on localhost CR_APP_NAME="callback-receiver" # Name for the Callback receiver CR_DISPLAY_NAME="Callback receiver" @@ -288,6 +290,8 @@ CR_INTERNAL_PORT=8090 # Callback receiver con CR_EXTERNAL_SECURE_PORT=8091 # Callback receiver container external secure port (host -> container) CR_INTERNAL_SECURE_PORT=8091 # Callback receiver container internal secure port (container -> container) CR_APP_CALLBACK="/callbacks" # Url for callbacks +CR_APP_CALLBACK_MR="/callbacks-mr" # Url for callbacks (data from mr which contains string encoded jsons in a json arr) +CR_APP_CALLBACK_TEXT="/callbacks-text" # Url for callbacks (data containing text data) CR_ALIVE_URL="/" # Base path for alive check CR_COMPOSE_DIR="cr" # Dir in simulator_group for docker-compose @@ -441,6 +445,12 @@ KUBE_PROXY_WEB_EXTERNAL_PORT=8731 # Kube Http Proxy conta KUBE_PROXY_WEB_INTERNAL_PORT=8081 # Kube Http Proxy container internal port (container -> container) KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783 # Kube Proxy container external secure port (host -> container) KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434 # Kube Proxy container internal secure port (container -> container) + +KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732 # Kube Http Proxy container external port, docker (host -> container) +KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784 # Kube Proxy container external secure port, docker (host -> container) +KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733 # Kube Http Proxy container external port, docker (host -> container) +KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785 # Kube Proxy container external secure port, docker (host -> container) + KUBE_PROXY_PATH="" # Proxy url path, will be set if proxy is started KUBE_PROXY_ALIVE_URL="/" # Base path for alive check KUBE_PROXY_COMPOSE_DIR="kubeproxy" # Dir in simulator_group for docker-compose diff --git a/test/common/test_env-oran-e-release.sh b/test/common/test_env-oran-e-release.sh index e2b53da9..546e94cf 100755 --- a/test/common/test_env-oran-e-release.sh +++ b/test/common/test_env-oran-e-release.sh @@ -235,7 +235,7 @@ KUBE_NONRTRIC_NAMESPACE="nonrtric" # Namespace for all non KUBE_SIM_NAMESPACE="nonrtric-ft" # Namespace for simulators (except MR and RICSIM) KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM) KUBE_ONAP_NAMESPACE="onap"
# Namespace for onap (only message router) -KUBE_SNDC_NAMESPACE="onap" # Namespace for sdnc +KUBE_SDNC_NAMESPACE="onap" # Namespace for sdnc POLICY_AGENT_EXTERNAL_PORT=8081 # Policy Agent container external port (host -> container) POLICY_AGENT_INTERNAL_PORT=8081 # Policy Agent container internal port (container -> container) @@ -281,7 +281,7 @@ ECS_CONFIG_FILE=application.yaml # Config file name ECS_VERSION="V1-2" # Version where the types are decoupled from the producer registration ECS_FEATURE_LEVEL="INFO-TYPES TYPE-SUBSCRIPTIONS INFO-TYPE-INFO" # Space separated list of features -MR_DMAAP_APP_NAME="dmaap-mr" # Name for the Dmaap MR +MR_DMAAP_APP_NAME="message-router" # Name for the Dmaap MR MR_STUB_APP_NAME="mr-stub" # Name of the MR stub MR_DMAAP_DISPLAY_NAME="DMAAP Message Router" MR_STUB_DISPLAY_NAME="Message Router stub" @@ -302,10 +302,12 @@ MR_STUB_ALIVE_URL="/" # Base path for mr stub MR_DMAAP_ALIVE_URL="/topics" # Base path for dmaap-mr alive check MR_DMAAP_COMPOSE_DIR="dmaapmr" # Dir in simulator_group for dmaap mr for - docker-compose MR_STUB_COMPOSE_DIR="mrstub" # Dir in simulator_group for mr stub for - docker-compose -MR_KAFKA_APP_NAME="kafka" # Kafka app name +MR_KAFKA_APP_NAME="message-router-kafka" # Kafka app name, if just named "kafka" the image will not start... +MR_KAFKA_PORT=9092 # Kafka port number MR_ZOOKEEPER_APP_NAME="zookeeper" # Zookeeper app name -MR_DMAAP_HOST_MNT_DIR="/mnt2" # Config files dir on localhost - +MR_ZOOKEEPER_PORT="2181" # Zookeeper port number +MR_DMAAP_HOST_MNT_DIR="/mnt" # Basedir localhost for mounted files +MR_DMAAP_HOST_CONFIG_DIR="/configs" # Config files dir on localhost CR_APP_NAME="callback-receiver" # Name for the Callback receiver CR_DISPLAY_NAME="Callback receiver" @@ -315,6 +317,7 @@ CR_EXTERNAL_SECURE_PORT=8091 # Callback receiver con CR_INTERNAL_SECURE_PORT=8091 # Callback receiver container internal secure port (container -> container) CR_APP_CALLBACK="/callbacks" # Url for callbacks CR_APP_CALLBACK_MR="/callbacks-mr" # Url for callbacks (data from mr which contains string encoded jsons in a json arr) +CR_APP_CALLBACK_TEXT="/callbacks-text" # Url for callbacks (data containing text data) CR_ALIVE_URL="/" # Base path for alive check CR_COMPOSE_DIR="cr" # Dir in simulator_group for docker-compose @@ -478,6 +481,10 @@ KUBE_PROXY_PATH="" # Proxy url path, will KUBE_PROXY_ALIVE_URL="/" # Base path for alive check KUBE_PROXY_COMPOSE_DIR="kubeproxy" # Dir in simulator_group for docker-compose +PVC_CLEANER_APP_NAME="pvc-cleaner" # Name for Persistent Volume Cleaner container +PVC_CLEANER_DISPLAY_NAME="Persistent Volume Cleaner" # Display name for Persistent Volume Cleaner +PVC_CLEANER_COMPOSE_DIR="pvc-cleaner" # Dir in simulator_group for yamls + DMAAP_ADP_APP_NAME="dmaapadapterservice" # Name for Dmaap Adapter container DMAAP_ADP_DISPLAY_NAME="Dmaap Adapter Service" # Display name for Dmaap Adapter container DMAAP_ADP_EXTERNAL_PORT=9087 # Dmaap Adapter container external port (host -> container) @@ -511,18 +518,13 @@ DMAAP_MED_HOST_MNT_DIR="./mnt" # Mounted db dir, relati #DMAAP_MED_CERT_MOUNT_DIR="./cert" DMAAP_MED_ALIVE_URL="/status" # Base path for alive check DMAAP_MED_COMPOSE_DIR="dmaapmed" # Dir in simulator_group for docker-compose -#MAAP_MED_CONFIG_MOUNT_PATH="/app" # Internal container path for configuration -DMAAP_MED_DATA_MOUNT_PATH="/configs" # Path in container for data file -DMAAP_MED_DATA_FILE="type_config.json" # Container data file name -#DMAAP_MED_CONFIG_FILE=application.yaml # Config file name - 
-PVC_CLEANER_APP_NAME="pvc-cleaner" # Name for Persistent Volume Cleaner container -PVC_CLEANER_DISPLAY_NAME="Persistent Volume Cleaner" # Display name for Persistent Volume Cleaner -PVC_CLEANER_COMPOSE_DIR="pvc-cleaner" # Dir in simulator_group for yamls +#MAAP_MED_CONFIG_MOUNT_PATH="/app" # Internal container path for configuration +DMAAP_MED_DATA_MOUNT_PATH="/configs" # Path in container for data file +DMAAP_MED_DATA_FILE="type_config.json" # Container data file name ######################################## # Setting for common curl-base function ######################################## -UUID="" # UUID used as prefix to the policy id to simulate a real UUID - # Testscript need to set the UUID otherwise this empty prefix is used +UUID="" # UUID used as prefix to the policy id to simulate a real UUID + # Testscript need to set the UUID otherwise this empty prefix is used diff --git a/test/common/testcase_common.sh b/test/common/testcase_common.sh index 8d832d73..78eeb540 100755 --- a/test/common/testcase_common.sh +++ b/test/common/testcase_common.sh @@ -28,7 +28,7 @@ __print_args() { echo " [--ricsim-prefix ] [--use-local-image +] [--use-snapshot-image +]" echo " [--use-staging-image +] [--use-release-image +] [--image-repo ] [--print-stats]" - echo " [--override --pre-clean]" + echo " [--override --pre-clean --gen-stats]" } if [ $# -eq 1 ] && [ "$1" == "help" ]; then echo "--print-stats - Print current test stats after each test." echo "--override - Override setting from the file supplied by --env-file" echo "--pre-clean - Will clean kube resouces when running docker and vice versa" + echo "--gen-stats - Collect container/pod runtime statistics" echo "" echo "List of app short names supported: "$APP_SHORT_NAMES @@ -207,6 +208,9 @@ RES_DEVIATION=0 #Var to control if current stats shall be printed PRINT_CURRENT_STATS=0 +#Var to control if container/pod runtime statistics shall be collected +COLLECT_RUNTIME_STATS=0 + #File to keep deviation messages DEVIATION_FILE=".tmp_deviations" rm $DEVIATION_FILE &> /dev/null @@ -222,6 +226,9 @@ trap_fnc() { } trap trap_fnc ERR +# Trap to kill subprocesses +trap "kill 0" EXIT + # Counter for tests TEST_SEQUENCE_NR=1 @@ -652,6 +659,15 @@ while [ $paramerror -eq 0 ] && [ $foundparm -eq 0 ]; do foundparm=0 fi fi + if [ $paramerror -eq 0 ]; then + if [ "$1" == "--gen-stats" ]; then + COLLECT_RUNTIME_STATS=1 + echo "Option set - Collect runtime statistics" + shift; + foundparm=0 + fi + fi + done echo "" @@ -768,7 +784,7 @@ if [ $? -ne 0 ] || [ -z tmp ]; then fi fi if [ $RUNMODE == "DOCKER" ]; then - tmp=$(docker-compose version | grep -i 'Docker Compose version') + tmp=$(docker-compose version | grep -i 'docker' | grep -i 'compose' | grep -i 'version') if [[ "$tmp" == *'v2'* ]]; then echo -e $RED"docker-compose is using docker-compose version 2"$ERED echo -e $RED"The test environment only support version 1"$ERED @@ -1449,6 +1465,8 @@ setup_testenvironment() { echo -e $BOLD"======================================================="$EBOLD echo "" + LOG_STAT_ARGS="" + for imagename in $APP_SHORT_NAMES; do __check_included_image $imagename retcode_i=$?
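The --gen-stats option introduced above is wired up in the next hunk: setup_testenvironment launches ../common/genstat.sh as a background process, and the trap "kill 0" EXIT added earlier guarantees that the collector stops together with the test script. A minimal sketch of that background-collector pattern follows; the docker stats loop and the CSV columns are assumed stand-ins for what genstat.sh actually records:

#!/bin/bash
# Sketch of the --gen-stats mechanism: a background collector plus an EXIT trap.
# The stats loop below is an assumed stand-in for ../common/genstat.sh.
trap "kill 0" EXIT                      # stop all subprocesses when the script exits

stat_collector() {
    while true; do
        # append one CSV row per running container each second
        docker stats --no-stream --format "{{.Name}},{{.CPUPerc}},{{.MemUsage}}" >> stat_data.csv
        sleep 1
    done
}

stat_collector &                        # runs in the background, like genstat.sh
sleep 5                                 # placeholder for the actual test steps
echo "Runtime statistics collected in stat_data.csv"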
@@ -1464,9 +1482,16 @@ setup_testenvironment() { function_pointer="__"$imagename"_initial_setup" $function_pointer + + function_pointer="__"$imagename"_statisics_setup" + LOG_STAT_ARGS=$LOG_STAT_ARGS" "$($function_pointer) fi done + if [ $COLLECT_RUNTIME_STATS -eq 1 ]; then + ../common/genstat.sh $RUNMODE $SECONDS $TESTLOGS/$ATC/stat_data.csv $LOG_STAT_ARGS & + fi + } # Function to print the test result, shall be the last cmd in a test script @@ -1498,8 +1523,16 @@ print_result() { echo "Timer measurement in the test script" echo "====================================" column -t -s $'\t' $TIMER_MEASUREMENTS + if [ $RES_PASS != $RES_TEST ]; then + echo -e $RED"Measurement may not be reliable when there are failed tests - script timeouts may cause long measurement values"$ERED + fi echo "" + if [ $COLLECT_RUNTIME_STATS -eq 1 ]; then + echo "Runtime statistics collected in file: "$TESTLOGS/$ATC/stat_data.csv + echo "" + fi + total=$((RES_PASS+RES_FAIL)) if [ $RES_TEST -eq 0 ]; then echo -e "\033[1mNo tests seem to have been executed. Check the script....\033[0m" @@ -2142,41 +2175,6 @@ __kube_create_configmap() { return 0 } -# Function to create a configmap in kubernetes -# args: -# (Not for test scripts) -__kube_create_configmapXXXXXXXXXXXXX() { - echo -ne " Creating configmap $1 "$SAMELINE - #envsubst < $5 > $5"_tmp" - #cp $5"_tmp" $5 #Need to copy back to orig file name since create configmap neeed the original file name - kubectl create configmap $1 -n $2 --from-file=$5 --dry-run=client -o yaml > $6 - if [ $? -ne 0 ]; then - echo -e " Creating configmap $1 $RED Failed $ERED" - ((RES_CONF_FAIL++)) - return 1 - fi - - kubectl apply -f $6 1> /dev/null 2> ./tmp/kubeerr - if [ $? -ne 0 ]; then - echo -e " Creating configmap $1 $RED Apply failed $ERED" - echo " Message: $(<./tmp/kubeerr)" - ((RES_CONF_FAIL++)) - return 1 - fi - kubectl label configmap $1 -n $2 $3"="$4 --overwrite 1> /dev/null 2> ./tmp/kubeerr - if [ $? -ne 0 ]; then - echo -e " Creating configmap $1 $RED Labeling failed $ERED" - echo " Message: $(<./tmp/kubeerr)" - ((RES_CONF_FAIL++)) - return 1 - fi - # Log the resulting map - kubectl get configmap $1 -n $2 -o yaml > $6 - - echo -e " Creating configmap $1 $GREEN OK $EGREEN" - return 0 -} - # This function runs a kubectl cmd where a single output value is expected, for example get ip with jsonpath filter. # The function retries up to the timeout given in the cmd flag '--cluster-timeout' # args: @@ -2294,12 +2292,14 @@ clean_environment() { if [ $PRE_CLEAN -eq 1 ]; then echo " Clean docker resouces to free up resources, may take time..." ../common/clean_docker.sh 2&>1 /dev/null + echo "" fi else __clean_containers if [ $PRE_CLEAN -eq 1 ]; then - echo " Clean kubernetes resouces to free up resources, may take time..." + echo " Cleaning kubernetes resources to free up resources, may take time..."
../common/clean_kube.sh 2&>1 /dev/null + echo "" fi fi } diff --git a/test/cr/app/cr.py b/test/cr/app/cr.py index 4b4d8daf..94ef606d 100644 --- a/test/cr/app/cr.py +++ b/test/cr/app/cr.py @@ -25,6 +25,7 @@ import traceback import logging import socket from threading import RLock +from hashlib import md5 # Disable all logging of GET on reading counters and db class AjaxFilter(logging.Filter): @@ -54,6 +55,7 @@ hosts_set=set() # Request and response constants CALLBACK_URL="/callbacks/<id>" CALLBACK_MR_URL="/callbacks-mr/<id>" #Json list with string encoded items +CALLBACK_TEXT_URL="/callbacks-text/<id>" # Callback for string of text APP_READ_URL="/get-event/<id>" APP_READ_ALL_URL="/get-all-events/<id>" DUMP_ALL_URL="/db" @@ -111,7 +113,14 @@ def receiveresponse(id): cntr_callbacks[id][1]+=1 msg=msg_callbacks[id][0] print("Fetching msg for id: "+id+", msg="+str(msg)) - del msg[TIME_STAMP] + + if (isinstance(msg,dict)): + del msg[TIME_STAMP] + if ("md5" in msg.keys()): + print("EXTRACTED MD5") + msg=msg["md5"] + print("MD5: "+str(msg)) + del msg_callbacks[id][0] return json.dumps(msg),200 print("No messages for id: "+id) @@ -139,7 +148,8 @@ def receiveresponse_all(id): msg=msg_callbacks[id] print("Fetching all msgs for id: "+id+", msg="+str(msg)) for sub_msg in msg: - del sub_msg[TIME_STAMP] + if (isinstance(sub_msg, dict)): + del sub_msg[TIME_STAMP] del msg_callbacks[id] return json.dumps(msg),200 print("No messages for id: "+id) @@ -180,7 +190,8 @@ def events_write(id): with lock: cntr_msg_callbacks += 1 - msg[TIME_STAMP]=str(datetime.now()) + if (isinstance(msg, dict)): + msg[TIME_STAMP]=str(datetime.now()) if (id in msg_callbacks.keys()): msg_callbacks[id].append(msg) else: @@ -202,8 +213,9 @@ def events_write(id): return 'OK',200 -# Receive a json callback message with payload fromatted accoirding to output frm the message router -# URI and payload, (PUT or POST): /callbacks/<id> +# Receive a json callback message with payload formatted according to output from the message router +# Array of stringified json objects +# URI and payload, (PUT or POST): /callbacks-mr/<id> # json is a list of string encoded json items # response: OK 200 or 500 for other errors @app.route(CALLBACK_MR_URL, @@ -212,17 +224,21 @@ def events_write_mr(id): global msg_callbacks global cntr_msg_callbacks + storeas=request.args.get('storeas') #If set, store payload as an md5 hash and don't log the payload + #Large payloads will otherwise overload the server try: print("Received callback (mr) for id: "+id +", content-type="+request.content_type) - remote_host_logging(request) print("raw data: str(request.data): "+str(request.data)) + if (storeas is None): + print("raw data: str(request.data): "+str(request.data)) do_delay() try: #if (request.content_type == MIME_JSON): if (MIME_JSON in request.content_type): data = request.data msg_list = json.loads(data) - print("Payload(json): "+str(msg_list)) + if (storeas is None): + print("Payload(json): "+str(msg_list)) else: msg_list=[] print("Payload(content-type="+request.content_type+"). Setting empty json as payload") @@ -234,11 +250,21 @@ def events_write_mr(id): with lock: remote_host_logging(request) for msg in msg_list: - print("msg (str): "+str(msg)) - msg=json.loads(msg) - print("msg (json): "+str(msg)) + if (storeas is None): + msg=json.loads(msg) + else: + #Convert to compact json without ws between parameter and value...
+ #It seems that ws is added somewhere along the way to this server + msg=json.loads(msg) + msg=json.dumps(msg, separators=(',', ':')) + + md5msg={} + md5msg["md5"]=md5(msg.encode('utf-8')).hexdigest() + msg=md5msg + print("msg (json converted to md5 hash): "+str(msg["md5"])) cntr_msg_callbacks += 1 - msg[TIME_STAMP]=str(datetime.now()) + if (isinstance(msg, dict)): + msg[TIME_STAMP]=str(datetime.now()) if (id in msg_callbacks.keys()): msg_callbacks[id].append(msg) else: @@ -259,6 +285,73 @@ def events_write_mr(id): return 'OK',200 +# Receive a callback message of a single text message (content type ignored) +# or a json array of strings (content type json) +# URI and payload, (PUT or POST): /callbacks-text/<id> +# response: OK 200 or 500 for other errors +@app.route(CALLBACK_TEXT_URL, + methods=['PUT','POST']) +def events_write_text(id): + global msg_callbacks + global cntr_msg_callbacks + + storeas=request.args.get('storeas') #If set, store payload as an md5 hash and don't log the payload + #Large payloads will otherwise overload the server + try: + print("Received callback for id: "+id +", content-type="+request.content_type) + remote_host_logging(request) + if (storeas is None): + print("raw data: str(request.data): "+str(request.data)) + do_delay() + + try: + msg_list=None + if (MIME_JSON in request.content_type): #Json array of strings + msg_list=json.loads(request.data) + else: + data=request.data.decode("utf-8") #Assuming string + msg_list=[] + msg_list.append(data) + + for msg in msg_list: + if (storeas == "md5"): + md5msg={} + print("msg: "+str(msg)) + print("msg (encoded str): "+str(msg.encode('utf-8'))) + md5msg["md5"]=md5(msg.encode('utf-8')).hexdigest() + msg=md5msg + print("msg (data converted to md5 hash): "+str(msg["md5"])) + + if (isinstance(msg, dict)): + msg[TIME_STAMP]=str(datetime.now()) + + with lock: + cntr_msg_callbacks += 1 + if (id in msg_callbacks.keys()): + msg_callbacks[id].append(msg) + else: + msg_callbacks[id]=[] + msg_callbacks[id].append(msg) + + if (id in cntr_callbacks.keys()): + cntr_callbacks[id][0] += 1 + else: + cntr_callbacks[id]=[] + cntr_callbacks[id].append(1) + cntr_callbacks[id].append(0) + except Exception as e: + print(CAUGHT_EXCEPTION+str(e)) + traceback.print_exc() + return 'NOTOK',500 + + + except Exception as e: + print(CAUGHT_EXCEPTION+str(e)) + traceback.print_exc() + return 'NOTOK',500 + + return 'OK',200 + ### Functions for test ### # Dump the whole db of current callbacks diff --git a/test/cr/app/nginx.conf b/test/cr/app/nginx.conf index e1b9ff9a..32beca1c 100644 --- a/test/cr/app/nginx.conf +++ b/test/cr/app/nginx.conf @@ -43,7 +43,10 @@ http { proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_pass http://localhost:2222; + + client_max_body_size 0; } + } ## # SSL Settings diff --git a/test/mrstub/app/main.py b/test/mrstub/app/main.py index fb6d6748..4b1913f7 100644 --- a/test/mrstub/app/main.py +++ b/test/mrstub/app/main.py @@ -69,11 +69,13 @@ SERVER_ERROR="Server error :" topic_write="" topic_read="" +generic_topics_upload_baseurl="" uploader_thread=None downloader_thread=None +generic_uploader_thread=None -# Function to download messages from dmaap +# Function to upload PMS messages to dmaap def dmaap_uploader(): global msg_requests global cntr_msg_requests_fetched @@ -107,7 +109,7 @@ def dmaap_uploader(): sleep(0.01) -# Function to upload messages to dmaap +# Function to download PMS messages from dmaap def dmaap_downloader(): global msg_responses global
cntr_msg_responses_submitted @@ -150,6 +152,48 @@ def dmaap_downloader(): except Exception as e: sleep(1) +# Function to upload generic messages to dmaap +def dmaap_generic_uploader(): + global msg_requests + global cntr_msg_requests_fetched + + print("Starting generic uploader") + + headers_json = {'Content-type': 'application/json', 'Accept': '*/*'} + headers_text = {'Content-type': 'text/plain', 'Accept': '*/*'} + + while True: + if (len(generic_messages)): + for topicname in generic_messages.keys(): #topicname contains the path of the topics, eg. "/event/" + topic_queue=generic_messages[topicname] + if (len(topic_queue)>0): + if (topicname.endswith(".text")): + msg=topic_queue[0] + headers=headers_text + else: + msg=topic_queue[0] + msg=json.dumps(msg) + headers=headers_json + url=generic_topics_upload_baseurl+topicname + print("Sending to dmaap : "+ url) + print("Sending to dmaap : "+ msg) + print("Sending to dmaap : "+ str(headers)) + try: + resp=requests.post(url, data=msg, headers=headers, timeout=10) + if (resp.status_code < 200 or resp.status_code > 299): + print("Failed, response code: " + str(resp.status_code)) + sleep(1) + else: + print("Dmaap response code: " + str(resp.status_code)) + print("Dmaap response text: " + str(resp.text)) + with lock: + topic_queue.pop(0) + cntr_msg_requests_fetched += 1 + except Exception as e: + print("Failed, exception: "+ str(e)) + sleep(1) + sleep(0.01) + #I'm alive function @app.route('/', methods=['GET']) @@ -157,7 +201,7 @@ def index(): return 'OK', 200 -# Helper function to create a Dmaap request message +# Helper function to create a Dmaap PMS request message # args : # response: json formatted string of a complete Dmaap message def create_message(operation, correlation_id, payload, url): @@ -171,7 +215,7 @@ def create_message(operation, correlation_id, payload, url): ### MR-stub interface, for MR control -# Send a message to MR +# Send a PMS message to MR # URI and parameters (PUT or POST): /send-request?operation=&url= # response: (http 200) or 400 for parameter error or 500 for other errors @app.route(APP_WRITE_URL, @@ -212,7 +256,7 @@ def sendrequest(): print(APP_WRITE_URL+"-"+CAUGHT_EXCEPTION+" "+str(e) + " "+traceback.format_exc()) return Response(SERVER_ERROR+" "+str(e), status=500, mimetype=MIME_TEXT) -# Receive a message response for MR for the included correlation id +# Receive a PMS message response for MR for the included correlation id # URI and parameter, (GET): /receive-response?correlationid= # response: 200 or empty 204 or other errors 500 @app.route(APP_READ_URL, @@ -243,7 +287,7 @@ def receiveresponse(): ### Dmaap interface ### -# Read messages stream. URI according to agent configuration. +# Read PMS messages stream. URI according to agent configuration. # URI, (GET): /events/A1-POLICY-AGENT-READ/users/policy-agent # response: 200 , or 500 for other errors @app.route(AGENT_READ_URL, @@ -299,7 +343,7 @@ def events_read(): print("timeout: "+str(timeout)+", start_time: "+str(start_time)+", current_time: "+str(current_time)) return Response("[]", status=200, mimetype=MIME_JSON) -# Write messages stream. URI according to agent configuration. +# Write PMS messages stream. URI according to agent configuration.
# URI and payload, (PUT or POST): /events/A1-POLICY-AGENT-WRITE # response: OK 200 or 400 for missing json parameters, 500 for other errors @app.route(AGENT_WRITE_URL, @@ -367,10 +411,10 @@ def oru_read(): return Response(json.dumps(res), status=200, mimetype=MIME_JSON) return Response("[]", status=200, mimetype=MIME_JSON) -# Generic POST/PUT catching all urls starting with /events/. +# Generic POST catching all urls starting with /events/. # Writes the message in a queue for that topic @app.route("/events/<path:path>", - methods=['PUT','POST']) + methods=['POST']) def generic_write(path): global generic_messages global cntr_msg_responses_submitted @@ -378,8 +422,12 @@ write_method=str(request.method) with lock: try: - payload=request.json - print(write_method+" on "+urlkey+" json=" + json.dumps(payload)) + if (urlkey.endswith(".text")): + payload=str(request.data.decode('UTF-8')) + print(write_method+" on "+urlkey+" text=" + payload) + else: + payload=request.json + print(write_method+" on "+urlkey+" json=" + json.dumps(payload)) topicmsgs=[] if (urlkey in generic_messages.keys()): topicmsgs=generic_messages[urlkey] @@ -407,6 +455,9 @@ def generic_read(path): global generic_messages global cntr_msg_requests_fetched + if generic_topics_upload_baseurl: + return Response('Url not available when running as mrstub frontend', status=404, mimetype=MIME_TEXT) + urlpath="/events/"+str(path) urlkey="/events/"+str(path).split("/")[0] #Extract topic print("GET on topic"+urlkey) @@ -530,7 +581,14 @@ if os.getenv("TOPIC_READ") is not None: uploader_thread=Thread(target=dmaap_uploader) uploader_thread.start() -else: +if os.getenv("GENERIC_TOPICS_UPLOAD_BASEURL") is not None: + print("GENERIC_TOPICS_UPLOAD_BASEURL:"+os.environ['GENERIC_TOPICS_UPLOAD_BASEURL']) + generic_topics_upload_baseurl=os.environ['GENERIC_TOPICS_UPLOAD_BASEURL'] + if generic_topics_upload_baseurl and generic_uploader_thread is None: + generic_uploader_thread=Thread(target=dmaap_generic_uploader) + generic_uploader_thread.start() + +if os.getenv("TOPIC_READ") is None or os.getenv("GENERIC_TOPICS_UPLOAD_BASEURL") is None: print("No env variables - OK") if __name__ == "__main__": diff --git a/test/mrstub/app/nginx.conf b/test/mrstub/app/nginx.conf index c548e566..35b5ba0c 100644 --- a/test/mrstub/app/nginx.conf +++ b/test/mrstub/app/nginx.conf @@ -39,7 +39,8 @@ http { # serve dynamic requests location / { - proxy_pass http://localhost:2222; + proxy_pass http://localhost:2222; + client_max_body_size 0; } } ## diff --git a/test/simulator-group/dmaapadp/application.yaml b/test/simulator-group/dmaapadp/application.yaml index b20a9d77..f96db091 100644 --- a/test/simulator-group/dmaapadp/application.yaml +++ b/test/simulator-group/dmaapadp/application.yaml @@ -68,4 +68,7 @@ app: configuration-filepath: /opt/app/dmaap-adaptor-service/data/application_configuration.json dmaap-base-url: $MR_SERVICE_PATH # The url used to address this component. This is used as a callback url sent to other components. - dmaap-adapter-base-url: $DMAAP_ADP_SERVICE_PATH \ No newline at end of file + dmaap-adapter-base-url: $DMAAP_ADP_SERVICE_PATH + # KAFKA bootstrap server.
This is only needed if there are Information Types that use a kafkaInputTopic + kafka: + bootstrap-servers: $MR_KAFKA_SERVICE_PATH diff --git a/test/simulator-group/dmaapadp/application_configuration.json b/test/simulator-group/dmaapadp/application_configuration.json index b6605e35..e36d910d 100644 --- a/test/simulator-group/dmaapadp/application_configuration.json +++ b/test/simulator-group/dmaapadp/application_configuration.json @@ -2,8 +2,13 @@ "types": [ { "id": "ExampleInformationType", - "dmaapTopicUrl": "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs", + "dmaapTopicUrl": "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs?timeout=15000&limit=100", "useHttpProxy": ${DMMAAP_ADP_PROXY_FLAG} - } + }, + { + "id": "ExampleInformationTypeKafka", + "kafkaInputTopic": "unauthenticated.dmaapadp_kafka.text", + "useHttpProxy": ${DMMAAP_ADP_PROXY_FLAG} + } ] } \ No newline at end of file diff --git a/test/simulator-group/dmaapadp/mnt/.gitignore b/test/simulator-group/dmaapadp/mnt/.gitignore new file mode 100644 index 00000000..cdf07930 --- /dev/null +++ b/test/simulator-group/dmaapadp/mnt/.gitignore @@ -0,0 +1,17 @@ +################################################################################ +# Copyright (c) 2021 Nordix Foundation. # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); # +# you may not use this file except in compliance with the License. # +# You may obtain a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +################################################################################ +* +!.gitignore diff --git a/test/simulator-group/dmaapmed/app.yaml b/test/simulator-group/dmaapmed/app.yaml index e0296fa9..aa8a0f18 100644 --- a/test/simulator-group/dmaapmed/app.yaml +++ b/test/simulator-group/dmaapmed/app.yaml @@ -40,7 +40,7 @@ spec: - name: DMAAP_MR_ADDR value: "$MR_SERVICE_PATH" - name: LOG_LEVEL - value: "Debug" + value: Debug volumes: - configMap: defaultMode: 420 diff --git a/test/simulator-group/dmaapmed/docker-compose.yml b/test/simulator-group/dmaapmed/docker-compose.yml index 21fe5514..d0672dfe 100644 --- a/test/simulator-group/dmaapmed/docker-compose.yml +++ b/test/simulator-group/dmaapmed/docker-compose.yml @@ -32,7 +32,7 @@ services: - INFO_PRODUCER_PORT=${DMAAP_MED_CONF_SELF_PORT} - INFO_COORD_ADDR=${ECS_SERVICE_PATH} - DMAAP_MR_ADDR=${MR_SERVICE_PATH} - - LOG_LEVEL="Debug" + - LOG_LEVEL=Debug volumes: - ${DMAAP_MED_HOST_MNT_DIR}/$DMAAP_MED_DATA_FILE:${DMAAP_MED_DATA_MOUNT_PATH}/$DMAAP_MED_DATA_FILE labels: diff --git a/test/simulator-group/dmaapmed/mnt/.gitignore b/test/simulator-group/dmaapmed/mnt/.gitignore new file mode 100644 index 00000000..b94353c3 --- /dev/null +++ b/test/simulator-group/dmaapmed/mnt/.gitignore @@ -0,0 +1,17 @@ +################################################################################ +# Copyright (c) 2021 Nordix Foundation. # +# # +# Licensed under the Apache License, Version 2.0 (the "License"); # +# you may not use this file except in compliance with the License.
# +# You may obtain a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +################################################################################ +* +!.gitignore \ No newline at end of file diff --git a/test/simulator-group/dmaapmed/type_config.json b/test/simulator-group/dmaapmed/type_config.json index 8a672264..ddb776f3 100644 --- a/test/simulator-group/dmaapmed/type_config.json +++ b/test/simulator-group/dmaapmed/type_config.json @@ -3,7 +3,7 @@ [ { "id": "STD_Fault_Messages", - "dmaapTopicUrl": "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages" + "dmaapTopicUrl": "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages?timeout=15000&limit=100" } ] } \ No newline at end of file diff --git a/test/simulator-group/dmaapmr/app.yaml b/test/simulator-group/dmaapmr/app.yaml index 2b39d151..a4ecc915 100644 --- a/test/simulator-group/dmaapmr/app.yaml +++ b/test/simulator-group/dmaapmr/app.yaml @@ -1,24 +1,24 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: $MR_DMAAP_KUBE_APP_NAME + name: $MR_DMAAP_APP_NAME namespace: $KUBE_ONAP_NAMESPACE labels: - run: $MR_DMAAP_KUBE_APP_NAME + run: $MR_DMAAP_APP_NAME autotest: DMAAPMR spec: replicas: 1 selector: matchLabels: - run: $MR_DMAAP_KUBE_APP_NAME + run: $MR_DMAAP_APP_NAME template: metadata: labels: - run: $MR_DMAAP_KUBE_APP_NAME + run: $MR_DMAAP_APP_NAME autotest: DMAAPMR spec: containers: - - name: $MR_DMAAP_KUBE_APP_NAME + - name: $MR_DMAAP_APP_NAME image: $ONAP_DMAAPMR_IMAGE imagePullPolicy: $KUBE_IMAGE_PULL_POLICY ports: @@ -33,11 +33,9 @@ spec: - mountPath: /appl/dmaapMR1/bundleconfig/etc/appprops/MsgRtrApi.properties subPath: MsgRtrApi.properties name: dmaapmr-msg-rtr-api - volumeMounts: - mountPath: /appl/dmaapMR1/bundleconfig/etc/logback.xml subPath: logback.xml name: dmaapmr-log-back - volumeMounts: - mountPath: /appl/dmaapMR1/etc/cadi.properties subPath: cadi.properties name: dmaapmr-cadi @@ -58,34 +56,34 @@ spec: apiVersion: apps/v1 kind: Deployment metadata: - name: $MR_KAFKA_BWDS_NAME + name: $MR_KAFKA_APP_NAME namespace: $KUBE_ONAP_NAMESPACE labels: - run: $MR_KAFKA_BWDS_NAME + run: $MR_KAFKA_APP_NAME autotest: DMAAPMR spec: replicas: 1 selector: matchLabels: - run: $MR_KAFKA_BWDS_NAME + run: $MR_KAFKA_APP_NAME template: metadata: labels: - run: $MR_KAFKA_BWDS_NAME + run: $MR_KAFKA_APP_NAME autotest: DMAAPMR spec: containers: - - name: $MR_KAFKA_BWDS_NAME + - name: $MR_KAFKA_APP_NAME image: $ONAP_KAFKA_IMAGE imagePullPolicy: $KUBE_IMAGE_PULL_POLICY ports: - name: http - containerPort: 9095 + containerPort: $MR_KAFKA_PORT env: - name: enableCadi value: 'false' - name: KAFKA_ZOOKEEPER_CONNECT - value: 'zookeeper.onap:2181' + value: '$MR_ZOOKEEPER_APP_NAME:$MR_ZOOKEEPER_PORT' - name: KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS value: '40000' - name: KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS @@ -93,11 +91,11 @@ spec: - name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP value: 'INTERNAL_PLAINTEXT:PLAINTEXT,EXTERNAL_PLAINTEXT:PLAINTEXT' - name: KAFKA_ADVERTISED_LISTENERS - value: 'INTERNAL_PLAINTEXT://kaka:9092' -# - name: KAFKA_ADVERTISED_LISTENERS -# value: 'INTERNAL_PLAINTEXT://localhost:9092' + value: 
'INTERNAL_PLAINTEXT://$MR_KAFKA_APP_NAME:$MR_KAFKA_PORT' - name: KAFKA_LISTENERS - value: 'EXTERNAL_PLAINTEXT://0.0.0.0:9095,INTERNAL_PLAINTEXT://0.0.0.0:9092' + value: 'INTERNAL_PLAINTEXT://0.0.0.0:$MR_KAFKA_PORT' + # - name: KAFKA_LISTENERS + # value: 'EXTERNAL_PLAINTEXT://0.0.0.0:9091,INTERNAL_PLAINTEXT://0.0.0.0:$MR_KAFKA_PORT' - name: KAFKA_INTER_BROKER_LISTENER_NAME value: INTERNAL_PLAINTEXT - name: KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE @@ -105,12 +103,11 @@ spec: - name: KAFKA_OPTS value: '-Djava.security.auth.login.config=/etc/kafka/secrets/jaas/zk_client_jaas.conf' - name: KAFKA_ZOOKEEPER_SET_ACL - value: 'true' + value: 'false' - name: KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR value: '1' - name: KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS value: '1' - volumeMounts: - mountPath: /etc/kafka/secrets/jaas/zk_client_jaas.conf subPath: zk_client_jaas.conf @@ -146,7 +143,7 @@ spec: imagePullPolicy: $KUBE_IMAGE_PULL_POLICY ports: - name: http - containerPort: 2181 + containerPort: $MR_ZOOKEEPER_PORT env: - name: ZOOKEEPER_REPLICAS value: '1' @@ -163,7 +160,7 @@ spec: - name: ZOOKEEPER_AUTOPURGE_PURGE_INTERVAL value: '24' - name: ZOOKEEPER_CLIENT_PORT - value: '2181' + value: '$MR_ZOOKEEPER_PORT' - name: KAFKA_OPTS value: '-Djava.security.auth.login.config=/etc/zookeeper/secrets/jaas/zk_server_jaas.conf -Dzookeeper.kerberos.removeHostFromPrincipal=true -Dzookeeper.kerberos.removeRealmFromPrincipal=true -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider -Dzookeeper.requireClientAuthScheme=sasl' - name: ZOOKEEPER_SERVER_ID diff --git a/test/simulator-group/dmaapmr/mnt2/kafka/zk_client_jaas.conf b/test/simulator-group/dmaapmr/configs/kafka/zk_client_jaas.conf similarity index 100% rename from test/simulator-group/dmaapmr/mnt2/kafka/zk_client_jaas.conf rename to test/simulator-group/dmaapmr/configs/kafka/zk_client_jaas.conf diff --git a/test/simulator-group/dmaapmr/mnt2/mr/MsgRtrApi.properties b/test/simulator-group/dmaapmr/configs/mr/MsgRtrApi.properties similarity index 96% rename from test/simulator-group/dmaapmr/mnt2/mr/MsgRtrApi.properties rename to test/simulator-group/dmaapmr/configs/mr/MsgRtrApi.properties index 47643216..3e0b001d 100644 --- a/test/simulator-group/dmaapmr/mnt2/mr/MsgRtrApi.properties +++ b/test/simulator-group/dmaapmr/configs/mr/MsgRtrApi.properties @@ -1,6 +1,7 @@ # LICENSE_START======================================================= # org.onap.dmaap # ================================================================================ +# Copyright © 2021 Nordix Foundation. All rights reserved. # Copyright © 2017 AT&T Intellectual Property. All rights reserved. # ================================================================================ # Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,7 +35,7 @@ ## ## Both Cambria and Kafka make use of Zookeeper. 
## -config.zk.servers=zookeeper:2181 +config.zk.servers=$MR_ZOOKEEPER_APP_NAME:$MR_ZOOKEEPER_PORT ############################################################################### ## @@ -45,7 +46,7 @@ config.zk.servers=zookeeper:2181 ## if you want to change request.required.acks it can take this one value #kafka.metadata.broker.list=localhost:9092,localhost:9093 #kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}} -kafka.metadata.broker.list=kafka:9092 +kafka.metadata.broker.list=$MR_KAFKA_APP_NAME:$MR_KAFKA_PORT ##kafka.request.required.acks=-1 #kafka.client.zookeeper=${config.zk.servers} consumer.timeout.ms=100 @@ -135,7 +136,7 @@ cambria.consumer.cache.touchFreqMs=120000 cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache consumer.timeout=17 default.partitions=3 -default.replicas=3 +default.replicas=1 ############################################################################## #100mb maxcontentlength=10000 diff --git a/test/simulator-group/dmaapmr/configs/mr/cadi.properties b/test/simulator-group/dmaapmr/configs/mr/cadi.properties new file mode 100644 index 00000000..6178e421 --- /dev/null +++ b/test/simulator-group/dmaapmr/configs/mr/cadi.properties @@ -0,0 +1,38 @@ +# ============LICENSE_START=============================================== +# Copyright (C) 2021 Nordix Foundation. All rights reserved. +# ======================================================================== +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END================================================= +# +#Removed to disable aaf in test env +#aaf_locate_url=https://aaf-onap-test.osaaf.org:8095\ +aaf_url=https://AAF_LOCATE_URL/onap.org.osaaf.aaf.service:2.1 +aaf_env=DEV +aaf_lur=org.onap.aaf.cadi.aaf.v2_0.AAFLurPerm + +#Removed to disable aaf in test env +# cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks +# cadi_truststore_password=8FyfX+ar;0$uZQ0h9*oXchNX + +cadi_keyfile=/appl/dmaapMR1/etc/org.onap.dmaap.mr.keyfile + +cadi_alias=dmaapmr@mr.dmaap.onap.org +cadi_keystore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.p12 +cadi_keystore_password=GDQttV7)BlOvWMf6F7tz&cjy +cadi_x509_issuers=CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_7, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US + +cadi_loglevel=INFO +cadi_protocols=TLSv1.1,TLSv1.2 +cadi_latitude=37.78187 +cadi_longitude=-122.26147 \ No newline at end of file diff --git a/test/simulator-group/dmaapmr/mnt2/mr/logback.xml b/test/simulator-group/dmaapmr/configs/mr/logback.xml similarity index 99% rename from test/simulator-group/dmaapmr/mnt2/mr/logback.xml rename to test/simulator-group/dmaapmr/configs/mr/logback.xml index f02a2db7..e60e8daa 100644 --- a/test/simulator-group/dmaapmr/mnt2/mr/logback.xml +++ b/test/simulator-group/dmaapmr/configs/mr/logback.xml @@ -1,5 +1,6 @@