import org.oran.dmaapadapter.configuration.ApplicationConfig;
import org.oran.dmaapadapter.repository.InfoType;
import org.oran.dmaapadapter.repository.InfoTypes;
-import org.oran.dmaapadapter.repository.Jobs;
-import org.oran.dmaapadapter.tasks.DmaapTopicConsumer;
-import org.oran.dmaapadapter.tasks.KafkaTopicConsumers;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.web.embedded.tomcat.TomcatServletWebServerFactory;
@Configuration
public class BeanFactory {
- private InfoTypes infoTypes;
@Value("${server.http-port}")
private int httpPort = 0;
}
@Bean
- public InfoTypes types(@Autowired ApplicationConfig appConfig, @Autowired Jobs jobs,
- @Autowired KafkaTopicConsumers kafkaConsumers) {
- if (infoTypes != null) {
- return infoTypes;
- }
-
+ public InfoTypes types(@Autowired ApplicationConfig appConfig) {
Collection<InfoType> types = appConfig.getTypes();
-
- // Start a consumer for each type
- for (InfoType type : types) {
- if (type.isDmaapTopicDefined()) {
- DmaapTopicConsumer topicConsumer = new DmaapTopicConsumer(appConfig, type, jobs);
- topicConsumer.start();
- }
- }
- infoTypes = new InfoTypes(types);
- kafkaConsumers.start(infoTypes);
- return infoTypes;
+ return new InfoTypes(types);
}
@Bean
this.httpProxyConfig = httpProxyConfig;
}
- public Mono<ResponseEntity<String>> postForEntity(String uri, @Nullable String body) {
+ public Mono<ResponseEntity<String>> postForEntity(String uri, @Nullable String body,
+ @Nullable MediaType contentType) {
Object traceTag = createTraceTag();
logger.debug("{} POST uri = '{}{}''", traceTag, baseUrl, uri);
logger.trace("{} POST body: {}", traceTag, body);
Mono<String> bodyProducer = body != null ? Mono.just(body) : Mono.empty();
- return getWebClient() //
- .flatMap(client -> {
- RequestHeadersSpec<?> request = client.post() //
- .uri(uri) //
- .contentType(MediaType.APPLICATION_JSON) //
- .body(bodyProducer, String.class);
- return retrieve(traceTag, request);
- });
+
+ RequestHeadersSpec<?> request = getWebClient() //
+ .post() //
+ .uri(uri) //
+ .contentType(contentType) //
+ .body(bodyProducer, String.class);
+ return retrieve(traceTag, request);
}
- public Mono<String> post(String uri, @Nullable String body) {
- return postForEntity(uri, body) //
- .flatMap(this::toBody);
+ public Mono<String> post(String uri, @Nullable String body, @Nullable MediaType contentType) {
+ return postForEntity(uri, body, contentType) //
+ .map(this::toBody);
}
- public Mono<String> postWithAuthHeader(String uri, String body, String username, String password) {
+ public Mono<String> postWithAuthHeader(String uri, String body, String username, String password,
+ MediaType mediaType) {
Object traceTag = createTraceTag();
logger.debug("{} POST (auth) uri = '{}{}''", traceTag, baseUrl, uri);
logger.trace("{} POST body: {}", traceTag, body);
- return getWebClient() //
- .flatMap(client -> {
- RequestHeadersSpec<?> request = client.post() //
- .uri(uri) //
- .headers(headers -> headers.setBasicAuth(username, password)) //
- .contentType(MediaType.APPLICATION_JSON) //
- .bodyValue(body);
- return retrieve(traceTag, request) //
- .flatMap(this::toBody);
- });
+
+ RequestHeadersSpec<?> request = getWebClient() //
+ .post() //
+ .uri(uri) //
+ .headers(headers -> headers.setBasicAuth(username, password)) //
+ .contentType(mediaType) //
+ .bodyValue(body);
+ return retrieve(traceTag, request) //
+ .map(this::toBody);
}
public Mono<ResponseEntity<String>> putForEntity(String uri, String body) {
Object traceTag = createTraceTag();
logger.debug("{} PUT uri = '{}{}''", traceTag, baseUrl, uri);
logger.trace("{} PUT body: {}", traceTag, body);
- return getWebClient() //
- .flatMap(client -> {
- RequestHeadersSpec<?> request = client.put() //
- .uri(uri) //
- .contentType(MediaType.APPLICATION_JSON) //
- .bodyValue(body);
- return retrieve(traceTag, request);
- });
+
+ RequestHeadersSpec<?> request = getWebClient() //
+ .put() //
+ .uri(uri) //
+ .contentType(MediaType.APPLICATION_JSON) //
+ .bodyValue(body);
+ return retrieve(traceTag, request);
}
public Mono<ResponseEntity<String>> putForEntity(String uri) {
Object traceTag = createTraceTag();
logger.debug("{} PUT uri = '{}{}''", traceTag, baseUrl, uri);
logger.trace("{} PUT body: <empty>", traceTag);
- return getWebClient() //
- .flatMap(client -> {
- RequestHeadersSpec<?> request = client.put() //
- .uri(uri);
- return retrieve(traceTag, request);
- });
+ RequestHeadersSpec<?> request = getWebClient() //
+ .put() //
+ .uri(uri);
+ return retrieve(traceTag, request);
}
public Mono<String> put(String uri, String body) {
return putForEntity(uri, body) //
- .flatMap(this::toBody);
+ .map(this::toBody);
}
public Mono<ResponseEntity<String>> getForEntity(String uri) {
Object traceTag = createTraceTag();
logger.debug("{} GET uri = '{}{}''", traceTag, baseUrl, uri);
- return getWebClient() //
- .flatMap(client -> {
- RequestHeadersSpec<?> request = client.get().uri(uri);
- return retrieve(traceTag, request);
- });
+ RequestHeadersSpec<?> request = getWebClient().get().uri(uri);
+ return retrieve(traceTag, request);
}
public Mono<String> get(String uri) {
return getForEntity(uri) //
- .flatMap(this::toBody);
+ .map(this::toBody);
}
public Mono<ResponseEntity<String>> deleteForEntity(String uri) {
Object traceTag = createTraceTag();
logger.debug("{} DELETE uri = '{}{}''", traceTag, baseUrl, uri);
- return getWebClient() //
- .flatMap(client -> {
- RequestHeadersSpec<?> request = client.delete().uri(uri);
- return retrieve(traceTag, request);
- });
+ RequestHeadersSpec<?> request = getWebClient().delete().uri(uri);
+ return retrieve(traceTag, request);
}
public Mono<String> delete(String uri) {
return deleteForEntity(uri) //
- .flatMap(this::toBody);
+ .map(this::toBody);
}
private Mono<ResponseEntity<String>> retrieve(Object traceTag, RequestHeadersSpec<?> request) {
}
}
- private Mono<String> toBody(ResponseEntity<String> entity) {
+ private String toBody(ResponseEntity<String> entity) {
if (entity.getBody() == null) {
- return Mono.just("");
+ return "";
} else {
- return Mono.just(entity.getBody());
+ return entity.getBody();
}
}
.build();
}
- private Mono<WebClient> getWebClient() {
+ private WebClient getWebClient() {
if (this.webClient == null) {
this.webClient = buildWebClient(baseUrl);
}
- return Mono.just(buildWebClient(baseUrl));
+ return this.webClient;
}
}
@RequestBody String body) {
try {
ProducerJobInfo request = gson.fromJson(body, ProducerJobInfo.class);
-
- logger.info("Job started callback {}", request.id);
- Job job = new Job(request.id, request.targetUri, types.getType(request.typeId), request.owner,
+ logger.debug("Job started callback {}", request.id);
+ this.jobs.addJob(request.id, request.targetUri, types.getType(request.typeId), request.owner,
request.lastUpdated, toJobParameters(request.jobData));
- this.jobs.put(job);
return new ResponseEntity<>(HttpStatus.OK);
} catch (Exception e) {
return ErrorResponse.create(e, HttpStatus.NOT_FOUND);
public ResponseEntity<Object> jobDeletedCallback( //
@PathVariable("infoJobId") String infoJobId) {
- logger.info("Job deleted callback {}", infoJobId);
+ logger.debug("Job deleted callback {}", infoJobId);
this.jobs.remove(infoJobId);
return new ResponseEntity<>(HttpStatus.OK);
}
package org.oran.dmaapadapter.repository;
+import java.time.Duration;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import lombok.Getter;
import org.immutables.gson.Gson;
+import org.oran.dmaapadapter.clients.AsyncRestClient;
public class Job {
@Gson.TypeAdapters
public static class Parameters {
- public String filter;
- public BufferTimeout bufferTimeout;
+ @Getter
+ private String filter;
+ @Getter
+ private BufferTimeout bufferTimeout;
- public Parameters() {
- }
+ private int maxConcurrency;
+
+ public Parameters() {}
- public Parameters(String filter, BufferTimeout bufferTimeout) {
+ public Parameters(String filter, BufferTimeout bufferTimeout, int maxConcurrency) {
this.filter = filter;
this.bufferTimeout = bufferTimeout;
+ this.maxConcurrency = maxConcurrency;
}
- public static class BufferTimeout {
- public BufferTimeout(int maxSize, int maxTimeMiliseconds) {
- this.maxSize = maxSize;
- this.maxTimeMiliseconds = maxTimeMiliseconds;
- }
+ public int getMaxConcurrency() {
+ return maxConcurrency == 0 ? 1 : maxConcurrency;
+ }
+ }
- public BufferTimeout() {
- }
+ @Gson.TypeAdapters
+ public static class BufferTimeout {
+ public BufferTimeout(int maxSize, long maxTimeMiliseconds) {
+ this.maxSize = maxSize;
+ this.maxTimeMiliseconds = maxTimeMiliseconds;
+ }
- public int maxSize;
- public int maxTimeMiliseconds;
+ public BufferTimeout() {}
+
+ @Getter
+ private int maxSize;
+
+ private long maxTimeMiliseconds;
+
+ public Duration getMaxTime() {
+ return Duration.ofMillis(maxTimeMiliseconds);
}
}
private final Pattern jobDataFilter;
- public Job(String id, String callbackUrl, InfoType type, String owner, String lastUpdated, Parameters parameters) {
+ @Getter
+ private final AsyncRestClient consumerRestClient;
+
+ public Job(String id, String callbackUrl, InfoType type, String owner, String lastUpdated, Parameters parameters,
+ AsyncRestClient consumerRestClient) {
this.id = id;
this.callbackUrl = callbackUrl;
this.type = type;
} else {
jobDataFilter = null;
}
+ this.consumerRestClient = consumerRestClient;
}
public boolean isFilterMatch(String data) {
package org.oran.dmaapadapter.repository;
+import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import java.util.Vector;
+import org.oran.dmaapadapter.clients.AsyncRestClient;
+import org.oran.dmaapadapter.clients.AsyncRestClientFactory;
+import org.oran.dmaapadapter.configuration.ApplicationConfig;
import org.oran.dmaapadapter.exceptions.ServiceException;
-import org.oran.dmaapadapter.tasks.KafkaTopicConsumers;
+import org.oran.dmaapadapter.repository.Job.Parameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@Component
public class Jobs {
+ public interface Observer {
+ void onJobbAdded(Job job);
+
+ void onJobRemoved(Job job);
+ }
+
private static final Logger logger = LoggerFactory.getLogger(Jobs.class);
private Map<String, Job> allJobs = new HashMap<>();
private MultiMap<Job> jobsByType = new MultiMap<>();
- private final KafkaTopicConsumers kafkaConsumers;
+ private final AsyncRestClientFactory restclientFactory;
+ private final List<Observer> observers = new ArrayList<>();
- public Jobs(@Autowired KafkaTopicConsumers kafkaConsumers) {
- this.kafkaConsumers = kafkaConsumers;
+ public Jobs(@Autowired ApplicationConfig applicationConfig) {
+ restclientFactory = new AsyncRestClientFactory(applicationConfig.getWebClientConfig());
}
public synchronized Job getJob(String id) throws ServiceException {
return allJobs.get(id);
}
- public synchronized void put(Job job) {
+ public void addJob(String id, String callbackUrl, InfoType type, String owner, String lastUpdated,
+ Parameters parameters) {
+ AsyncRestClient consumerRestClient = type.isUseHttpProxy() //
+ ? restclientFactory.createRestClientUseHttpProxy(callbackUrl) //
+ : restclientFactory.createRestClientNoHttpProxy(callbackUrl);
+ Job job = new Job(id, callbackUrl, type, owner, lastUpdated, parameters, consumerRestClient);
+ this.put(job);
+ synchronized (observers) {
+ this.observers.forEach(obs -> obs.onJobbAdded(job));
+ }
+ }
+
+ public void addObserver(Observer obs) {
+ synchronized (observers) {
+ this.observers.add(obs);
+ }
+ }
+
+ private synchronized void put(Job job) {
logger.debug("Put job: {}", job.getId());
allJobs.put(job.getId(), job);
jobsByType.put(job.getType().getId(), job.getId(), job);
- kafkaConsumers.addJob(job);
}
public synchronized Iterable<Job> getAll() {
return job;
}
- public synchronized void remove(Job job) {
- this.allJobs.remove(job.getId());
- jobsByType.remove(job.getType().getId(), job.getId());
- kafkaConsumers.removeJob(job);
+ public void remove(Job job) {
+ synchronized (this) {
+ this.allJobs.remove(job.getId());
+ jobsByType.remove(job.getType().getId(), job.getId());
+ }
+ synchronized (observers) {
+ this.observers.forEach(obs -> obs.onJobRemoved(job));
+ }
}
public synchronized int size() {
import org.oran.dmaapadapter.repository.Jobs;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import org.springframework.http.MediaType;
import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;
* The class fetches incoming requests from DMAAP and sends them further to the
* consumers that has a job for this InformationType.
*/
-
public class DmaapTopicConsumer {
private static final Duration TIME_BETWEEN_DMAAP_RETRIES = Duration.ofSeconds(10);
private static final Logger logger = LoggerFactory.getLogger(DmaapTopicConsumer.class);
private final AsyncRestClient dmaapRestClient;
private final InfiniteFlux infiniteSubmitter = new InfiniteFlux();
- private final AsyncRestClient consumerRestClient;
protected final ApplicationConfig applicationConfig;
protected final InfoType type;
protected final Jobs jobs;
AsyncRestClientFactory restclientFactory = new AsyncRestClientFactory(applicationConfig.getWebClientConfig());
this.dmaapRestClient = restclientFactory.createRestClientNoHttpProxy("");
this.applicationConfig = applicationConfig;
- this.consumerRestClient = type.isUseHttpProxy() ? restclientFactory.createRestClientUseHttpProxy("")
- : restclientFactory.createRestClientNoHttpProxy("");
this.type = type;
this.jobs = jobs;
}
private Mono<String> handleDmaapErrorResponse(Throwable t) {
logger.debug("error from DMAAP {} {}", t.getMessage(), type.getDmaapTopicUrl());
- return Mono.delay(TIME_BETWEEN_DMAAP_RETRIES).flatMap(notUsed -> Mono.empty());
+ return Mono.delay(TIME_BETWEEN_DMAAP_RETRIES) //
+ .flatMap(notUsed -> Mono.empty());
}
private Mono<String> getFromMessageRouter(String topicUrl) {
// Distibute the body to all jobs for this type
return Flux.fromIterable(this.jobs.getJobsForType(this.type)) //
- .doOnNext(job -> logger.debug("Sending to consumer {}", job.getCallbackUrl()))
- .flatMap(job -> consumerRestClient.post(job.getCallbackUrl(), body), CONCURRENCY) //
+ .doOnNext(job -> logger.debug("Sending to consumer {}", job.getCallbackUrl())) //
+ .flatMap(job -> job.getConsumerRestClient().post("", body, MediaType.APPLICATION_JSON), CONCURRENCY) //
.onErrorResume(this::handleConsumerErrorResponse);
}
}
--- /dev/null
+/*-
+ * ========================LICENSE_START=================================
+ * O-RAN-SC
+ * %%
+ * Copyright (C) 2021 Nordix Foundation
+ * %%
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ========================LICENSE_END===================================
+ */
+
+package org.oran.dmaapadapter.tasks;
+
+import org.oran.dmaapadapter.configuration.ApplicationConfig;
+import org.oran.dmaapadapter.repository.InfoType;
+import org.oran.dmaapadapter.repository.InfoTypes;
+import org.oran.dmaapadapter.repository.Jobs;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Component;
+
+@Component
+public class DmaapTopicConsumers {
+
+    /**
+     * Creates and immediately starts one {@link DmaapTopicConsumer} for every
+     * configured InfoType that has a DMAAP topic defined. Instantiated once by
+     * Spring at application startup; the consumers keep running for the
+     * lifetime of the application.
+     */
+    DmaapTopicConsumers(@Autowired ApplicationConfig appConfig, @Autowired InfoTypes types, @Autowired Jobs jobs) {
+        // Start a consumer for each type
+        for (InfoType type : types.getAll()) {
+            if (type.isDmaapTopicDefined()) {
+                DmaapTopicConsumer topicConsumer = new DmaapTopicConsumer(appConfig, type, jobs);
+                topicConsumer.start();
+            }
+        }
+    }
+
+}
--- /dev/null
+/*-
+ * ========================LICENSE_START=================================
+ * O-RAN-SC
+ * %%
+ * Copyright (C) 2021 Nordix Foundation
+ * %%
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ========================LICENSE_END===================================
+ */
+
+package org.oran.dmaapadapter.tasks;
+
+import lombok.Getter;
+
+import org.oran.dmaapadapter.repository.Job;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.http.MediaType;
+import org.springframework.web.reactive.function.client.WebClientResponseException;
+
+import reactor.core.Disposable;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+import reactor.core.publisher.Sinks.Many;
+
+/**
+ * The class streams data from a multicast sink and sends the data to the Job
+ * owner via REST calls.
+ */
+@SuppressWarnings("squid:S2629") // Invoke method(s) only conditionally
+public class KafkaJobDataConsumer {
+    private static final Logger logger = LoggerFactory.getLogger(KafkaJobDataConsumer.class);
+    @Getter
+    private final Job job;
+    // Active reactive subscription; null whenever the consumer is stopped.
+    private Disposable subscription;
+    private final ErrorStats errorStats = new ErrorStats();
+
+    /**
+     * Tracks consecutive REST-consumer failures and Kafka/stream-side errors so
+     * that a persistently failing stream can be aborted (and later restarted by
+     * the supervising KafkaTopicConsumers task).
+     */
+    private class ErrorStats {
+        private int consumerFaultCounter = 0;
+        private boolean kafkaError = false; // e.g. overflow
+
+        public void handleOkFromConsumer() {
+            // One successful delivery resets the consecutive-failure count.
+            this.consumerFaultCounter = 0;
+        }
+
+        public void handleException(Throwable t) {
+            // An HTTP error response means the consumer is reachable but
+            // rejecting data; anything else is treated as a Kafka/stream error.
+            if (t instanceof WebClientResponseException) {
+                ++this.consumerFaultCounter;
+            } else {
+                kafkaError = true;
+            }
+        }
+
+        public boolean isItHopeless() {
+            final int STOP_AFTER_ERRORS = 5;
+            // NOTE(review): '>' means the stream actually gives up on the
+            // (STOP_AFTER_ERRORS + 1)th consecutive consumer error — confirm
+            // this off-by-one is intended.
+            return kafkaError || consumerFaultCounter > STOP_AFTER_ERRORS;
+        }
+
+        public void resetKafkaErrors() {
+            kafkaError = false;
+        }
+    }
+
+    public KafkaJobDataConsumer(Job job) {
+        this.job = job;
+    }
+
+    /**
+     * (Re)starts streaming from the given multicast sink to the job owner.
+     * Any previous subscription is disposed first, so calling this again is a
+     * restart. Delivery concurrency is bounded by the job's maxConcurrency
+     * parameter.
+     */
+    public synchronized void start(Many<String> input) {
+        stop();
+        this.errorStats.resetKafkaErrors();
+        this.subscription = getMessagesFromKafka(input, job) //
+            .flatMap(this::postToClient, job.getParameters().getMaxConcurrency()) //
+            .onErrorResume(this::handleError) //
+            .subscribe(this::handleConsumerSentOk, //
+                // On an unrecoverable error (see handleError) just mark this
+                // consumer as not running; the supervisor restarts it later.
+                t -> stop(), //
+                () -> logger.warn("KafkaMessageConsumer stopped jobId: {}", job.getId()));
+    }
+
+    // POSTs one message (or one buffered batch) to the job owner's callback.
+    private Mono<String> postToClient(String body) {
+        logger.debug("Sending to consumer {} {} {}", job.getId(), job.getCallbackUrl(), body);
+        // Buffered output is a JSON array of quoted strings; unbuffered output
+        // is sent without an explicit content type.
+        MediaType contentType = this.job.isBuffered() ? MediaType.APPLICATION_JSON : null;
+        return job.getConsumerRestClient().post("", body, contentType);
+    }
+
+    public synchronized void stop() {
+        if (this.subscription != null) {
+            subscription.dispose();
+            subscription = null;
+        }
+    }
+
+    public synchronized boolean isRunning() {
+        return this.subscription != null;
+    }
+
+    // Applies the job's filter and, when configured, bufferTimeout batching.
+    private Flux<String> getMessagesFromKafka(Many<String> input, Job job) {
+        Flux<String> result = input.asFlux() //
+            .filter(job::isFilterMatch);
+
+        if (job.isBuffered()) {
+            // Each message is JSON-quoted, then List.toString() renders the
+            // buffered batch as a JSON-like array string.
+            result = result.map(this::quote) //
+                .bufferTimeout( //
+                    job.getParameters().getBufferTimeout().getMaxSize(), //
+                    job.getParameters().getBufferTimeout().getMaxTime()) //
+                .map(Object::toString);
+        }
+        return result;
+    }
+
+    // Wraps a string in double quotes, escaping embedded double quotes.
+    private String quote(String str) {
+        final String q = "\"";
+        return q + str.replace(q, "\\\"") + q;
+    }
+
+    // Records the error; returns an empty Mono to keep the stream alive unless
+    // the accumulated errors indicate the stream should terminate.
+    private Mono<String> handleError(Throwable t) {
+        logger.warn("exception: {} job: {}", t.getMessage(), job.getId());
+        this.errorStats.handleException(t);
+        if (this.errorStats.isItHopeless()) {
+            return Mono.error(t);
+        } else {
+            return Mono.empty(); // Ignore
+        }
+    }
+
+    private void handleConsumerSentOk(String data) {
+        this.errorStats.handleOkFromConsumer();
+    }
+
+}
+++ /dev/null
-/*-
- * ========================LICENSE_START=================================
- * O-RAN-SC
- * %%
- * Copyright (C) 2021 Nordix Foundation
- * %%
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ========================LICENSE_END===================================
- */
-
-package org.oran.dmaapadapter.tasks;
-
-import java.time.Duration;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.kafka.clients.consumer.ConsumerConfig;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.common.serialization.IntegerDeserializer;
-import org.apache.kafka.common.serialization.StringDeserializer;
-import org.oran.dmaapadapter.clients.AsyncRestClient;
-import org.oran.dmaapadapter.clients.AsyncRestClientFactory;
-import org.oran.dmaapadapter.configuration.ApplicationConfig;
-import org.oran.dmaapadapter.repository.InfoType;
-import org.oran.dmaapadapter.repository.Job;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import reactor.core.Disposable;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-import reactor.core.publisher.Sinks;
-import reactor.core.publisher.Sinks.Many;
-import reactor.kafka.receiver.KafkaReceiver;
-import reactor.kafka.receiver.ReceiverOptions;
-
-/**
- * The class fetches incoming requests from DMAAP and sends them further to the
- * consumers that has a job for this InformationType.
- */
-@SuppressWarnings("squid:S2629") // Invoke method(s) only conditionally
-public class KafkaTopicConsumer {
- private static final Logger logger = LoggerFactory.getLogger(KafkaTopicConsumer.class);
- private final AsyncRestClient consumerRestClient;
- private final ApplicationConfig applicationConfig;
- private final InfoType type;
- private final Many<String> consumerDistributor;
-
- public KafkaTopicConsumer(ApplicationConfig applicationConfig, InfoType type) {
- this.applicationConfig = applicationConfig;
-
- final int CONSUMER_BACKPRESSURE_BUFFER_SIZE = 10;
- this.consumerDistributor = Sinks.many().multicast().onBackpressureBuffer(CONSUMER_BACKPRESSURE_BUFFER_SIZE);
-
- AsyncRestClientFactory restclientFactory = new AsyncRestClientFactory(applicationConfig.getWebClientConfig());
- this.consumerRestClient = type.isUseHttpProxy() ? restclientFactory.createRestClientUseHttpProxy("")
- : restclientFactory.createRestClientNoHttpProxy("");
- this.type = type;
- startKafkaTopicReceiver();
- }
-
- private Disposable startKafkaTopicReceiver() {
- return KafkaReceiver.create(kafkaInputProperties()) //
- .receive() //
- .flatMap(this::onReceivedData) //
- .subscribe(null, //
- throwable -> logger.error("KafkaMessageConsumer error: {}", throwable.getMessage()), //
- () -> logger.warn("KafkaMessageConsumer stopped"));
- }
-
- private Flux<String> onReceivedData(ConsumerRecord<Integer, String> input) {
- logger.debug("Received from kafka topic: {} :{}", this.type.getKafkaInputTopic(), input.value());
- consumerDistributor.emitNext(input.value(), Sinks.EmitFailureHandler.FAIL_FAST);
- return consumerDistributor.asFlux();
- }
-
- public Disposable startDistributeToConsumer(Job job) {
- return getMessagesFromKafka(job) //
- .doOnNext(data -> logger.debug("Sending to consumer {} {} {}", job.getId(), job.getCallbackUrl(), data))
- .flatMap(body -> consumerRestClient.post(job.getCallbackUrl(), body)) //
- .onErrorResume(this::handleConsumerErrorResponse) //
- .subscribe(null, //
- throwable -> logger.error("KafkaMessageConsumer error: {}", throwable.getMessage()), //
- () -> logger.warn("KafkaMessageConsumer stopped {}", job.getType().getId()));
- }
-
- private Flux<String> getMessagesFromKafka(Job job) {
- if (job.isBuffered()) {
- return consumerDistributor.asFlux() //
- .filter(job::isFilterMatch) //
- .bufferTimeout(job.getParameters().bufferTimeout.maxSize,
- Duration.ofMillis(job.getParameters().bufferTimeout.maxTimeMiliseconds)) //
- .flatMap(o -> Flux.just(o.toString()));
- } else {
- return consumerDistributor.asFlux() //
- .filter(job::isFilterMatch);
- }
- }
-
- private Mono<String> handleConsumerErrorResponse(Throwable t) {
- logger.warn("error from CONSUMER {}", t.getMessage());
- return Mono.empty();
- }
-
- private ReceiverOptions<Integer, String> kafkaInputProperties() {
- Map<String, Object> consumerProps = new HashMap<>();
- if (this.applicationConfig.getKafkaBootStrapServers().isEmpty()) {
- logger.error("No kafka boostrap server is setup");
- }
- consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.applicationConfig.getKafkaBootStrapServers());
- consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "osc-dmaap-adaptor");
- consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
- consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
-
- return ReceiverOptions.<Integer, String>create(consumerProps)
- .subscription(Collections.singleton(this.type.getKafkaInputTopic()));
- }
-
-}
import java.util.HashMap;
import java.util.Map;
+import lombok.Getter;
+
import org.oran.dmaapadapter.configuration.ApplicationConfig;
import org.oran.dmaapadapter.repository.InfoType;
import org.oran.dmaapadapter.repository.InfoTypes;
import org.oran.dmaapadapter.repository.Job;
+import org.oran.dmaapadapter.repository.Jobs;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.scheduling.annotation.EnableScheduling;
+import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
-import reactor.core.Disposable;
-/**
- * The class fetches incoming requests from DMAAP and sends them further to the
- * consumers that has a job for this InformationType.
- */
@SuppressWarnings("squid:S2629") // Invoke method(s) only conditionally
@Component
+@EnableScheduling
public class KafkaTopicConsumers {
private static final Logger logger = LoggerFactory.getLogger(KafkaTopicConsumers.class);
- private final Map<String, KafkaTopicConsumer> topicConsumers = new HashMap<>();
- private final Map<String, Disposable> activeSubscriptions = new HashMap<>();
- private final ApplicationConfig appConfig;
+ private final Map<String, KafkaTopicListener> topicListeners = new HashMap<>(); // Key is typeId
- public KafkaTopicConsumers(@Autowired ApplicationConfig appConfig) {
- this.appConfig = appConfig;
- }
+ @Getter
+ private final Map<String, KafkaJobDataConsumer> consumers = new HashMap<>(); // Key is jobId
+
+ private static final int CONSUMER_SUPERVISION_INTERVAL_MS = 1000 * 60 * 3;
+
+ public KafkaTopicConsumers(@Autowired ApplicationConfig appConfig, @Autowired InfoTypes types,
+ @Autowired Jobs jobs) {
- public void start(InfoTypes types) {
for (InfoType type : types.getAll()) {
if (type.isKafkaTopicDefined()) {
- KafkaTopicConsumer topicConsumer = new KafkaTopicConsumer(appConfig, type);
- topicConsumers.put(type.getId(), topicConsumer);
+ KafkaTopicListener topicConsumer = new KafkaTopicListener(appConfig, type);
+ topicListeners.put(type.getId(), topicConsumer);
}
}
+
+ jobs.addObserver(new Jobs.Observer() {
+ @Override
+ public void onJobbAdded(Job job) {
+ addJob(job);
+ }
+
+ @Override
+ public void onJobRemoved(Job job) {
+ removeJob(job);
+ }
+
+ });
}
public synchronized void addJob(Job job) {
- if (this.activeSubscriptions.get(job.getId()) == null && job.getType().isKafkaTopicDefined()) {
+ if (this.consumers.get(job.getId()) == null && job.getType().isKafkaTopicDefined()) {
logger.debug("Kafka job added {}", job.getId());
- KafkaTopicConsumer topicConsumer = topicConsumers.get(job.getType().getId());
- Disposable subscription = topicConsumer.startDistributeToConsumer(job);
- activeSubscriptions.put(job.getId(), subscription);
+ KafkaTopicListener topicConsumer = topicListeners.get(job.getType().getId());
+ KafkaJobDataConsumer subscription = new KafkaJobDataConsumer(job);
+ subscription.start(topicConsumer.getOutput());
+ consumers.put(job.getId(), subscription);
}
}
public synchronized void removeJob(Job job) {
- Disposable d = activeSubscriptions.remove(job.getId());
+ KafkaJobDataConsumer d = consumers.remove(job.getId());
if (d != null) {
logger.debug("Kafka job removed {}", job.getId());
- d.dispose();
+ d.stop();
}
}
+ @Scheduled(fixedRate = CONSUMER_SUPERVISION_INTERVAL_MS)
+ public synchronized void restartNonRunningTasks() {
+
+ for (KafkaJobDataConsumer consumer : consumers.values()) {
+ if (!consumer.isRunning()) {
+ restartTopic(consumer);
+ }
+ }
+ }
+
+ private void restartTopic(KafkaJobDataConsumer consumer) {
+ InfoType type = consumer.getJob().getType();
+ KafkaTopicListener topic = this.topicListeners.get(type.getId());
+ topic.start();
+ restartConsumersOfType(topic, type);
+ }
+
+ private void restartConsumersOfType(KafkaTopicListener topic, InfoType type) {
+ this.consumers.forEach((jobId, consumer) -> {
+ if (consumer.getJob().getType().getId().equals(type.getId())) {
+ consumer.start(topic.getOutput());
+ }
+ });
+ }
}
--- /dev/null
+/*-
+ * ========================LICENSE_START=================================
+ * O-RAN-SC
+ * %%
+ * Copyright (C) 2021 Nordix Foundation
+ * %%
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ========================LICENSE_END===================================
+ */
+
+package org.oran.dmaapadapter.tasks;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.oran.dmaapadapter.configuration.ApplicationConfig;
+import org.oran.dmaapadapter.repository.InfoType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import reactor.core.Disposable;
+import reactor.core.publisher.Sinks;
+import reactor.core.publisher.Sinks.Many;
+import reactor.kafka.receiver.KafkaReceiver;
+import reactor.kafka.receiver.ReceiverOptions;
+
+/**
+ * The class streams incoming requests from a Kafka topic and sends them further
+ * to a multicast sink, which several other streams can connect to.
+ */
+@SuppressWarnings("squid:S2629") // Invoke method(s) only conditionally
+public class KafkaTopicListener {
+    private static final Logger logger = LoggerFactory.getLogger(KafkaTopicListener.class);
+    private final ApplicationConfig applicationConfig;
+    private final InfoType type;
+    // Multicast sink that fans received Kafka messages out to the
+    // per-job KafkaJobDataConsumer streams; replaced on every (re)start.
+    private Many<String> output;
+    private Disposable topicReceiverTask;
+
+    public KafkaTopicListener(ApplicationConfig applicationConfig, InfoType type) {
+        this.applicationConfig = applicationConfig;
+        this.type = type;
+        start();
+    }
+
+    public Many<String> getOutput() {
+        return this.output;
+    }
+
+    /**
+     * (Re)starts the Kafka receiver for this type's input topic. A fresh sink
+     * is created each time, so existing subscribers must re-subscribe via
+     * getOutput() after a restart (the supervisor in KafkaTopicConsumers does
+     * this).
+     */
+    public void start() {
+        stop();
+        final int CONSUMER_BACKPRESSURE_BUFFER_SIZE = 1024 * 10;
+        this.output = Sinks.many().multicast().onBackpressureBuffer(CONSUMER_BACKPRESSURE_BUFFER_SIZE);
+        logger.debug("Listening to kafka topic: {} type :{}", this.type.getKafkaInputTopic(), type.getId());
+        topicReceiverTask = KafkaReceiver.create(kafkaInputProperties()) //
+            .receive() //
+            .doOnNext(this::onReceivedData) //
+            .subscribe(null, //
+                this::onReceivedError, //
+                () -> logger.warn("KafkaTopicReceiver stopped"));
+    }
+
+    private void stop() {
+        if (topicReceiverTask != null) {
+            topicReceiverTask.dispose();
+            topicReceiverTask = null;
+        }
+    }
+
+    private void onReceivedData(ConsumerRecord<String, String> input) {
+        logger.debug("Received from kafka topic: {} :{}", this.type.getKafkaInputTopic(), input.value());
+        // NOTE(review): FAIL_FAST means a sink overflow (slow consumers filling
+        // the backpressure buffer) throws here and terminates the receiver —
+        // confirm the supervision task's restart covers this case.
+        output.emitNext(input.value(), Sinks.EmitFailureHandler.FAIL_FAST);
+    }
+
+    private void onReceivedError(Throwable t) {
+        logger.error("KafkaTopicReceiver error: {}", t.getMessage());
+    }
+
+    // Builds the reactor-kafka receiver options for this type's input topic.
+    private ReceiverOptions<String, String> kafkaInputProperties() {
+        Map<String, Object> consumerProps = new HashMap<>();
+        if (this.applicationConfig.getKafkaBootStrapServers().isEmpty()) {
+            logger.error("No kafka boostrap server is setup");
+        }
+        consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, this.applicationConfig.getKafkaBootStrapServers());
+        consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "osc-dmaap-adaptor");
+        consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+        consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+
+        return ReceiverOptions.<String, String>create(consumerProps)
+            .subscription(Collections.singleton(this.type.getKafkaInputTopic()));
+    }
+
+}
}
private void handleRegistrationCompleted() {
- logger.debug("Registering types and producer completed");
isRegisteredInEcs = true;
}
logger.warn("Registration of producer failed {}", t.getMessage());
}
+ // Returns TRUE if registration is correct
private Mono<Boolean> checkRegistration() {
final String url = applicationConfig.getEcsBaseUrl() + "/data-producer/v1/info-producers/" + PRODUCER_ID;
return restClient.get(url) //
private Mono<Boolean> isRegisterredInfoCorrect(String registerredInfoStr) {
ProducerRegistrationInfo registerredInfo = gson.fromJson(registerredInfoStr, ProducerRegistrationInfo.class);
if (isEqual(producerRegistrationInfo(), registerredInfo)) {
- logger.trace("Already registered");
+ logger.trace("Already registered in ECS");
return Mono.just(Boolean.TRUE);
} else {
return Mono.just(Boolean.FALSE);
private Mono<String> registerTypesAndProducer() {
final int CONCURRENCY = 20;
- final String producerUrl = applicationConfig.getEcsBaseUrl() + "/data-producer/v1/info-producers/"
- + PRODUCER_ID;
+ final String producerUrl =
+ applicationConfig.getEcsBaseUrl() + "/data-producer/v1/info-producers/" + PRODUCER_ID;
return Flux.fromIterable(this.types.getAll()) //
.doOnNext(type -> logger.info("Registering type {}", type.getId())) //
"filter": {
"type": "string"
},
+ "maxConcurrency": {
+ "type": "integer"
+ },
"bufferTimeout": {
"type": "object",
"properties": {
]
}
},
- "required": [
- ]
-}
+ "required": []
+}
\ No newline at end of file
ProducerJobInfo info = new ProducerJobInfo(null, "id", "typeId", "targetUri", "owner", "lastUpdated");
String body = gson.toJson(info);
- testErrorCode(restClient().post(jobUrl, body), HttpStatus.NOT_FOUND, "Could not find type");
+ testErrorCode(restClient().post(jobUrl, body, MediaType.APPLICATION_JSON), HttpStatus.NOT_FOUND,
+ "Could not find type");
}
@Test
public TestResults() {}
+ public boolean hasReceived(String str) {
+ for (String received : receivedBodies) {
+ if (received.equals(str)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
public void reset() {
receivedBodies.clear();
}
new ProducerJobInfo(job.jobDefinition, jobId, job.infoTypeId, job.jobResultUri, job.owner, "TIMESTAMP");
String body = gson.toJson(request);
logger.info("ECS Simulator PUT job: {}", body);
- restClient.post(url, body).block();
+ restClient.post(url, body, MediaType.APPLICATION_JSON).block();
}
public void deleteJob(String jobId, AsyncRestClient restClient) {
import static org.assertj.core.api.Assertions.assertThat;
import static org.awaitility.Awaitility.await;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import com.google.gson.JsonParser;
+import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
import org.oran.dmaapadapter.repository.InfoTypes;
import org.oran.dmaapadapter.repository.Job;
import org.oran.dmaapadapter.repository.Jobs;
+import org.oran.dmaapadapter.tasks.KafkaJobDataConsumer;
+import org.oran.dmaapadapter.tasks.KafkaTopicConsumers;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@Autowired
private EcsSimulatorController ecsSimulatorController;
+ @Autowired
+ private KafkaTopicConsumers kafkaTopicConsumers;
+
private com.google.gson.Gson gson = new com.google.gson.GsonBuilder().create();
private static final Logger logger = LoggerFactory.getLogger(IntegrationWithKafka.class);
return "https://localhost:" + this.applicationConfig.getLocalServerHttpPort();
}
- private Object jobParametersAsJsonObject(String filter, int maxTimeMiliseconds, int maxSize) {
- Job.Parameters param = new Job.Parameters(filter,
- new Job.Parameters.BufferTimeout(maxSize, maxTimeMiliseconds));
+ private Object jobParametersAsJsonObject(String filter, long maxTimeMiliseconds, int maxSize, int maxConcurrency) {
+ Job.Parameters param =
+ new Job.Parameters(filter, new Job.BufferTimeout(maxSize, maxTimeMiliseconds), maxConcurrency);
String str = gson.toJson(param);
return jsonObject(str);
}
}
}
- private ConsumerJobInfo consumerJobInfo(String filter, int maxTimeMiliseconds, int maxSize) {
+ private ConsumerJobInfo consumerJobInfo(String filter, Duration maxTime, int maxSize, int maxConcurrency) {
try {
InfoType type = this.types.getAll().iterator().next();
String typeId = type.getId();
String targetUri = baseUrl() + ConsumerController.CONSUMER_TARGET_URL;
- return new ConsumerJobInfo(typeId, jobParametersAsJsonObject(filter, maxTimeMiliseconds, maxSize), "owner",
- targetUri, "");
+ return new ConsumerJobInfo(typeId,
+ jobParametersAsJsonObject(filter, maxTime.toMillis(), maxSize, maxConcurrency), "owner", targetUri,
+ "");
} catch (Exception e) {
return null;
}
return SenderRecord.create(new ProducerRecord<>(infoType.getKafkaInputTopic(), i, data + i), i);
}
+ private void sendDataToStream(Flux<SenderRecord<Integer, String, Integer>> dataToSend) {
+ final KafkaSender<Integer, String> sender = KafkaSender.create(senderOptions());
+
+ sender.send(dataToSend) //
+ .doOnError(e -> logger.error("Send failed", e)) //
+ .blockLast();
+
+ }
+
+ private void verifiedReceivedByConsumer(String... strings) {
+ ConsumerController.TestResults consumer = this.consumerController.testResults;
+ await().untilAsserted(() -> assertThat(consumer.receivedBodies.size()).isEqualTo(strings.length));
+ for (String s : strings) {
+ assertTrue(consumer.hasReceived(s));
+ }
+ }
+
@Test
void kafkaIntegrationTest() throws InterruptedException {
final String JOB_ID1 = "ID1";
await().untilAsserted(() -> assertThat(ecsSimulatorController.testResults.registrationInfo).isNotNull());
assertThat(ecsSimulatorController.testResults.registrationInfo.supportedTypeIds).hasSize(1);
- // Create a job
- this.ecsSimulatorController.addJob(consumerJobInfo(".*", 10, 1000), JOB_ID1, restClient());
- this.ecsSimulatorController.addJob(consumerJobInfo(".*Message_1.*", 0, 0), JOB_ID2, restClient());
- await().untilAsserted(() -> assertThat(this.jobs.size()).isEqualTo(2));
+ // Create two jobs. One buffering and one with a filter
+ this.ecsSimulatorController.addJob(consumerJobInfo(null, Duration.ofMillis(400), 1000, 20), JOB_ID1,
+ restClient());
+ this.ecsSimulatorController.addJob(consumerJobInfo("^Message_1$", Duration.ZERO, 0, 1), JOB_ID2, restClient());
- final KafkaSender<Integer, String> sender = KafkaSender.create(senderOptions());
+ await().untilAsserted(() -> assertThat(this.jobs.size()).isEqualTo(2));
var dataToSend = Flux.range(1, 3).map(i -> senderRecord("Message_", i)); // Message_1, Message_2 etc.
+ sendDataToStream(dataToSend);
- sender.send(dataToSend) //
- .doOnError(e -> logger.error("Send failed", e)) //
- .doOnNext(senderResult -> logger.debug("Sent {}", senderResult)) //
- .doOnError(t -> logger.error("Error {}", t)) //
- .blockLast();
+ verifiedReceivedByConsumer("Message_1", "[\"Message_1\", \"Message_2\", \"Message_3\"]");
- ConsumerController.TestResults consumer = this.consumerController.testResults;
- await().untilAsserted(() -> assertThat(consumer.receivedBodies.size()).isEqualTo(2));
- assertThat(consumer.receivedBodies.get(0)).isEqualTo("Message_1");
- assertThat(consumer.receivedBodies.get(1)).isEqualTo("[Message_1, Message_2, Message_3]");
+ // Just for testing quoting
+ this.consumerController.testResults.reset();
+ dataToSend = Flux.just(senderRecord("Message\"_", 1));
+ sendDataToStream(dataToSend);
+ verifiedReceivedByConsumer("[\"Message\\\"_1\"]");
- // Delete the job
+ // Delete the jobs
this.ecsSimulatorController.deleteJob(JOB_ID1, restClient());
this.ecsSimulatorController.deleteJob(JOB_ID2, restClient());
await().untilAsserted(() -> assertThat(this.jobs.size()).isZero());
+ await().untilAsserted(() -> assertThat(this.kafkaTopicConsumers.getConsumers()).isEmpty());
+ }
+
+ @Test
+ void kafkaIOverflow() throws InterruptedException {
+ final String JOB_ID1 = "ID1";
+ final String JOB_ID2 = "ID2";
+
+ // Register producer, Register types
+ await().untilAsserted(() -> assertThat(ecsSimulatorController.testResults.registrationInfo).isNotNull());
+ assertThat(ecsSimulatorController.testResults.registrationInfo.supportedTypeIds).hasSize(1);
+
+ // Create two jobs.
+ this.ecsSimulatorController.addJob(consumerJobInfo(null, Duration.ZERO, 0, 1), JOB_ID1, restClient());
+ this.ecsSimulatorController.addJob(consumerJobInfo(null, Duration.ZERO, 0, 1), JOB_ID2, restClient());
+
+ await().untilAsserted(() -> assertThat(this.jobs.size()).isEqualTo(2));
+
+ var dataToSend = Flux.range(1, 1000000).map(i -> senderRecord("Message_", i)); // Message_1, Message_2 etc.
+ sendDataToStream(dataToSend); // this should overflow
+
+ KafkaJobDataConsumer consumer = kafkaTopicConsumers.getConsumers().values().iterator().next();
+ await().untilAsserted(() -> assertThat(consumer.isRunning()).isFalse());
+ this.consumerController.testResults.reset();
+
+ kafkaTopicConsumers.restartNonRunningTasks();
+ this.ecsSimulatorController.deleteJob(JOB_ID2, restClient()); // Delete one job
+ Thread.sleep(1000); // Restarting the input seems to take some asynchronous time
+
+ dataToSend = Flux.range(1, 1).map(i -> senderRecord("Howdy_", i));
+ sendDataToStream(dataToSend);
+
+ verifiedReceivedByConsumer("Howdy_1");
}
}
##
## Build
##
-FROM golang:1.17-bullseye AS build
+FROM nexus3.o-ran-sc.org:10001/golang:1.17-bullseye AS build
WORKDIR /app
COPY go.mod .
COPY go.sum .
At start up the producer will register the configured job types in ICS and also register itself as a producer supporting these types. If ICS is unavailable, the producer will retry to connect indefinitely. The same goes for MR.
-Once the initial registration is done, the producer will constantly poll MR for all configured job types. When receiving messages for a type, it will distribute these messages to all jobs registered for the type. If no jobs for that type are registered, the messages will be discarded. If a consumer is unavailable for distribution, the messages will be discarded for that consumer.
+Once the initial registration is done, the producer will constantly poll MR for all configured job types. When receiving messages for a type, it will distribute these messages to all jobs registered for the type. If no jobs for that type are registered, the messages will be discarded. If a consumer is unavailable for distribution, the messages will be discarded for that consumer until it is available again.
## Development
type TypeData struct {
TypeId string `json:"id"`
DMaaPTopicURL string `json:"dmaapTopicUrl"`
- jobHandler *jobHandler
+ jobsHandler *jobsHandler
}
type JobInfo struct {
}
type JobsManager interface {
- AddJob(JobInfo) error
- DeleteJob(jobId string)
+ AddJobFromRESTCall(JobInfo) error
+ DeleteJobFromRESTCall(jobId string)
}
type JobsManagerImpl struct {
distributeClient restclient.HTTPClient
}
-type jobHandler struct {
- mu sync.Mutex
- typeId string
- topicUrl string
- jobs map[string]JobInfo
- addJobCh chan JobInfo
- deleteJobCh chan string
- pollClient restclient.HTTPClient
- distributeClient restclient.HTTPClient
-}
-
func NewJobsManagerImpl(typeConfigFilePath string, pollClient restclient.HTTPClient, mrAddr string, distributeClient restclient.HTTPClient) *JobsManagerImpl {
return &JobsManagerImpl{
configFile: typeConfigFilePath,
}
}
-func (jm *JobsManagerImpl) AddJob(ji JobInfo) error {
+func (jm *JobsManagerImpl) AddJobFromRESTCall(ji JobInfo) error {
if err := jm.validateJobInfo(ji); err == nil {
typeData := jm.allTypes[ji.InfoTypeIdentity]
- typeData.jobHandler.addJobCh <- ji
+ typeData.jobsHandler.addJobCh <- ji
log.Debug("Added job: ", ji)
return nil
} else {
}
}
-func (jm *JobsManagerImpl) DeleteJob(jobId string) {
+func (jm *JobsManagerImpl) DeleteJobFromRESTCall(jobId string) {
for _, typeData := range jm.allTypes {
log.Debugf("Deleting job %v from type %v", jobId, typeData.TypeId)
- typeData.jobHandler.deleteJobCh <- jobId
+ typeData.jobsHandler.deleteJobCh <- jobId
}
log.Debug("Deleted job: ", jobId)
}
return nil, err
}
for _, typeDef := range typeDefs.Types {
- addCh := make(chan JobInfo)
- deleteCh := make(chan string)
- jh := jobHandler{
- typeId: typeDef.Id,
- topicUrl: typeDef.DmaapTopicURL,
- jobs: make(map[string]JobInfo),
- addJobCh: addCh,
- deleteJobCh: deleteCh,
- pollClient: jm.pollClient,
- distributeClient: jm.distributeClient,
- }
jm.allTypes[typeDef.Id] = TypeData{
TypeId: typeDef.Id,
DMaaPTopicURL: typeDef.DmaapTopicURL,
- jobHandler: &jh,
+ jobsHandler: newJobsHandler(typeDef.Id, typeDef.DmaapTopicURL, jm.pollClient, jm.distributeClient),
}
}
return typeDefs.Types, nil
return supportedTypes
}
-func (jm *JobsManagerImpl) StartJobs() {
+func (jm *JobsManagerImpl) StartJobsForAllTypes() {
for _, jobType := range jm.allTypes {
- go jobType.jobHandler.start(jm.mrAddress)
+ go jobType.jobsHandler.startPollingAndDistribution(jm.mrAddress)
+
+ }
+}
+
+type jobsHandler struct {
+ mu sync.Mutex
+ typeId string
+ topicUrl string
+ jobs map[string]job
+ addJobCh chan JobInfo
+ deleteJobCh chan string
+ pollClient restclient.HTTPClient
+ distributeClient restclient.HTTPClient
+}
+func newJobsHandler(typeId string, topicURL string, pollClient restclient.HTTPClient, distributeClient restclient.HTTPClient) *jobsHandler {
+ return &jobsHandler{
+ typeId: typeId,
+ topicUrl: topicURL,
+ jobs: make(map[string]job),
+ addJobCh: make(chan JobInfo),
+ deleteJobCh: make(chan string),
+ pollClient: pollClient,
+ distributeClient: distributeClient,
}
}
-func (jh *jobHandler) start(mRAddress string) {
+func (jh *jobsHandler) startPollingAndDistribution(mRAddress string) {
go func() {
for {
jh.pollAndDistributeMessages(mRAddress)
}()
}
-func (jh *jobHandler) pollAndDistributeMessages(mRAddress string) {
+func (jh *jobsHandler) pollAndDistributeMessages(mRAddress string) {
log.Debugf("Processing jobs for type: %v", jh.typeId)
messagesBody, error := restclient.Get(mRAddress+jh.topicUrl, jh.pollClient)
if error != nil {
- log.Warnf("Error getting data from MR. Cause: %v", error)
+ log.Warn("Error getting data from MR. Cause: ", error)
}
- log.Debugf("Received messages: %v", string(messagesBody))
+ log.Debug("Received messages: ", string(messagesBody))
jh.distributeMessages(messagesBody)
}
-func (jh *jobHandler) distributeMessages(messages []byte) {
+func (jh *jobsHandler) distributeMessages(messages []byte) {
if len(messages) > 2 {
jh.mu.Lock()
defer jh.mu.Unlock()
- for _, jobInfo := range jh.jobs {
- go jh.sendMessagesToConsumer(messages, jobInfo)
+ for _, job := range jh.jobs {
+ if len(job.messagesChannel) < cap(job.messagesChannel) {
+ job.messagesChannel <- messages
+ } else {
+ jh.emptyMessagesBuffer(job)
+ }
}
}
}
-func (jh *jobHandler) sendMessagesToConsumer(messages []byte, jobInfo JobInfo) {
- log.Debugf("Processing job: %v", jobInfo.InfoJobIdentity)
- if postErr := restclient.Post(jobInfo.TargetUri, messages, jh.distributeClient); postErr != nil {
- log.Warnf("Error posting data for job: %v. Cause: %v", jobInfo, postErr)
+func (jh *jobsHandler) emptyMessagesBuffer(job job) {
+ log.Debug("Emptying message queue for job: ", job.jobInfo.InfoJobIdentity)
+out:
+ for {
+ select {
+ case <-job.messagesChannel:
+ default:
+ break out
+ }
}
- log.Debugf("Messages distributed to consumer: %v.", jobInfo.Owner)
}
-func (jh *jobHandler) monitorManagementChannels() {
+func (jh *jobsHandler) monitorManagementChannels() {
select {
case addedJob := <-jh.addJobCh:
- jh.mu.Lock()
- log.Debugf("received %v from addJobCh\n", addedJob)
- jh.jobs[addedJob.InfoJobIdentity] = addedJob
- jh.mu.Unlock()
+ jh.addJob(addedJob)
case deletedJob := <-jh.deleteJobCh:
- jh.mu.Lock()
- log.Debugf("received %v from deleteJobCh\n", deletedJob)
+ jh.deleteJob(deletedJob)
+ }
+}
+
+func (jh *jobsHandler) addJob(addedJob JobInfo) {
+ jh.mu.Lock()
+ log.Debug("Add job: ", addedJob)
+ newJob := newJob(addedJob, jh.distributeClient)
+ go newJob.start()
+ jh.jobs[addedJob.InfoJobIdentity] = newJob
+ jh.mu.Unlock()
+}
+
+func (jh *jobsHandler) deleteJob(deletedJob string) {
+ jh.mu.Lock()
+ log.Debug("Delete job: ", deletedJob)
+ j, exist := jh.jobs[deletedJob]
+ if exist {
+ j.controlChannel <- struct{}{}
delete(jh.jobs, deletedJob)
- jh.mu.Unlock()
}
+ jh.mu.Unlock()
+}
+
+type job struct {
+ jobInfo JobInfo
+ client restclient.HTTPClient
+ messagesChannel chan []byte
+ controlChannel chan struct{}
+}
+
+func newJob(j JobInfo, c restclient.HTTPClient) job {
+ return job{
+ jobInfo: j,
+ client: c,
+ messagesChannel: make(chan []byte, 10),
+ controlChannel: make(chan struct{}),
+ }
+}
+
+func (j *job) start() {
+out:
+ for {
+ select {
+ case <-j.controlChannel:
+ log.Debug("Stop distribution for job: ", j.jobInfo.InfoJobIdentity)
+ break out
+ case msg := <-j.messagesChannel:
+ j.sendMessagesToConsumer(msg)
+ }
+ }
+}
+
+func (j *job) sendMessagesToConsumer(messages []byte) {
+ log.Debug("Processing job: ", j.jobInfo.InfoJobIdentity)
+ if postErr := restclient.Post(j.jobInfo.TargetUri, messages, j.client); postErr != nil {
+ log.Warnf("Error posting data for job: %v. Cause: %v", j.jobInfo, postErr)
+ }
+ log.Debugf("Messages for job: %v distributed to consumer: %v", j.jobInfo.InfoJobIdentity, j.jobInfo.Owner)
}
const typeDefinition = `{"types": [{"id": "type1", "dmaapTopicUrl": "events/unauthenticated.SEC_FAULT_OUTPUT/dmaapmediatorproducer/type1"}]}`
-func TestGetTypes_filesOkShouldReturnSliceOfTypesAndProvideSupportedTypes(t *testing.T) {
+func TestJobsManagerGetTypes_filesOkShouldReturnSliceOfTypesAndProvideSupportedTypes(t *testing.T) {
assertions := require.New(t)
typesDir, err := os.MkdirTemp("", "configs")
if err != nil {
assertions.EqualValues([]string{"type1"}, supportedTypes)
}
-func TestManagerAddJobWhenTypeIsSupported_shouldAddJobToChannel(t *testing.T) {
+func TestJobsManagerAddJobWhenTypeIsSupported_shouldAddJobToChannel(t *testing.T) {
assertions := require.New(t)
managerUnderTest := NewJobsManagerImpl("", nil, "", nil)
wantedJob := JobInfo{
InfoJobData: "{}",
InfoTypeIdentity: "type1",
}
- jobHandler := jobHandler{
+ jobsHandler := jobsHandler{
addJobCh: make(chan JobInfo)}
managerUnderTest.allTypes["type1"] = TypeData{
- TypeId: "type1",
- jobHandler: &jobHandler,
+ TypeId: "type1",
+ jobsHandler: &jobsHandler,
}
var err error
go func() {
- err = managerUnderTest.AddJob(wantedJob)
+ err = managerUnderTest.AddJobFromRESTCall(wantedJob)
}()
assertions.Nil(err)
- addedJob := <-jobHandler.addJobCh
+ addedJob := <-jobsHandler.addJobCh
assertions.Equal(wantedJob, addedJob)
}
-func TestManagerAddJobWhenTypeIsNotSupported_shouldReturnError(t *testing.T) {
+func TestJobsManagerAddJobWhenTypeIsNotSupported_shouldReturnError(t *testing.T) {
assertions := require.New(t)
managerUnderTest := NewJobsManagerImpl("", nil, "", nil)
jobInfo := JobInfo{
InfoTypeIdentity: "type1",
}
- err := managerUnderTest.AddJob(jobInfo)
+ err := managerUnderTest.AddJobFromRESTCall(jobInfo)
assertions.NotNil(err)
assertions.Equal("type not supported: type1", err.Error())
}
-func TestManagerAddJobWhenJobIdMissing_shouldReturnError(t *testing.T) {
+func TestJobsManagerAddJobWhenJobIdMissing_shouldReturnError(t *testing.T) {
assertions := require.New(t)
managerUnderTest := NewJobsManagerImpl("", nil, "", nil)
managerUnderTest.allTypes["type1"] = TypeData{
jobInfo := JobInfo{
InfoTypeIdentity: "type1",
}
- err := managerUnderTest.AddJob(jobInfo)
+ err := managerUnderTest.AddJobFromRESTCall(jobInfo)
assertions.NotNil(err)
assertions.Equal("missing required job identity: { <nil> type1}", err.Error())
}
-func TestManagerAddJobWhenTargetUriMissing_shouldReturnError(t *testing.T) {
+func TestJobsManagerAddJobWhenTargetUriMissing_shouldReturnError(t *testing.T) {
assertions := require.New(t)
managerUnderTest := NewJobsManagerImpl("", nil, "", nil)
managerUnderTest.allTypes["type1"] = TypeData{
InfoTypeIdentity: "type1",
InfoJobIdentity: "job1",
}
- err := managerUnderTest.AddJob(jobInfo)
+ err := managerUnderTest.AddJobFromRESTCall(jobInfo)
assertions.NotNil(err)
assertions.Equal("missing required target URI: { job1 <nil> type1}", err.Error())
}
-func TestManagerDeleteJob(t *testing.T) {
+func TestJobsManagerDeleteJob_shouldSendDeleteToChannel(t *testing.T) {
assertions := require.New(t)
managerUnderTest := NewJobsManagerImpl("", nil, "", nil)
- jobHandler := jobHandler{
+ jobsHandler := jobsHandler{
deleteJobCh: make(chan string)}
managerUnderTest.allTypes["type1"] = TypeData{
- TypeId: "type1",
- jobHandler: &jobHandler,
+ TypeId: "type1",
+ jobsHandler: &jobsHandler,
}
- go managerUnderTest.DeleteJob("job2")
+ go managerUnderTest.DeleteJobFromRESTCall("job2")
- assertions.Equal("job2", <-jobHandler.deleteJobCh)
+ assertions.Equal("job2", <-jobsHandler.deleteJobCh)
}
-func TestHandlerPollAndDistributeMessages(t *testing.T) {
+func TestAddJobToJobsManager_shouldStartPollAndDistributeMessages(t *testing.T) {
assertions := require.New(t)
- wg := sync.WaitGroup{}
+ called := false
messages := `[{"message": {"data": "data"}}]`
pollClientMock := NewTestClient(func(req *http.Request) *http.Response {
if req.URL.String() == "http://mrAddr/topicUrl" {
assertions.Equal(req.Method, "GET")
- wg.Done() // Signal that the poll call has been made
+ body := "[]"
+ if !called {
+ called = true
+ body = messages
+ }
return &http.Response{
StatusCode: 200,
- Body: ioutil.NopCloser(bytes.NewReader([]byte(messages))),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte(body))),
Header: make(http.Header), // Must be set to non-nil value or it panics
}
}
t.Fail()
return nil
})
+
+ wg := sync.WaitGroup{}
distributeClientMock := NewTestClient(func(req *http.Request) *http.Response {
if req.URL.String() == "http://consumerHost/target" {
assertions.Equal(req.Method, "POST")
- assertions.Equal(messages, getBodyAsString(req))
+ assertions.Equal(messages, getBodyAsString(req, t))
assertions.Equal("application/json", req.Header.Get("Content-Type"))
- wg.Done() // Signal that the distribution call has been made
+ wg.Done()
return &http.Response{
StatusCode: 200,
Body: ioutil.NopCloser(bytes.NewBufferString(`OK`)),
t.Fail()
return nil
})
+ jobsHandler := newJobsHandler("type1", "/topicUrl", pollClientMock, distributeClientMock)
+
+ jobsManager := NewJobsManagerImpl("", pollClientMock, "http://mrAddr", distributeClientMock)
+ jobsManager.allTypes["type1"] = TypeData{
+ DMaaPTopicURL: "/topicUrl",
+ TypeId: "type1",
+ jobsHandler: jobsHandler,
+ }
+
+ jobsManager.StartJobsForAllTypes()
jobInfo := JobInfo{
InfoTypeIdentity: "type1",
InfoJobIdentity: "job1",
TargetUri: "http://consumerHost/target",
}
- handlerUnderTest := jobHandler{
- topicUrl: "/topicUrl",
- jobs: map[string]JobInfo{jobInfo.InfoJobIdentity: jobInfo},
- pollClient: pollClientMock,
- distributeClient: distributeClientMock,
- }
- wg.Add(2) // Two calls should be made to the server, one to poll and one to distribute
- handlerUnderTest.pollAndDistributeMessages("http://mrAddr")
+ wg.Add(1) // Wait till the distribution has happened
+ err := jobsManager.AddJobFromRESTCall(jobInfo)
+ assertions.Nil(err)
- if waitTimeout(&wg, 100*time.Millisecond) {
+ if waitTimeout(&wg, 2*time.Second) {
t.Error("Not all calls to server were made")
t.Fail()
}
}
-func TestHandlerAddJob_shouldAddJobToJobsMap(t *testing.T) {
- assertions := require.New(t)
+func TestJobsHandlerDeleteJob_shouldDeleteJobFromJobsMap(t *testing.T) {
+ jobToDelete := newJob(JobInfo{}, nil)
+ go jobToDelete.start()
+ jobsHandler := newJobsHandler("type1", "/topicUrl", nil, nil)
+ jobsHandler.jobs["job1"] = jobToDelete
- jobInfo := JobInfo{
- InfoTypeIdentity: "type1",
- InfoJobIdentity: "job1",
- TargetUri: "http://consumerHost/target",
- }
+ go jobsHandler.monitorManagementChannels()
- addCh := make(chan JobInfo)
- handlerUnderTest := jobHandler{
- mu: sync.Mutex{},
- jobs: map[string]JobInfo{},
- addJobCh: addCh,
- }
+ jobsHandler.deleteJobCh <- "job1"
- go func() {
- addCh <- jobInfo
- }()
-
- handlerUnderTest.monitorManagementChannels()
-
- assertions.Len(handlerUnderTest.jobs, 1)
- assertions.Equal(jobInfo, handlerUnderTest.jobs["job1"])
+ deleted := false
+ for i := 0; i < 100; i++ {
+ if len(jobsHandler.jobs) == 0 {
+ deleted = true
+ break
+ }
+ time.Sleep(time.Microsecond) // Need to drop control to let the job's goroutine do the job
+ }
+ require.New(t).True(deleted, "Job not deleted")
}
-func TestHandlerDeleteJob_shouldDeleteJobFromJobsMap(t *testing.T) {
- assertions := require.New(t)
+func TestJobsHandlerEmptyJobMessageBufferWhenItIsFull(t *testing.T) {
+ job := newJob(JobInfo{
+ InfoJobIdentity: "job",
+ }, nil)
- deleteCh := make(chan string)
- handlerUnderTest := jobHandler{
- mu: sync.Mutex{},
- jobs: map[string]JobInfo{"job1": {
- InfoJobIdentity: "job1",
- }},
- deleteJobCh: deleteCh,
- }
+ jobsHandler := newJobsHandler("type1", "/topicUrl", nil, nil)
+ jobsHandler.jobs["job1"] = job
- go func() {
- deleteCh <- "job1"
- }()
+ fillMessagesBuffer(job.messagesChannel)
- handlerUnderTest.monitorManagementChannels()
+ jobsHandler.distributeMessages([]byte("sent msg"))
- assertions.Len(handlerUnderTest.jobs, 0)
+ require.New(t).Len(job.messagesChannel, 0)
+}
+
+func fillMessagesBuffer(mc chan []byte) {
+ for i := 0; i < cap(mc); i++ {
+ mc <- []byte("msg")
+ }
}
type RoundTripFunc func(req *http.Request) *http.Response
}
}
-func getBodyAsString(req *http.Request) string {
+func getBodyAsString(req *http.Request, t *testing.T) string {
buf := new(bytes.Buffer)
- buf.ReadFrom(req.Body)
+ if _, err := buf.ReadFrom(req.Body); err != nil {
+ t.Fail()
+ }
return buf.String()
}
http.Error(w, fmt.Sprintf("Invalid json body. Cause: %v", unmarshalErr), http.StatusBadRequest)
return
}
- if err := h.jobsManager.AddJob(jobInfo); err != nil {
+ if err := h.jobsManager.AddJobFromRESTCall(jobInfo); err != nil {
http.Error(w, fmt.Sprintf("Invalid job info. Cause: %v", err), http.StatusBadRequest)
}
}
return
}
- h.jobsManager.DeleteJob(id)
+ h.jobsManager.DeleteJobFromRESTCall(id)
}
type notFoundHandler struct{}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
jobHandlerMock := jobhandler.JobHandler{}
- jobHandlerMock.On("AddJob", tt.args.job).Return(tt.args.mockReturn)
+ jobHandlerMock.On("AddJobFromRESTCall", tt.args.job).Return(tt.args.mockReturn)
callbackHandlerUnderTest := NewProducerCallbackHandler(&jobHandlerMock)
assertions.Equal(tt.wantedStatus, responseRecorder.Code, tt.name)
assertions.Contains(responseRecorder.Body.String(), tt.wantedBody, tt.name)
- jobHandlerMock.AssertCalled(t, "AddJob", tt.args.job)
+ jobHandlerMock.AssertCalled(t, "AddJobFromRESTCall", tt.args.job)
})
}
}
func TestDeleteJob(t *testing.T) {
assertions := require.New(t)
jobHandlerMock := jobhandler.JobHandler{}
- jobHandlerMock.On("DeleteJob", mock.Anything).Return(nil)
+ jobHandlerMock.On("DeleteJobFromRESTCall", mock.Anything).Return(nil)
callbackHandlerUnderTest := NewProducerCallbackHandler(&jobHandlerMock)
assertions.Equal("", responseRecorder.Body.String())
- jobHandlerMock.AssertCalled(t, "DeleteJob", "job1")
+ jobHandlerMock.AssertCalled(t, "DeleteJobFromRESTCall", "job1")
}
func newRequest(method string, url string, jobInfo *jobs.JobInfo, t *testing.T) *http.Request {
if err := registerTypesAndProducer(jobsManager, configuration.InfoCoordinatorAddress, callbackAddress, retryClient); err != nil {
log.Fatalf("Stopping producer due to: %v", err)
}
- jobsManager.StartJobs()
+ jobsManager.StartJobsForAllTypes()
log.Debug("Starting DMaaP Mediator Producer")
go func() {
}
// AddJob provides a mock function with given fields: _a0
-func (_m *JobHandler) AddJob(_a0 jobs.JobInfo) error {
+func (_m *JobHandler) AddJobFromRESTCall(_a0 jobs.JobInfo) error {
ret := _m.Called(_a0)
var r0 error
}
// DeleteJob provides a mock function with given fields: jobId
-func (_m *JobHandler) DeleteJob(jobId string) {
+func (_m *JobHandler) DeleteJobFromRESTCall(jobId string) {
_m.Called(jobId)
}
registerJob(*port)
fmt.Print("Starting consumer on port: ", *port)
- http.ListenAndServe(fmt.Sprintf(":%v", *port), nil)
+ fmt.Println(http.ListenAndServe(fmt.Sprintf(":%v", *port), nil))
}
func registerJob(port int) {
http.HandleFunc("/events/unauthenticated.SEC_FAULT_OUTPUT/dmaapmediatorproducer/STD_Fault_Messages", handleData)
fmt.Print("Starting mr on port: ", *port)
- http.ListenAndServeTLS(fmt.Sprintf(":%v", *port), "../../security/producer.crt", "../../security/producer.key", nil)
+ fmt.Println(http.ListenAndServeTLS(fmt.Sprintf(":%v", *port), "../../security/producer.crt", "../../security/producer.key", nil))
}
--- /dev/null
+# ============LICENSE_START===============================================
+# Copyright (C) 2021 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+#PMS
+PMS_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-policy-agent"
+PMS_IMAGE_TAG="2.2.0"
+
+#A1_SIM
+A1_SIM_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/a1-simulator"
+A1_SIM_IMAGE_TAG="2.1.0"
+
+#RAPP
+RAPP_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-r-app-catalogue"
+RAPP_IMAGE_TAG="1.0.0"
+
+#CONTROL_PANEL
+CONTROL_PANEL_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-controlpanel"
+CONTROL_PANEL_IMAGE_TAG="2.2.0"
+
+#GATEWAY
+NONRTRIC_GATEWAY_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-gateway"
+NONRTRIC_GATEWAY_IMAGE_TAG="1.0.0"
+
+#ECS
+ECS_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-enrichment-coordinator-service"
+ECS_IMAGE_TAG="1.1.0"
+
+#CONSUMER
+CONSUMER_IMAGE_BASE="eexit/mirror-http-server"
+CONSUMER_IMAGE_TAG="latest"
+
+#ORU
+ORU_APP_IMAGE_BASE="nexus3.o-ran-sc.org:10002/o-ran-sc/nonrtric-o-ru-closed-loop-recovery"
+ORU_APP_IMAGE_TAG="1.0.0"
+
+#DB
+DB_IMAGE_BASE="mysql/mysql-server"
+DB_IMAGE_TAG="5.6"
+
+#A1CONTROLLER
+A1CONTROLLER_IMAGE_BASE="nexus3.onap.org:10002/onap/sdnc-image"
+A1CONTROLLER_IMAGE_TAG="2.1.2"
+
+#DMAAP_MEDIATOR_GO
+DMAAP_MEDIATOR_GO_BASE="nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-dmaap-mediator-producer"
+DMAAP_MEDIATOR_GO_TAG="1.0.0"
+
+#DMAAP_MEDIATOR_JAVA
+DMAAP_MEDIATOR_JAVA_BASE="nexus3.o-ran-sc.org:10003/o-ran-sc/nonrtric-dmaap-adaptor"
+DMAAP_MEDIATOR_JAVA_TAG="1.0.0-SNAPSHOT"
\ No newline at end of file
services:
a1-sim-OSC:
- image: nexus3.o-ran-sc.org:10002/o-ran-sc/a1-simulator:2.1.0
+ image: "${A1_SIM_IMAGE_BASE}:${A1_SIM_IMAGE_TAG}"
container_name: a1-sim-OSC
networks:
- default
- ALLOW_HTTP=true
a1-sim-STD:
- image: nexus3.o-ran-sc.org:10002/o-ran-sc/a1-simulator:2.1.0
+ image: "${A1_SIM_IMAGE_BASE}:${A1_SIM_IMAGE_TAG}"
container_name: a1-sim-STD
networks:
- default
- ALLOW_HTTP=true
a1-sim-STD-v2:
- image: nexus3.o-ran-sc.org:10002/o-ran-sc/a1-simulator:2.1.0
+ image: "${A1_SIM_IMAGE_BASE}:${A1_SIM_IMAGE_TAG}"
container_name: a1-sim-STD-v2
networks:
- default
services:
dmaap-mediator-go:
- image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-dmaap-mediator-producer:1.0.0
+ image: "${DMAAP_MEDIATOR_GO_BASE}:${DMAAP_MEDIATOR_GO_TAG}"
container_name: dmaap-mediator-go
environment:
- INFO_PRODUCER_HOST=http://consumer
- - LOG_LEVEL=Debug
- INFO_PRODUCER_PORT=8088
- INFO_COORD_ADDR=http://ecs:8083
- - MR_HOST=http://dmaap-mr
- - MR_PORT=3904
- - INFO_PRODUCER_SUPERVISION_CALLBACK_HOST=http://consumer
- - INFO_PRODUCER_SUPERVISION_CALLBACK_PORT=8088
- - INFO_JOB_CALLBACK_HOST=http://consumer
- - INFO_JOB_CALLBACK_PORT=8088
+ - DMAAP_MR_ADDR=http://dmaap-mr:3904
+ - PRODUCER_CERT_PATH=security/producer.crt
+ - PRODUCER_KEY_PATH=security/producer.key
+ - LOG_LEVEL=Debug
networks:
- default
\ No newline at end of file
services:
dmaap-mediator-java:
- image: nexus3.o-ran-sc.org:10003/o-ran-sc/nonrtric-dmaap-adaptor:1.0.0-SNAPSHOT
+ image: "${DMAAP_MEDIATOR_JAVA_BASE}:${DMAAP_MEDIATOR_JAVA_TAG}"
container_name: dmaap-mediator-java
networks:
- default
services:
ecs:
- image: nexus3.o-ran-sc.org:10003/o-ran-sc/nonrtric-enrichment-coordinator-service:1.2.0-SNAPSHOT
+ image: "${ECS_IMAGE_BASE}:${ECS_IMAGE_TAG}"
container_name: ecs
networks:
default:
- 8083:8083
- 8434:8434
consumer:
- image: eexit/mirror-http-server
+ image: "${CONSUMER_IMAGE_BASE}:${CONSUMER_IMAGE_TAG}"
container_name: consumer
networks:
- default
services:
policy-agent:
- image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-policy-agent:2.3.0
+ image: "${PMS_IMAGE_BASE}:${PMS_IMAGE_TAG}"
container_name: policy-agent
networks:
default:
services:
r-app:
- image: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-r-app-catalogue:1.1.0
+ image: "${RAPP_IMAGE_BASE}:${RAPP_IMAGE_TAG}"
container_name: r-app
networks:
default:
logger.debug("{} POST uri = '{}{}''", traceTag, baseUrl, uri);
logger.trace("{} POST body: {}", traceTag, body);
Mono<String> bodyProducer = body != null ? Mono.just(body) : Mono.empty();
- return getWebClient() //
- .flatMap(client -> {
- RequestHeadersSpec<?> request = client.post() //
- .uri(uri) //
- .contentType(MediaType.APPLICATION_JSON) //
- .body(bodyProducer, String.class);
- return retrieve(traceTag, request);
- });
+
+ RequestHeadersSpec<?> request = getWebClient() //
+ .post() //
+ .uri(uri) //
+ .contentType(MediaType.APPLICATION_JSON) //
+ .body(bodyProducer, String.class);
+ return retrieve(traceTag, request);
}
public Mono<String> post(String uri, @Nullable String body) {
return postForEntity(uri, body) //
- .flatMap(this::toBody);
+ .map(this::toBody);
}
public Mono<String> postWithAuthHeader(String uri, String body, String username, String password) {
Object traceTag = createTraceTag();
logger.debug("{} POST (auth) uri = '{}{}''", traceTag, baseUrl, uri);
logger.trace("{} POST body: {}", traceTag, body);
- return getWebClient() //
- .flatMap(client -> {
- RequestHeadersSpec<?> request = client.post() //
- .uri(uri) //
- .headers(headers -> headers.setBasicAuth(username, password)) //
- .contentType(MediaType.APPLICATION_JSON) //
- .bodyValue(body);
- return retrieve(traceTag, request) //
- .flatMap(this::toBody);
- });
+
+ RequestHeadersSpec<?> request = getWebClient() //
+ .post() //
+ .uri(uri) //
+ .headers(headers -> headers.setBasicAuth(username, password)) //
+ .contentType(MediaType.APPLICATION_JSON) //
+ .bodyValue(body);
+ return retrieve(traceTag, request) //
+ .map(this::toBody);
}
public Mono<ResponseEntity<String>> putForEntity(String uri, String body) {
Object traceTag = createTraceTag();
logger.debug("{} PUT uri = '{}{}''", traceTag, baseUrl, uri);
logger.trace("{} PUT body: {}", traceTag, body);
- return getWebClient() //
- .flatMap(client -> {
- RequestHeadersSpec<?> request = client.put() //
- .uri(uri) //
- .contentType(MediaType.APPLICATION_JSON) //
- .bodyValue(body);
- return retrieve(traceTag, request);
- });
+
+ RequestHeadersSpec<?> request = getWebClient() //
+ .put() //
+ .uri(uri) //
+ .contentType(MediaType.APPLICATION_JSON) //
+ .bodyValue(body);
+ return retrieve(traceTag, request);
}
public Mono<ResponseEntity<String>> putForEntity(String uri) {
Object traceTag = createTraceTag();
logger.debug("{} PUT uri = '{}{}''", traceTag, baseUrl, uri);
logger.trace("{} PUT body: <empty>", traceTag);
- return getWebClient() //
- .flatMap(client -> {
- RequestHeadersSpec<?> request = client.put() //
- .uri(uri);
- return retrieve(traceTag, request);
- });
+ RequestHeadersSpec<?> request = getWebClient() //
+ .put() //
+ .uri(uri);
+ return retrieve(traceTag, request);
}
public Mono<String> put(String uri, String body) {
return putForEntity(uri, body) //
- .flatMap(this::toBody);
+ .map(this::toBody);
}
public Mono<ResponseEntity<String>> getForEntity(String uri) {
Object traceTag = createTraceTag();
logger.debug("{} GET uri = '{}{}''", traceTag, baseUrl, uri);
- return getWebClient() //
- .flatMap(client -> {
- RequestHeadersSpec<?> request = client.get().uri(uri);
- return retrieve(traceTag, request);
- });
+ RequestHeadersSpec<?> request = getWebClient().get().uri(uri);
+ return retrieve(traceTag, request);
}
public Mono<String> get(String uri) {
return getForEntity(uri) //
- .flatMap(this::toBody);
+ .map(this::toBody);
}
public Mono<ResponseEntity<String>> deleteForEntity(String uri) {
Object traceTag = createTraceTag();
logger.debug("{} DELETE uri = '{}{}''", traceTag, baseUrl, uri);
- return getWebClient() //
- .flatMap(client -> {
- RequestHeadersSpec<?> request = client.delete().uri(uri);
- return retrieve(traceTag, request);
- });
+ RequestHeadersSpec<?> request = getWebClient().delete().uri(uri);
+ return retrieve(traceTag, request);
}
public Mono<String> delete(String uri) {
return deleteForEntity(uri) //
- .flatMap(this::toBody);
+ .map(this::toBody);
}
private Mono<ResponseEntity<String>> retrieve(Object traceTag, RequestHeadersSpec<?> request) {
}
}
- private Mono<String> toBody(ResponseEntity<String> entity) {
+ private String toBody(ResponseEntity<String> entity) {
if (entity.getBody() == null) {
- return Mono.just("");
+ return "";
} else {
- return Mono.just(entity.getBody());
+ return entity.getBody();
}
}
.build();
}
- private Mono<WebClient> getWebClient() {
+ private WebClient getWebClient() {
if (this.webClient == null) {
this.webClient = buildWebClient(baseUrl);
}
- return Mono.just(buildWebClient(baseUrl));
+ return this.webClient;
}
-
}
return validatePutEiJob(eiJobId, eiJobObject) //
.flatMap(this::startEiJob) //
.doOnNext(newEiJob -> this.eiJobs.put(newEiJob)) //
- .flatMap(newEiJob -> Mono.just(new ResponseEntity<>(isNewJob ? HttpStatus.CREATED : HttpStatus.OK)))
+ .map(newEiJob -> new ResponseEntity<>(isNewJob ? HttpStatus.CREATED : HttpStatus.OK)) //
.onErrorResume(throwable -> Mono.just(ErrorResponse.create(throwable, HttpStatus.INTERNAL_SERVER_ERROR)));
}
return this.producerCallbacks.startInfoSubscriptionJob(newEiJob, infoProducers) //
.doOnNext(noOfAcceptingProducers -> this.logger.debug(
"Started EI job {}, number of activated producers: {}", newEiJob.getId(), noOfAcceptingProducers)) //
- .flatMap(noOfAcceptingProducers -> Mono.just(newEiJob));
+ .map(noOfAcceptingProducers -> newEiJob);
}
private Mono<InfoJob> validatePutEiJob(String eiJobId, A1eEiJobInfo eiJobInfo) {
return validatePutInfoJob(jobId, informationJobObject, performTypeCheck) //
.flatMap(this::startInfoSubscriptionJob) //
.doOnNext(this.infoJobs::put) //
- .flatMap(newEiJob -> Mono.just(new ResponseEntity<>(isNewJob ? HttpStatus.CREATED : HttpStatus.OK)))
+ .map(newEiJob -> new ResponseEntity<>(isNewJob ? HttpStatus.CREATED : HttpStatus.OK)) //
.onErrorResume(throwable -> Mono.just(ErrorResponse.create(throwable, HttpStatus.NOT_FOUND)));
}
return this.producerCallbacks.startInfoSubscriptionJob(newInfoJob, infoProducers) //
.doOnNext(noOfAcceptingProducers -> this.logger.debug("Started job {}, number of activated producers: {}",
newInfoJob.getId(), noOfAcceptingProducers)) //
- .flatMap(noOfAcceptingProducers -> Mono.just(newInfoJob));
+ .map(noOfAcceptingProducers -> newInfoJob);
}
private Mono<InfoJob> validatePutInfoJob(String jobId, ConsumerJobInfo jobInfo, boolean performTypeCheck) {
return Flux.fromIterable(getProducersForJob(infoJob, infoProducers)) //
.flatMap(infoProducer -> startInfoJob(infoProducer, infoJob, retrySpec)) //
.collectList() //
- .flatMap(okResponses -> Mono.just(Integer.valueOf(okResponses.size()))); //
+ .map(okResponses -> Integer.valueOf(okResponses.size())); //
}
/**
private Mono<String> notifySubscriber(Function<? super SubscriptionInfo, Mono<String>> notifyFunc,
SubscriptionInfo subscriptionInfo) {
Retry retrySpec = Retry.backoff(3, Duration.ofSeconds(1));
- return Mono.just(1) //
- .flatMap(notUsed -> notifyFunc.apply(subscriptionInfo)) //
+ return notifyFunc.apply(subscriptionInfo) //
.retryWhen(retrySpec) //
.onErrorResume(throwable -> {
logger.warn("Consumer callback failed {}, removing subscription {}", throwable.getMessage(),
})//
.doOnNext(response -> handleRespondingProducer(response, producer))
.flatMap(response -> checkProducerJobs(producer)) //
- .flatMap(responses -> Mono.just(producer));
+ .map(responses -> producer);
}
private Mono<?> checkProducerJobs(InfoProducer producer) {
// Test that subscriptions are removed for a unresponsive consumer
// PUT a subscription with a junk callback
- final ConsumerTypeSubscriptionInfo info = new ConsumerTypeSubscriptionInfo(baseUrl() + "JUNK", "owner");
+ final ConsumerTypeSubscriptionInfo info = new ConsumerTypeSubscriptionInfo(baseUrl() + "/JUNK", "owner");
String body = gson.toJson(info);
restClient().putForEntity(typeSubscriptionUrl() + "/subscriptionId", body).block();
assertThat(this.infoTypeSubscriptions.size()).isEqualTo(1);
-Subproject commit 3b916e4dc5777863cb4ee873b41ee460fb9aec27
+Subproject commit 558d6d2de33bb8cf4b16df980a0cdf3b1747a8e2
DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR DMAAPMR PA RICSIM SDNC NGW KUBEPROXY"
#App names to include in the test when running kubernetes, space separated list
-KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
+KUBE_INCLUDED_IMAGES="CP CR MR DMAAPMR PA RICSIM SDNC NGW KUBEPROXY "
#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
KUBE_PRESTARTED_IMAGES=""
start_ric_simulators ricsim_g3 1 STD_2.0.0
fi
- start_mr
+ start_mr "$MR_READ_TOPIC" "/events" "users/policy-agent" \
+ "$MR_WRITE_TOPIC" "/events" "users/mr-stub"
start_cr
TC_ONELINE_DESCR="App test DMAAP Mediator and DMAAP Adapter"
#App names to include in the test when running docker, space separated list
-DOCKER_INCLUDED_IMAGES="ECS DMAAPMED DMAAPADP KUBEPROXY MR CR"
+DOCKER_INCLUDED_IMAGES="ECS DMAAPMED DMAAPADP KUBEPROXY MR DMAAPMR CR"
#App names to include in the test when running kubernetes, space separated list
-KUBE_INCLUDED_IMAGES=" ECS DMAAPMED DMAAPADP KUBEPROXY MR CR"
+KUBE_INCLUDED_IMAGES=" ECS DMAAPMED DMAAPADP KUBEPROXY MR DMAAPMR CR"
#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
KUBE_PRESTARTED_IMAGES=""
set_ecs_trace
-start_mr
+start_mr "unauthenticated.dmaapmed.json" "/events" "dmaapmediatorproducer/STD_Fault_Messages" \
+ "unauthenticated.dmaapadp.json" "/events" "dmaapadapterproducer/msgs" \
+ "unauthenticated.dmaapadp_kafka.text" "/events" "dmaapadapterproducer/msgs"
start_dmaapadp NOPROXY $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_CONFIG_FILE $SIM_GROUP/$DMAAP_ADP_COMPOSE_DIR/$DMAAP_ADP_DATA_FILE
# Check producers
ecs_api_idc_get_job_ids 200 NOTYPE NOWNER EMPTY
-ecs_api_idc_get_type_ids 200 ExampleInformationType STD_Fault_Messages
+ecs_api_idc_get_type_ids 200 ExampleInformationType STD_Fault_Messages ExampleInformationTypeKafka
ecs_api_edp_get_producer_ids_2 200 NOTYPE DmaapGenericInfoProducer DMaaP_Mediator_Producer
-# Create jobs for adapter
+# Create jobs for adapter - CR stores data as MD5 hash
start_timer "Create adapter jobs: $NUM_JOBS"
for ((i=1; i<=$NUM_JOBS; i++))
do
- ecs_api_idc_put_job 201 job-adp-$i ExampleInformationType $CR_SERVICE_MR_PATH/job-adp-data$i info-owner-adp-$i $CR_SERVICE_MR_PATH/job_status_info-owner-adp-$i testdata/dmaap-adapter/job-template.json
+ ecs_api_idc_put_job 201 job-adp-$i ExampleInformationType $CR_SERVICE_MR_PATH/job-adp-data$i"?storeas=md5" info-owner-adp-$i $CR_SERVICE_APP_PATH/job_status_info-owner-adp-$i testdata/dmaap-adapter/job-template.json
+
done
print_timer "Create adapter jobs: $NUM_JOBS"
-# Create jobs for mediator
+# Create jobs for adapter kafka - CR stores data as MD5 hash
+start_timer "Create adapter (kafka) jobs: $NUM_JOBS"
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+ ecs_api_idc_put_job 201 job-adp-kafka-$i ExampleInformationTypeKafka $CR_SERVICE_TEXT_PATH/job-adp-kafka-data$i"?storeas=md5" info-owner-adp-kafka-$i $CR_SERVICE_APP_PATH/job_status_info-owner-adp-kafka-$i testdata/dmaap-adapter/job-template-1-kafka.json
+
+done
+print_timer "Create adapter (kafka) jobs: $NUM_JOBS"
+
+# Create jobs for mediator - CR stores data as MD5 hash
start_timer "Create mediator jobs: $NUM_JOBS"
for ((i=1; i<=$NUM_JOBS; i++))
do
- ecs_api_idc_put_job 201 job-med-$i STD_Fault_Messages $CR_SERVICE_MR_PATH/job-med-data$i info-owner-med-$i $CR_SERVICE_MR_PATH/job_status_info-owner-med-$i testdata/dmaap-adapter/job-template.json
+ ecs_api_idc_put_job 201 job-med-$i STD_Fault_Messages $CR_SERVICE_MR_PATH/job-med-data$i"?storeas=md5" info-owner-med-$i $CR_SERVICE_APP_PATH/job_status_info-owner-med-$i testdata/dmaap-adapter/job-template.json
done
print_timer "Create mediator jobs: $NUM_JOBS"
do
ecs_api_a1_get_job_status 200 job-med-$i ENABLED 30
ecs_api_a1_get_job_status 200 job-adp-$i ENABLED 30
+ ecs_api_a1_get_job_status 200 job-adp-kafka-$i ENABLED 30
done
+
EXPECTED_DATA_DELIV=0
-# Send data to adapter via mr
+mr_api_generate_json_payload_file 1 ./tmp/data_for_dmaap_test.json
+mr_api_generate_text_payload_file 1 ./tmp/data_for_dmaap_test.txt
+
+## Send json file via message-router to adapter
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+
+mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapadp.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+# Check received data callbacks from adapter
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+ cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json
+ cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json
+ cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json
+ cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json
+ cr_api_check_single_genric_event_md5_file 200 job-adp-data$i ./tmp/data_for_dmaap_test.json
+done
+
+
+## Send text file via message-router to adapter kafka
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+
+mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_text_file "/events/unauthenticated.dmaapadp_kafka.text" ./tmp/data_for_dmaap_test.txt
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+# Check received data callbacks from adapter kafka
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+ cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt
+ cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt
+ cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt
+ cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt
+ cr_api_check_single_genric_event_md5_file 200 job-adp-kafka-data$i ./tmp/data_for_dmaap_test.txt
+done
+
+## Send json file via message-router to mediator
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+
+mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+EXPECTED_DATA_DELIV=$(($NUM_JOBS+$EXPECTED_DATA_DELIV))
+mr_api_send_json_file "/events/unauthenticated.dmaapmed.json" ./tmp/data_for_dmaap_test.json
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 200
+
+# Check received data callbacks from mediator
+for ((i=1; i<=$NUM_JOBS; i++))
+do
+ cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json
+ cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json
+ cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json
+ cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json
+ cr_api_check_single_genric_event_md5_file 200 job-med-data$i ./tmp/data_for_dmaap_test.json
+done
+
+
+# Send small json via message-router to adapter
mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-1"}'
mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-3"}'
start_timer "Data delivery adapter, 2 json per job"
cr_equal received_callbacks $EXPECTED_DATA_DELIV 100
print_timer "Data delivery adapter, 2 json per job"
-EXPECTED_DATA_DELIV=$(cr_read received_callbacks)
-# Send data to mediator
+# Send small text via message-router to adapter kafka
+mr_api_send_text "/events/unauthenticated.dmaapadp_kafka.text" 'Message-------1'
+mr_api_send_text "/events/unauthenticated.dmaapadp_kafka.text" 'Message-------3'
+
+# Wait for data reception, adapter kafka
+EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
+start_timer "Data delivery adapter kafka, 2 strings per job"
+cr_equal received_callbacks $EXPECTED_DATA_DELIV 100
+print_timer "Data delivery adapter kafka, 2 strings per job"
+
+# Send small json via message-router to mediator
mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-0"}'
mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-2"}'
start_timer "Data delivery mediator, 2 json per job"
cr_equal received_callbacks $EXPECTED_DATA_DELIV 100
print_timer "Data delivery mediator, 2 json per job"
-EXPECTED_DATA_DELIV=$(cr_read received_callbacks)
# Check received number of messages for mediator and adapter callbacks
for ((i=1; i<=$NUM_JOBS; i++))
do
- cr_equal received_callbacks?id=job-med-data$i 2
- cr_equal received_callbacks?id=job-adp-data$i 2
+ cr_equal received_callbacks?id=job-med-data$i 7
+ cr_equal received_callbacks?id=job-adp-data$i 7
+ cr_equal received_callbacks?id=job-adp-kafka-data$i 7
done
# Check received data and order for mediator and adapter callbacks
for ((i=1; i<=$NUM_JOBS; i++))
do
- cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-0"}'
- cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-2"}'
- cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-1"}'
- cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-3"}'
+ cr_api_check_single_genric_event_md5 200 job-med-data$i '{"msg":"msg-0"}'
+ cr_api_check_single_genric_event_md5 200 job-med-data$i '{"msg":"msg-2"}'
+ cr_api_check_single_genric_event_md5 200 job-adp-data$i '{"msg":"msg-1"}'
+ cr_api_check_single_genric_event_md5 200 job-adp-data$i '{"msg":"msg-3"}'
+ cr_api_check_single_genric_event_md5 200 job-adp-kafka-data$i 'Message-------1'
+ cr_api_check_single_genric_event_md5 200 job-adp-kafka-data$i 'Message-------3'
done
# Set delay in the callback receiver to slow down callbacks
-SEC_DELAY=5
+SEC_DELAY=2
cr_delay_callback 200 $SEC_DELAY
-# Send data to adapter via mr
+# Send small json via message-router to adapter
mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-5"}'
mr_api_send_json "/events/unauthenticated.dmaapadp.json" '{"msg":"msg-7"}'
# Wait for data reception, adapter
EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
-start_timer "Data delivery adapter with $SEC_DELAY seconds delay, 2 json per job"
+start_timer "Data delivery adapter with $SEC_DELAY seconds delay in consumer, 2 json per job"
cr_equal received_callbacks $EXPECTED_DATA_DELIV $(($NUM_JOBS+300))
-print_timer "Data delivery adapter with $SEC_DELAY seconds delay, 2 json per job"
-EXPECTED_DATA_DELIV=$(cr_read received_callbacks)
+print_timer "Data delivery adapter with $SEC_DELAY seconds delay in consumer, 2 json per job"
+
+# Send small text via message-router to adapter kafka
+mr_api_send_text "/events/unauthenticated.dmaapadp_kafka.text" 'Message-------5'
+mr_api_send_text "/events/unauthenticated.dmaapadp_kafka.text" 'Message-------7'
-# Send data to mediator
+# Wait for data reception, adapter kafka
+EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
+start_timer "Data delivery adapter kafka with $SEC_DELAY seconds delay in consumer, 2 strings per job"
+cr_equal received_callbacks $EXPECTED_DATA_DELIV $(($NUM_JOBS+300))
+print_timer "Data delivery adapter kafka with $SEC_DELAY seconds delay in consumer, 2 strings per job"
+
+
+# Send small json via message-router to mediator
mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-4"}'
mr_api_send_json "/events/unauthenticated.dmaapmed.json" '{"msg":"msg-6"}'
# Wait for data reception, mediator
EXPECTED_DATA_DELIV=$(($NUM_JOBS*2+$EXPECTED_DATA_DELIV))
-start_timer "Data delivery mediator with $SEC_DELAY seconds delay, 2 json per job"
+start_timer "Data delivery mediator with $SEC_DELAY seconds delay in consumer, 2 json per job"
cr_equal received_callbacks $EXPECTED_DATA_DELIV 1000
-print_timer "Data delivery mediator with $SEC_DELAY seconds delay, 2 json per job"
-EXPECTED_DATA_DELIV=$(cr_read received_callbacks)
+print_timer "Data delivery mediator with $SEC_DELAY seconds delay in consumer, 2 json per job"
# Check received number of messages for mediator and adapter callbacks
for ((i=1; i<=$NUM_JOBS; i++))
do
- cr_equal received_callbacks?id=job-med-data$i 4
- cr_equal received_callbacks?id=job-adp-data$i 4
+ cr_equal received_callbacks?id=job-med-data$i 9
+ cr_equal received_callbacks?id=job-adp-data$i 9
+ cr_equal received_callbacks?id=job-adp-kafka-data$i 9
done
# Check received data and order for mediator and adapter callbacks
for ((i=1; i<=$NUM_JOBS; i++))
do
- cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-4"}'
- cr_api_check_single_genric_json_event 200 job-med-data$i '{"msg":"msg-6"}'
- cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-5"}'
- cr_api_check_single_genric_json_event 200 job-adp-data$i '{"msg":"msg-7"}'
+ cr_api_check_single_genric_event_md5 200 job-med-data$i '{"msg":"msg-4"}'
+ cr_api_check_single_genric_event_md5 200 job-med-data$i '{"msg":"msg-6"}'
+ cr_api_check_single_genric_event_md5 200 job-adp-data$i '{"msg":"msg-5"}'
+ cr_api_check_single_genric_event_md5 200 job-adp-data$i '{"msg":"msg-7"}'
+ cr_api_check_single_genric_event_md5 200 job-adp-kafka-data$i 'Message-------5'
+ cr_api_check_single_genric_event_md5 200 job-adp-kafka-data$i 'Message-------7'
done
-
-
#### TEST COMPLETE ####
store_logs END
print_result
-auto_clean_environment
\ No newline at end of file
+auto_clean_environment
DOCKER_INCLUDED_IMAGES="CBS CONSUL CP CR MR DMAAPMR PA RICSIM SDNC NGW KUBEPROXY"
#App names to include in the test when running kubernetes, space separated list
-KUBE_INCLUDED_IMAGES="CP CR MR PA RICSIM SDNC KUBEPROXY NGW"
+KUBE_INCLUDED_IMAGES="CP CR MR DMAAPMR PA RICSIM SDNC KUBEPROXY NGW"
#Prestarted app (not started by script) to include in the test when running kubernetes, space separated list
KUBE_PRESTARTED_IMAGES=""
start_ric_simulators $RIC_SIM_PREFIX"_g3" $STD_NUM_RICS STD_2.0.0
- start_mr
+ start_mr "$MR_READ_TOPIC" "/events" "users/policy-agent" \
+ "$MR_WRITE_TOPIC" "/events" "users/mr-stub"
start_control_panel $SIM_GROUP/$CONTROL_PANEL_COMPOSE_DIR/$CONTROL_PANEL_CONFIG_FILE
clean_environment
start_kube_proxy
-start_mr
+start_mr "$MR_READ_TOPIC" "/events" "users/policy-agent" \
+ "$MR_WRITE_TOPIC" "/events" "users/mr-stub" \
+ "unauthenticated.dmaapadp.json" "/events" "dmaapadapterproducer/msgs" \
+    "unauthenticated.dmaapmed.json" "/events" "dmaapmediatorproducer/STD_Fault_Messages"
+
if [ $RUNMODE == "KUBE" ]; then
:
else
--- /dev/null
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type": "object",
+ "properties": {
+ "filter": {
+ "type": "string"
+ },
+ "maxConcurrency": {
+ "type": "integer"
+ },
+ "bufferTimeout": {
+ "type": "object",
+ "properties": {
+ "maxSize": {
+ "type": "integer"
+ },
+ "maxTimeMiliseconds": {
+ "type": "integer"
+ }
+ },
+ "required": [
+ "maxSize",
+ "maxTimeMiliseconds"
+ ]
+ }
+ },
+ "required": []
+}
\ No newline at end of file
--- /dev/null
+{
+ "maxConcurrency": 1,
+ "bufferTimeout": {
+ "maxSize": 1,
+ "maxTimeMiliseconds": 0
+ }
+}
\ No newline at end of file
| `--print-stats` | Prints the number of tests, failed tests, failed configuration and deviations after each individual test or config |
| `--override <file>` | Override setting from the file supplied by --env-file |
| `--pre-clean` | Clean kube resources when running docker and vice versa |
+| `--gen-stats` | Collect container/pod runtime statistics |
| `help` | Print this info along with the test script description and the list of app short names supported |
## Function: setup_testenvironment ##
use_agent_rest_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__PA_statisics_setup() {
+ if [ $RUNMODE == "KUBE" ]; then
+ echo "PA $POLICY_AGENT_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+ else
+ echo "PA $POLICY_AGENT_APP_NAME"
+ fi
+}
+
+
#######################################################
###########################
# one for sending the requests and one for receiving the response
# but only when using the DMAAP interface
# REST or DMAAP is controlled of the base url of $XX_ADAPTER
-# arg: (PA|ECS|CR|RC GET|PUT|POST|DELETE|GET_BATCH|PUT_BATCH|POST_BATCH|DELETE_BATCH <url>|<correlation-id> [<file>]) | (PA|ECS RESPONSE <correlation-id>)
+# arg: (PA|ECS|CR|RC GET|PUT|POST|DELETE|GET_BATCH|PUT_BATCH|POST_BATCH|DELETE_BATCH <url>|<correlation-id> [<file> [mime-type]]) | (PA|ECS RESPONSE <correlation-id>)
+# Default mime type for file is application/json unless specified in parameter mime-type
# (Not for test scripts)
__do_curl_to_api() {
TIMESTAMP=$(date "+%Y-%m-%d %H:%M:%S")
paramError=0
input_url=$3
+ fname=$4
if [ $# -gt 0 ]; then
if [ $1 == "PA" ]; then
__ADAPTER=$PA_ADAPTER
__ADAPTER=$MR_STUB_ADAPTER
__ADAPTER_TYPE=$MR_STUB_ADAPTER_TYPE
__RETRY_CODES=""
- else
+ elif [ $1 == "DMAAPMR" ]; then
+ __ADAPTER=$MR_DMAAP_ADAPTER_HTTP
+ __ADAPTER_TYPE=$MR_DMAAP_ADAPTER_TYPE
+ __RETRY_CODES=""
+ else
paramError=1
fi
- if [ $__ADAPTER_TYPE == "MR-HTTP" ]; then
+ if [ "$__ADAPTER_TYPE" == "MR-HTTP" ]; then
__ADAPTER=$MR_ADAPTER_HTTP
fi
- if [ $__ADAPTER_TYPE == "MR-HTTPS" ]; then
+ if [ "$__ADAPTER_TYPE" == "MR-HTTPS" ]; then
__ADAPTER=$MR_ADAPTER_HTTPS
fi
fi
- if [ $# -lt 3 ] || [ $# -gt 4 ]; then
+ if [ $# -lt 3 ] || [ $# -gt 5 ]; then
paramError=1
else
timeout=""
fi
if [ $# -gt 3 ]; then
content=" -H Content-Type:application/json"
+ fname=$4
+ if [ $# -gt 4 ]; then
+ content=" -H Content-Type:"$5
+ fi
fi
if [ $2 == "GET" ] || [ $2 == "GET_BATCH" ]; then
oper="GET"
fi
elif [ $2 == "PUT" ] || [ $2 == "PUT_BATCH" ]; then
oper="PUT"
- if [ $# -eq 4 ]; then
- file=" --data-binary @$4"
+ if [ $# -gt 3 ]; then
+ file=" --data-binary @$fname"
fi
accept=" -H accept:application/json"
elif [ $2 == "POST" ] || [ $2 == "POST_BATCH" ]; then
oper="POST"
accept=" -H accept:*/*"
- if [ $# -eq 4 ]; then
- file=" --data-binary @$4"
+ if [ $# -gt 3 ]; then
+ file=" --data-binary @$fname"
accept=" -H accept:application/json"
fi
elif [ $2 == "DELETE" ] || [ $2 == "DELETE_BATCH" ]; then
oper=" -X "$oper
curlString="curl -k $proxyflag "${oper}${timeout}${httpcode}${accept}${content}${url}${file}
echo " CMD: "$curlString >> $HTTPLOG
- if [ $# -eq 4 ]; then
- echo " FILE: $(<$4)" >> $HTTPLOG
+ if [ $# -gt 3 ]; then
+ echo " FILE: $(<$fname)" >> $HTTPLOG
fi
# Do retry for configured response codes, otherwise only one attempt
else
if [ $oper != "RESPONSE" ]; then
requestUrl=$input_url
- if [ $2 == "PUT" ] && [ $# -eq 4 ]; then
- payload="$(cat $4 | tr -d '\n' | tr -d ' ' )"
+ if [ $2 == "PUT" ] && [ $# -gt 3 ]; then
+ payload="$(cat $fname | tr -d '\n' | tr -d ' ' )"
echo "payload: "$payload >> $HTTPLOG
file=" --data-binary "$payload
- elif [ $# -eq 4 ]; then
- echo " FILE: $(cat $4)" >> $HTTPLOG
+ elif [ $# -gt 3 ]; then
+ echo " FILE: $(cat $fname)" >> $HTTPLOG
fi
#urlencode the request url since it will be carried by send-request url
requestUrl=$(python3 -c "from __future__ import print_function; import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1]))" "$input_url")
CBS_SERVICE_PATH="http://"$CBS_APP_NAME":"$CBS_INTERNAL_PORT
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__CONSUL_statisics_setup() {
+ echo ""
+}
+
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__CBS_statisics_setup() {
+ echo ""
+}
#######################################################
__CP_initial_setup() {
use_control_panel_http
}
+
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__CP_statisics_setup() {
+ if [ $RUNMODE == "KUBE" ]; then
+ echo "CP $CONTROL_PANEL_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+ else
+ echo "CP $CONTROL_PANEL_APP_NAME"
+ fi
+}
+
#######################################################
# All resources shall be ordered to be scaled to 0, if relevant. If not relevant to scale, then do no action.
# This function is called for apps fully managed by the test script
__SDNC_kube_scale_zero() {
- __kube_scale_all_resources $KUBE_SNDC_NAMESPACE autotest SDNC
+ __kube_scale_all_resources $KUBE_SDNC_NAMESPACE autotest SDNC
}
# Scale kubernetes resources to zero and wait until this has been accomplished, if relevant. If not relevant to scale, then do no action.
# Delete all kube resouces for the app
# This function is called for apps managed by the test script.
__SDNC_kube_delete_all() {
- __kube_delete_all_resources $KUBE_SNDC_NAMESPACE autotest SDNC
+ __kube_delete_all_resources $KUBE_SDNC_NAMESPACE autotest SDNC
}
# Store docker logs
# args: <log-dir> <file-prexix>
__SDNC_store_docker_logs() {
if [ $RUNMODE == "KUBE" ]; then
- kubectl logs -l "autotest=SDNC" -n $KUBE_SNDC_NAMESPACE --tail=-1 > $1$2_SDNC.log 2>&1
- podname=$(kubectl get pods -n $KUBE_SNDC_NAMESPACE -l "autotest=SDNC" -o custom-columns=":metadata.name")
- kubectl exec -t -n $KUBE_SNDC_NAMESPACE $podname -- cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
+ kubectl logs -l "autotest=SDNC" -n $KUBE_SDNC_NAMESPACE --tail=-1 > $1$2_SDNC.log 2>&1
+ podname=$(kubectl get pods -n $KUBE_SDNC_NAMESPACE -l "autotest=SDNC" -o custom-columns=":metadata.name")
+ kubectl exec -t -n $KUBE_SDNC_NAMESPACE $podname -- cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
else
docker exec -t $SDNC_APP_NAME cat $SDNC_KARAF_LOG> $1$2_SDNC_karaf.log 2>&1
fi
use_sdnc_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__SDNC_statisics_setup() {
+ if [ $RUNMODE == "KUBE" ]; then
+ echo "SDNC $SDNC_APP_NAME $KUBE_SDNC_NAMESPACE"
+ else
+ echo "SDNC $SDNC_APP_NAME"
+ fi
+}
+
#######################################################
# Set http as the protocol to use for all communication to SDNC
SDNC_SERVICE_PATH=$1"://"$SDNC_APP_NAME":"$2 # docker access, container->container and script->container via proxy
SDNC_SERVICE_API_PATH=$1"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_APP_NAME":"$1$SDNC_API_URL
if [ $RUNMODE == "KUBE" ]; then
- SDNC_SERVICE_PATH=$1"://"$SDNC_APP_NAME.$KUBE_SNDC_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
- SDNC_SERVICE_API_PATH=$1"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_APP_NAME.KUBE_SNDC_NAMESPACE":"$1$SDNC_API_URL
+ SDNC_SERVICE_PATH=$1"://"$SDNC_APP_NAME.$KUBE_SDNC_NAMESPACE":"$3 # kube access, pod->svc and script->svc via proxy
+ SDNC_SERVICE_API_PATH=$1"://"$SDNC_USER":"$SDNC_PWD"@"$SDNC_APP_NAME.KUBE_SDNC_NAMESPACE":"$1$SDNC_API_URL
fi
echo ""
# Export env vars for config files, docker compose and kube resources
# args:
__sdnc_export_vars() {
- export KUBE_SNDC_NAMESPACE
+ export KUBE_SDNC_NAMESPACE
export DOCKER_SIM_NWNAME
export SDNC_APP_NAME
if [ $retcode_p -eq 0 ]; then
echo -e " Using existing $SDNC_APP_NAME deployment and service"
echo " Setting SDNC replicas=1"
- __kube_scale deployment $SDNC_APP_NAME $KUBE_SNDC_NAMESPACE 1
+ __kube_scale deployment $SDNC_APP_NAME $KUBE_SDNC_NAMESPACE 1
fi
# Check if app shall be fully managed by the test script
echo -e " Creating $SDNC_APP_NAME app and expose service"
#Check if namespace exists, if not create it
- __kube_create_namespace $KUBE_SNDC_NAMESPACE
+ __kube_create_namespace $KUBE_SDNC_NAMESPACE
__sdnc_export_vars
use_cr_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__CR_statisics_setup() {
+ # Emits "<short-name> <app-name> <namespace>" (KUBE) or "<short-name> <app-name>" (docker)
+ # - format matches the per-app args expected by the stat collector (genstat.sh)
+ if [ $RUNMODE == "KUBE" ]; then
+ echo "CR $CR_APP_NAME $KUBE_SIM_NAMESPACE"
+ else
+ echo "CR $CR_APP_NAME"
+ fi
+}
+
#######################################################
################
fi
# Service paths are used in test script to provide callbacck urls to app
CR_SERVICE_MR_PATH=$CR_SERVICE_PATH$CR_APP_CALLBACK_MR #Only for messages from dmaap adapter/mediator
+ CR_SERVICE_TEXT_PATH=$CR_SERVICE_PATH$CR_APP_CALLBACK_TEXT #Callbacks for text payload
CR_SERVICE_APP_PATH=$CR_SERVICE_PATH$CR_APP_CALLBACK #For general callbacks from apps
# CR_ADAPTER used for switching between REST and DMAAP (only REST supported currently)
body=${res:0:${#res}-3}
targetJson=$3
+ if [ $targetJson == "EMPTY" ] && [ ${#body} -ne 0 ]; then
+ __log_test_fail_body
+ return 1
+ fi
echo " TARGET JSON: $targetJson" >> $HTTPLOG
res=$(python3 ../common/compare_json.py "$targetJson" "$body")
return 1
fi
+ __log_test_pass
+ return 0
+}
+
+# CR API: Check a single (oldest) json in md5 format (or none if empty) for path.
+# Note that if a json message is given, it shall be compact, no ws except inside string.
+# The MD5 will generate different hash if ws is present or not in otherwise equivalent json
+# arg: <response-code> <topic-url> (EMPTY | <data-msg> )
+# (Function for test scripts)
+cr_api_check_single_genric_event_md5() {
+ __log_test_start $@
+
+ if [ $# -ne 3 ]; then
+ __print_err "<response-code> <topic-url> (EMPTY | <data-msg> )" $@
+ return 1
+ fi
+
+ # Fetch the oldest event for the given topic-url from the callback receiver
+ query="/get-event/"$2
+ res="$(__do_curl_to_api CR GET $query)"
+ status=${res:${#res}-3} # last 3 chars of the curl result hold the HTTP status code
+
+ if [ $status -ne $1 ]; then
+ __log_test_fail_status_code $1 $status
+ return 1
+ fi
+ body=${res:0:${#res}-3} # response body = everything except the trailing status code
+ # NOTE(review): $3 is unquoted below - a data-msg containing whitespace would break the
+ # test expression; consider [ "$3" == "EMPTY" ] - confirm callers never pass such values
+ if [ $3 == "EMPTY" ]; then
+ if [ ${#body} -ne 0 ]; then
+ __log_test_fail_body
+ return 1
+ else
+ __log_test_pass
+ return 0
+ fi
+ fi
+ # Compute the expected hash with whichever md5 tool is available on this host
+ command -v md5 > /dev/null # Mac
+ if [ $? -eq 0 ]; then
+ targetMd5=$(echo -n "$3" | md5)
+ else
+ command -v md5sum > /dev/null # Linux
+ if [ $? -eq 0 ]; then
+ targetMd5=$(echo -n "$3" | md5sum | cut -d' ' -f 1) # Need to cut additional info printed by cmd
+ else
+ __log_test_fail_general "Command md5 nor md5sum is available"
+ return 1
+ fi
+ fi
+ targetMd5="\""$targetMd5"\"" #Quotes needed - the body is compared as a quoted (json string) hash
+
+ echo " TARGET MD5 hash: $targetMd5" >> $HTTPLOG
+
+ if [ "$body" != "$targetMd5" ]; then
+ __log_test_fail_body
+ return 1
+ fi
+
+ __log_test_pass
+ return 0
+}
+
+# CR API: Check a single (oldest) event in md5 format (or none if empty) for path.
+# Note that if a file with json message is given, the json shall be compact, no ws except inside string and not newlines.
+# The MD5 will generate different hash if ws/newlines is present or not in otherwise equivalent json
+# arg: <response-code> <topic-url> (EMPTY | <data-file> )
+# (Function for test scripts)
+cr_api_check_single_genric_event_md5_file() {
+ __log_test_start $@
+
+ if [ $# -ne 3 ]; then
+ __print_err "<response-code> <topic-url> (EMPTY | <data-file> )" $@
+ return 1
+ fi
+
+ # Fetch the oldest event for the given topic-url from the callback receiver
+ query="/get-event/"$2
+ res="$(__do_curl_to_api CR GET $query)"
+ status=${res:${#res}-3} # last 3 chars of the curl result hold the HTTP status code
+
+ if [ $status -ne $1 ]; then
+ __log_test_fail_status_code $1 $status
+ return 1
+ fi
+ body=${res:0:${#res}-3} # response body = everything except the trailing status code
+ if [ $3 == "EMPTY" ]; then
+ if [ ${#body} -ne 0 ]; then
+ __log_test_fail_body
+ return 1
+ else
+ __log_test_pass
+ return 0
+ fi
+ fi
+
+ # NOTE(review): $3 is unquoted - a file path with spaces would break the -f test; confirm
+ if [ ! -f $3 ]; then
+ __log_test_fail_general "File $3 does not exist"
+ return 1
+ fi
+
+ filedata=$(cat $3)
+
+ # Compute the expected hash with whichever md5 tool is available on this host
+ command -v md5 > /dev/null # Mac
+ if [ $? -eq 0 ]; then
+ targetMd5=$(echo -n "$filedata" | md5)
+ else
+ command -v md5sum > /dev/null # Linux
+ if [ $? -eq 0 ]; then
+ targetMd5=$(echo -n "$filedata" | md5sum | cut -d' ' -f 1) # Need to cut additional info printed by cmd
+ else
+ __log_test_fail_general "Command md5 nor md5sum is available"
+ return 1
+ fi
+ fi
+ targetMd5="\""$targetMd5"\"" #Quotes needed - the body is compared as a quoted (json string) hash
+
+ echo " TARGET MD5 hash: $targetMd5" >> $HTTPLOG
+
+ if [ "$body" != "$targetMd5" ]; then
+ __log_test_fail_body
+ return 1
+ fi
+
 __log_test_pass
 return 0
}
\ No newline at end of file
use_dmaapadp_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__DMAAPADP_statisics_setup() {
+ # Emits "<short-name> <app-name> <namespace>" (KUBE) or "<short-name> <app-name>" (docker)
+ # - format matches the per-app args expected by the stat collector (genstat.sh)
+ if [ $RUNMODE == "KUBE" ]; then
+ echo "DMAAPADP $DMAAP_ADP_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+ else
+ echo "DMAAPADP $DMAAP_ADP_APP_NAME"
+ fi
+}
+
#######################################################
# Set http as the protocol to use for all communication to the Dmaap adapter
use_dmaapmed_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__DMAAPMED_statisics_setup() {
+ # Emits "<short-name> <app-name> <namespace>" (KUBE) or "<short-name> <app-name>" (docker)
+ # - format matches the per-app args expected by the stat collector (genstat.sh)
+ if [ $RUNMODE == "KUBE" ]; then
+ echo "DMAAPMED $DMAAP_MED_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+ else
+ echo "DMAAPMED $DMAAP_MED_APP_NAME"
+ fi
+}
+
#######################################################
# Set http as the protocol to use for all communication to the Dmaap mediator
use_ecs_rest_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__ECS_statisics_setup() {
+ # Emits "<short-name> <app-name> <namespace>" (KUBE) or "<short-name> <app-name>" (docker)
+ # - format matches the per-app args expected by the stat collector (genstat.sh)
+ if [ $RUNMODE == "KUBE" ]; then
+ echo "ECS $ECS_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+ else
+ echo "ECS $ECS_APP_NAME"
+ fi
+}
+
#######################################################
use_gateway_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__NGW_statisics_setup() {
+ # Emits "<short-name> <app-name> <namespace>" (KUBE) or "<short-name> <app-name>" (docker)
+ # - format matches the per-app args expected by the stat collector (genstat.sh)
+ if [ $RUNMODE == "KUBE" ]; then
+ echo "NGW $NRT_GATEWAY_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+ else
+ echo "NGW $NRT_GATEWAY_APP_NAME"
+ fi
+}
+
#######################################################
--- /dev/null
+#!/bin/bash
+
+# ============LICENSE_START===============================================
+# Copyright (C) 2020 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+# This script collects container statistics to a file. Data is separated with semicolon.
+# Works for both docker container and kubernetes pods.
+# Relies on 'docker stats' so will not work for other container runtimes.
+# Used by the test env.
+
+# args: docker <start-time-seconds> <log-file> <app-short-name> <app-name> [ <app-short-name> <app-name> ]*
+# or
+# args: kube <start-time-seconds> <log-file> <app-short-name> <app-name> <namespace> [ <app-short-name> <app-name> <namespace> ]*
+
+# Print the command-line usage for both supported modes (DOCKER / KUBE)
+print_usage() {
+ echo "Usage: genstat.sh DOCKER <start-time-seconds> <log-file> <app-short-name> <app-name> [ <app-short-name> <app-name> ]*"
+ echo "or"
+ echo "Usage: genstat.sh KUBE <start-time-seconds> <log-file> <app-short-name> <app-name> <namespace> [ <app-short-name> <app-name> <namespace> ]*"
+}
+
+STARTTIME=-1
+
+# Need at least <mode> <start-time-seconds> <log-file> plus app arguments
+if [ $# -lt 4 ]; then
+ print_usage
+ exit 1
+fi
+if [ $1 == "DOCKER" ]; then
+ STAT_TYPE=$1
+ shift
+ STARTTIME=$1
+ shift
+ LOGFILE=$1
+ shift
+ # Remaining args must be pairs: <app-short-name> <app-name>
+ if [ $(($#%2)) -ne 0 ]; then
+ print_usage
+ exit 1
+ fi
+elif [ $1 == "KUBE" ]; then
+ STAT_TYPE=$1
+ shift
+ STARTTIME=$1
+ shift
+ LOGFILE=$1
+ shift
+ # Remaining args must be triples: <app-short-name> <app-name> <namespace>
+ if [ $(($#%3)) -ne 0 ]; then
+ print_usage
+ exit 1
+ fi
+else
+ print_usage
+ exit 1
+fi
+
+
+# Semicolon-separated log header
+echo "Time;Name;PIDS;CPU perc;Mem perc" > $LOGFILE
+
+# If a start time was supplied, shift the time base so logged times continue from it
+if [ "$STARTTIME" -ne -1 ]; then
+ STARTTIME=$(($SECONDS-$STARTTIME))
+fi
+
+# Sample stats roughly once per second forever - the caller is expected to kill this script
+while [ true ]; do
+ docker stats --no-stream --format "table {{.Name}};{{.PIDs}};{{.CPUPerc}};{{.MemPerc}}" > tmp/.tmp_stat_out.txt
+ if [ "$STARTTIME" -eq -1 ]; then
+ STARTTIME=$SECONDS
+ fi
+ CTIME=$(($SECONDS-$STARTTIME))
+
+ TMP_APPS=""
+
+ # Match each 'docker stats' line against the requested apps; matched short-names are
+ # collected in TMP_APPS so missing apps can be zero-filled below
+ while read -r line; do
+ APP_LIST=(${@})
+ if [ $STAT_TYPE == "DOCKER" ]; then
+ for ((i=0; i<$#; i=i+2)); do
+ SAPP=${APP_LIST[$i]}
+ APP=${APP_LIST[$i+1]}
+ d=$(echo $line | grep -v "k8s" | grep $APP)
+ # NOTE(review): $d is unquoted here but quoted in the KUBE branch below - a match
+ # containing whitespace would break this test; confirm and align the two branches
+ if [ ! -z $d ]; then
+ # Drop the name field, strip '%' and switch decimal point to comma for the log
+ d=$(echo $d | cut -d';' -f 2- | sed -e 's/%//g' | sed 's/\./,/g')
+ echo "$SAPP;$CTIME;$d" >> $LOGFILE
+ TMP_APPS=$TMP_APPS" $SAPP "
+ fi
+ done
+ else
+ for ((i=0; i<$#; i=i+3)); do
+ SAPP=${APP_LIST[$i]}
+ APP=${APP_LIST[$i+1]}
+ NS=${APP_LIST[$i+2]}
+ # Kube pods show up as docker containers named k8s_...; skip the pause (k8s_POD) containers
+ d=$(echo "$line" | grep -v "k8s_POD" | grep "k8s" | grep $APP | grep $NS)
+ if [ ! -z "$d" ]; then
+ # Drop the name field, strip '%' and switch decimal point to comma for the log
+ d=$(echo "$d" | cut -d';' -f 2- | sed -e 's/%//g' | sed 's/\./,/g')
+ data="$SAPP-$NS;$CTIME;$d"
+ echo $data >> $LOGFILE
+ TMP_APPS=$TMP_APPS" $SAPP-$NS "
+ fi
+ done
+ fi
+ done < tmp/.tmp_stat_out.txt
+
+ # Zero-fill apps that had no stats line in this sample (e.g. not started or stopped)
+ APP_LIST=(${@})
+ if [ $STAT_TYPE == "DOCKER" ]; then
+ for ((i=0; i<$#; i=i+2)); do
+ SAPP=${APP_LIST[$i]}
+ APP=${APP_LIST[$i+1]}
+ if [[ $TMP_APPS != *" $SAPP "* ]]; then
+ data="$SAPP;$CTIME;0;0,00;0,00"
+ echo $data >> $LOGFILE
+ fi
+ done
+ else
+ for ((i=0; i<$#; i=i+3)); do
+ SAPP=${APP_LIST[$i]}
+ APP=${APP_LIST[$i+1]}
+ NS=${APP_LIST[$i+2]}
+ if [[ $TMP_APPS != *" $SAPP-$NS "* ]]; then
+ data="$SAPP-$NS;$CTIME;0;0,00;0,00"
+ echo $data >> $LOGFILE
+ fi
+ done
+ fi
+ sleep 1
+done
:
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__HTTPPROXY_statisics_setup() {
+ # Emits "<short-name> <app-name> <namespace>" (KUBE) or "<short-name> <app-name>" (docker)
+ # - format matches the per-app args expected by the stat collector (genstat.sh)
+ if [ $RUNMODE == "KUBE" ]; then
+ echo "HTTPPROXY $HTTP_PROXY_APP_NAME $KUBE_SIM_NAMESPACE"
+ else
+ echo "HTTPPROXY $HTTP_PROXY_APP_NAME"
+ fi
+}
+
#######################################################
use_kube_proxy_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__KUBEPROXY_statisics_setup() {
+ # Emits "<short-name> <app-name> <namespace>" (KUBE) or "<short-name> <app-name>" (docker)
+ # - format matches the per-app args expected by the stat collector (genstat.sh)
+ # NOTE(review): short-name "KUBEPROXXY" looks misspelled (KUBEPROXY?) - it is a runtime
+ # label, so confirm no consumer matches on the current spelling before renaming
+ if [ $RUNMODE == "KUBE" ]; then
+ echo "KUBEPROXXY $KUBE_PROXY_APP_NAME $KUBE_SIM_NAMESPACE"
+ else
+ echo "KUBEPROXXY $KUBE_PROXY_APP_NAME"
+ fi
+}
+
#######################################################
## Access to Kube http proxy
: # handle by __MR_initial_setup
}
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__MR_statisics_setup() {
+ # Emits "<short-name> <app-name> <namespace>" (KUBE) or "<short-name> <app-name>" (docker)
+ # - format matches the per-app args expected by the stat collector (genstat.sh)
+ if [ $RUNMODE == "KUBE" ]; then
+ echo "MR $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE"
+ else
+ echo "MR $MR_STUB_APP_NAME"
+ fi
+}
+
+# Set app short-name, app name and namespace for logging runtime statistics of kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__DMAAPMR_statisics_setup() {
+ # Emits empty labels in both modes, i.e. no runtime statistics are collected for
+ # DMAAPMR - presumably intentional (stats would cover mr/kafka/zookeeper pods); confirm
+ if [ $RUNMODE == "KUBE" ]; then
+ echo ""
+ else
+ echo ""
+ fi
+}
#######################################################
+# Description of port mappings when running MR-STUB only or MR-STUB + MESSAGE-ROUTER
+#
+# 'MR-STUB only' is started when only 'MR' is included in the test script. Both the test scripts and app will then use MR-STUB as a message-router simulator.
+#
+# 'MR-STUB + MESSAGE-ROUTER' is started when 'MR' and 'DMAAPMR' are included in the test scripts. DMAAPMR is the real message router including kafka and zookeeper.
+# In this configuration, MR-STUB is used by the test script as a frontend to the message-router while apps are using the real message-router.
+#
+# DOCKER KUBE
+# ---------------------------------------------------------------------------------------------------------------------------------------------------
+
+# MR-STUB MR-STUB
+# +++++++ +++++++
+# localhost container service pod
+# ==============================================================================================================================================
+# 10 MR_STUB_LOCALHOST_PORT -> 13 MR_INTERNAL_PORT 15 MR_EXTERNAL_PORT -> 17 MR_INTERNAL_PORT
+# 12 MR_STUB_LOCALHOST_SECURE_PORT -> 14 MR_INTERNAL_SECURE_PORT 16 MR_EXTERNAL_SECURE_PORT -> 18 MR_INTERNAL_SECURE_PORT
+
+
+
+# MESSAGE-ROUTER MESSAGE-ROUTER
+# ++++++++++++++ ++++++++++++++
+# localhost container service pod
+# ===================================================================================================================================================
+# 20 MR_DMAAP_LOCALHOST_PORT -> 23 MR_INTERNAL_PORT 25 MR_EXTERNAL_PORT -> 27 MR_INTERNAL_PORT
+# 22 MR_DMAAP_LOCALHOST_SECURE_PORT -> 24 MR_INTERNAL_SECURE_PORT 26 MR_EXTERNAL_SECURE_PORT -> 28 MR_INTERNAL_SECURE_PORT
+
+
+# Running only the MR-STUB - apps using MR-STUB
+# DOCKER KUBE
+# localhost: 10 and 12 -
+# via proxy (script): 13 and 14 via proxy (script): 15 and 16
+# apps: 13 and 14 apps: 15 and 16
+
+# Running MR-STUB (as frontend for test script) and MESSAGE-ROUTER - apps using MESSAGE-ROUTER
+# DOCKER KUBE
+# localhost: 10 and 12 -
+# via proxy (script): 13 and 14 via proxy (script): 15 and 16
+# apps: 23 and 24 apps: 25 and 26
+#
+
+
+
+# Select http towards MR-STUB and DMAAPMR
+# (new arg list: <internal-port> <external-port> <internal-secure-port> <external-secure-port>)
use_mr_http() {
- __mr_set_protocoll "http" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXT_SECURE_PORT
+ __mr_set_protocoll "http" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXTERNAL_SECURE_PORT
}
+# Select https towards MR-STUB and DMAAPMR - same arg list as use_mr_http;
+# __mr_set_protocoll picks the secure ports based on the protocol arg
use_mr_https() {
- __mr_set_protocoll "https" $MR_INTERNAL_SECURE_PORT $MR_EXTERNAL_SECURE_PORT
+ __mr_set_protocoll "https" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXTERNAL_SECURE_PORT
}
# Setup paths to svc/container for internal and external access
-# args: <protocol> <internal-port> <external-port> <mr-stub-internal-port> <mr-stub-external-port> <mr-stub-internal-secure-port> <mr-stub-external-secure-port>
+# args: <protocol> <internal-port> <external-port> <internal-secure-port> <external-secure-port>
__mr_set_protocoll() {
echo -e $BOLD"$MR_STUB_DISPLAY_NAME and $MR_DMAAP_DISPLAY_NAME protocol setting"$EBOLD
+ # NOTE(review): the printout below is hardcoded to "http" - it prints "http" even when $1 is https
echo -e " Using $BOLD http $EBOLD towards $MR_STUB_DISPLAY_NAME and $MR_DMAAP_DISPLAY_NAME"
MR_HTTPX=$1
+ # Pick plain ($2/$3) or secure ($4/$5) internal/external ports based on the protocol
+ if [ $MR_HTTPX == "http" ]; then
+ INT_PORT=$2
+ EXT_PORT=$3
+ else
+ INT_PORT=$4
+ EXT_PORT=$5
+ fi
+
# Access via test script
- MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME":"$2 # access from script via proxy, docker
- MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME":"$2 # access from script via proxy, docker
+ MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME":"$INT_PORT # access from script via proxy, docker
+ MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME":"$INT_PORT # access from script via proxy, docker
+ MR_DMAAP_ADAPTER_HTTP="" # Access to dmaap mr via proxy - set only if app is included
MR_SERVICE_PATH=$MR_STUB_PATH # access container->container, docker - access pod->svc, kube
+ MR_KAFKA_SERVICE_PATH=""
__check_included_image "DMAAPMR"
if [ $? -eq 0 ]; then
MR_SERVICE_PATH=$MR_DMAAP_PATH # access container->container, docker - access pod->svc, kube
+ MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+
+ MR_KAFKA_SERVICE_PATH=$MR_KAFKA_APP_NAME":"$MR_KAFKA_PORT
fi
# For directing calls from script to e.g.PMS via message rounter
- # Theses case shall always go though the mr-stub
- MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$4
- MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$6
+ # These cases shall always go through the mr-stub
+ MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$2
+ MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$4
+
+ MR_DMAAP_ADAPTER_TYPE="REST"
+
+
if [ $RUNMODE == "KUBE" ]; then
- MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 # access from script via proxy, kube
- MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 # access from script via proxy, kube
+ MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$EXT_PORT # access from script via proxy, kube
+ MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE":"$EXT_PORT # access from script via proxy, kube
MR_SERVICE_PATH=$MR_STUB_PATH
__check_included_image "DMAAPMR"
if [ $? -eq 0 ]; then
MR_SERVICE_PATH=$MR_DMAAP_PATH
+ MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+ MR_KAFKA_SERVICE_PATH=$MR_KAFKA_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_KAFKA_PORT
fi
__check_prestarted_image "DMAAPMR"
if [ $? -eq 0 ]; then
MR_SERVICE_PATH=$MR_DMAAP_PATH
+ MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+ MR_KAFKA_SERVICE_PATH=$MR_KAFKA_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_KAFKA_PORT
fi
# For directing calls from script to e.g.PMS, via message rounter
# These calls shall always go though the mr-stub
- MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$5
- MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$7
+ MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3
+ MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$5
fi
# For calls from script to the mr-stub
MR_STUB_ADAPTER_TYPE="REST"
echo ""
+
}
+
+# use_mr_http() { 2 3 4 5 6 7
+# __mr_set_protocoll "http" $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_PORT $MR_EXTERNAL_PORT $MR_INTERNAL_SECURE_PORT $MR_EXT_SECURE_PORT
+# }
+
+# use_mr_https() {
+# __mr_set_protocoll "https" $MR_INTERNAL_SECURE_PORT $MR_EXTERNAL_SECURE_PORT
+# }
+
+# # Setup paths to svc/container for internal and external access
+# # args: <protocol> <internal-port> <external-port> <mr-stub-internal-port> <mr-stub-external-port> <mr-stub-internal-secure-port> <mr-stub-external-secure-port>
+# __mr_set_protocoll() {
+# echo -e $BOLD"$MR_STUB_DISPLAY_NAME and $MR_DMAAP_DISPLAY_NAME protocol setting"$EBOLD
+# echo -e " Using $BOLD http $EBOLD towards $MR_STUB_DISPLAY_NAME and $MR_DMAAP_DISPLAY_NAME"
+
+# ## Access to Dmaap mediator
+
+# MR_HTTPX=$1
+
+# # Access via test script
+# MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME":"$2 # access from script via proxy, docker
+# MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME":"$2 # access from script via proxy, docker
+# MR_DMAAP_ADAPTER_HTTP="" # Access to dmaap mr via proyx - set only if app is included
+
+# MR_SERVICE_PATH=$MR_STUB_PATH # access container->container, docker - access pod->svc, kube
+# __check_included_image "DMAAPMR"
+# if [ $? -eq 0 ]; then
+# MR_SERVICE_PATH=$MR_DMAAP_PATH # access container->container, docker - access pod->svc, kube
+# MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+# fi
+
+# # For directing calls from script to e.g.PMS via message rounter
+# # These cases shall always go though the mr-stub
+# MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$4
+# MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$6
+
+# MR_DMAAP_ADAPTER_TYPE="REST"
+
+# if [ $RUNMODE == "KUBE" ]; then
+# MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 # access from script via proxy, kube
+# MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE":"$3 # access from script via proxy, kube
+
+# MR_SERVICE_PATH=$MR_STUB_PATH
+# __check_included_image "DMAAPMR"
+# if [ $? -eq 0 ]; then
+# MR_SERVICE_PATH=$MR_DMAAP_PATH
+# MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+# fi
+# __check_prestarted_image "DMAAPMR"
+# if [ $? -eq 0 ]; then
+# MR_SERVICE_PATH=$MR_DMAAP_PATH
+# MR_DMAAP_ADAPTER_HTTP=$MR_DMAAP_PATH
+# fi
+
+# # For directing calls from script to e.g.PMS, via message rounter
+# # These calls shall always go though the mr-stub
+# MR_ADAPTER_HTTP="http://"$MR_STUB_APP_NAME":"$5
+# MR_ADAPTER_HTTPS="https://"$MR_STUB_APP_NAME":"$7
+# fi
+
+# # For calls from script to the mr-stub
+# MR_STUB_ADAPTER=$MR_STUB_PATH
+# MR_STUB_ADAPTER_TYPE="REST"
+
+# echo ""
+
+# }
+
# Export env vars for config files, docker compose and kube resources
# args: -
__dmaapmr_export_vars() {
export MR_DMAAP_LOCALHOST_SECURE_PORT
export MR_INTERNAL_SECURE_PORT
export MR_DMAAP_HOST_MNT_DIR
+
+ export KUBE_ONAP_NAMESPACE
+ export MR_EXTERNAL_PORT
+ export MR_EXTERNAL_SECURE_PORT
+ export MR_KAFKA_PORT
+ export MR_ZOOKEEPER_PORT
+
+ export MR_KAFKA_SERVICE_PATH
}
# Export env vars for config files, docker compose and kube resources
export MRSTUB_IMAGE
export MR_INTERNAL_PORT
export MR_INTERNAL_SECURE_PORT
+ export MR_EXTERNAL_PORT
+ export MR_EXTERNAL_SECURE_PORT
export MR_STUB_LOCALHOST_PORT
export MR_STUB_LOCALHOST_SECURE_PORT
export MR_STUB_CERT_MOUNT_DIR
export MR_STUB_DISPLAY_NAME
+
+ export KUBE_ONAP_NAMESPACE
+ export MR_EXTERNAL_PORT
+
+ export MR_KAFKA_SERVICE_PATH
}
__dmaapmr_export_vars
- #export MR_DMAAP_APP_NAME
- export MR_DMAAP_KUBE_APP_NAME=message-router
- MR_DMAAP_APP_NAME=$MR_DMAAP_KUBE_APP_NAME
- export KUBE_ONAP_NAMESPACE
- export MR_EXTERNAL_PORT
- export MR_INTERNAL_PORT
- export MR_EXTERNAL_SECURE_PORT
- export MR_INTERNAL_SECURE_PORT
- export ONAP_DMAAPMR_IMAGE
-
- export MR_KAFKA_BWDS_NAME=akfak-bwds
- export MR_KAFKA_BWDS_NAME=kaka
- export KUBE_ONAP_NAMESPACE
-
- export MR_ZOOKEEPER_APP_NAME
- export ONAP_ZOOKEEPER_IMAGE
-
#Check if onap namespace exists, if not create it
__kube_create_namespace $KUBE_ONAP_NAMESPACE
- # TODO - Fix domain name substitution in the prop file
- # Create config maps - dmaapmr app
- configfile=$PWD/tmp/MsgRtrApi.properties
- cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/mr/KUBE-MsgRtrApi.properties $configfile
+ # copy config files
+ MR_MNT_CONFIG_BASEPATH=$SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR$MR_DMAAP_HOST_MNT_DIR
+ cp -r $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR$MR_DMAAP_HOST_CONFIG_DIR/* $MR_MNT_CONFIG_BASEPATH
+ # Create config maps - dmaapmr app
+ configfile=$MR_MNT_CONFIG_BASEPATH/mr/MsgRtrApi.properties
output_yaml=$PWD/tmp/dmaapmr_msgrtrapi_cfc.yaml
__kube_create_configmap dmaapmr-msgrtrapi.properties $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
- configfile=$PWD/tmp/logback.xml
- cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/mr/logback.xml $configfile
+ configfile=$MR_MNT_CONFIG_BASEPATH/mr/logback.xml
output_yaml=$PWD/tmp/dmaapmr_logback_cfc.yaml
__kube_create_configmap dmaapmr-logback.xml $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
- configfile=$PWD/tmp/cadi.properties
- cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/mr/cadi.properties $configfile
+ configfile=$MR_MNT_CONFIG_BASEPATH/mr/cadi.properties
output_yaml=$PWD/tmp/dmaapmr_cadi_cfc.yaml
__kube_create_configmap dmaapmr-cadi.properties $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
# Create config maps - kafka app
- configfile=$PWD/tmp/zk_client_jaas.conf
- cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/kafka/zk_client_jaas.conf $configfile
+ configfile=$MR_MNT_CONFIG_BASEPATH/kafka/zk_client_jaas.conf
output_yaml=$PWD/tmp/dmaapmr_zk_client_cfc.yaml
__kube_create_configmap dmaapmr-zk-client-jaas.conf $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
# Create config maps - zookeeper app
- configfile=$PWD/tmp/zk_server_jaas.conf
- cp $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR"$MR_DMAAP_HOST_MNT_DIR"/zk/zk_server_jaas.conf $configfile
+ configfile=$MR_MNT_CONFIG_BASEPATH/zk/zk_server_jaas.conf
output_yaml=$PWD/tmp/dmaapmr_zk_server_cfc.yaml
__kube_create_configmap dmaapmr-zk-server-jaas.conf $KUBE_ONAP_NAMESPACE autotest DMAAPMR $configfile $output_yaml
__kube_create_instance app $MR_DMAAP_APP_NAME $input_yaml $output_yaml
- echo " Retrieving host and ports for service..."
- MR_DMAAP_HOST_NAME=$(__kube_get_service_host $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE)
+ # echo " Retrieving host and ports for service..."
+ # MR_DMAAP_HOST_NAME=$(__kube_get_service_host $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE)
- MR_EXT_PORT=$(__kube_get_service_port $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE "http")
- MR_EXT_SECURE_PORT=$(__kube_get_service_port $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE "https")
+ # MR_EXT_PORT=$(__kube_get_service_port $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE "http")
+ # MR_EXT_SECURE_PORT=$(__kube_get_service_port $MR_DMAAP_APP_NAME $KUBE_ONAP_NAMESPACE "https")
- echo " Host IP, http port, https port: $MR_DMAAP_APP_NAME $MR_EXT_PORT $MR_EXT_SECURE_PORT"
- MR_SERVICE_PATH=""
- if [ $MR_HTTPX == "http" ]; then
- MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_EXT_PORT
- MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_PORT
- else
- MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_EXT_SECURE_PORT
- MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_SECURE_PORT
+ # echo " Host IP, http port, https port: $MR_DMAAP_APP_NAME $MR_EXT_PORT $MR_EXT_SECURE_PORT"
+ # MR_SERVICE_PATH=""
+ # if [ $MR_HTTPX == "http" ]; then
+ # MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_EXT_PORT
+ # MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_PORT
+ # else
+ # MR_DMAAP_PATH=$MR_HTTPX"://"$MR_DMAAP_HOST_NAME":"$MR_EXT_SECURE_PORT
+ # MR_SERVICE_PATH=$MR_HTTPX"://"$MR_DMAAP_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_SECURE_PORT
+ # fi
+
+ __check_service_start $MR_DMAAP_APP_NAME $MR_DMAAP_PATH$MR_DMAAP_ALIVE_URL
+
+ # Cannot create topics, returns 400 forever.....topics will be created during pipeclean below
+ #__create_topic $MR_READ_TOPIC "Topic for reading policy messages"
+
+ #__create_topic $MR_WRITE_TOPIC "Topic for writing policy messages"
+
+# __dmaap_pipeclean $MR_READ_TOPIC "/events/$MR_READ_TOPIC" "/events/$MR_READ_TOPIC/users/policy-agent?timeout=1000&limit=100"
+#
+# __dmaap_pipeclean $MR_WRITE_TOPIC "/events/$MR_WRITE_TOPIC" "/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=1000&limit=100"
+
+
+ #__dmaap_pipeclean "unauthenticated.dmaapmed.json" "/events/unauthenticated.dmaapmed.json" "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages?timeout=1000&limit=100"
+ #__dmaap_pipeclean "unauthenticated.dmaapadp.json" "/events/unauthenticated.dmaapadp.json" "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs?timeout=1000&limit=100"
+
+ if [ $# -gt 0 ]; then
+ if [ $(($#%3)) -eq 0 ]; then
+ while [ $# -gt 0 ]; do
+ __dmaap_pipeclean "$1" "$2/$1" "$2/$1/$3?timeout=1000&limit=100"
+ shift; shift; shift;
+ done
+ else
+ echo -e $RED" args: start_mr [<topic-name> <base-url> <group-and-user-url>]*"$ERED
+ echo -e $RED" Got: $@"$ERED
+ exit 1
+ fi
fi
- __check_service_start $MR_DMAAP_APP_NAME $MR_DMAAP_PATH$MR_DMAAP_ALIVE_URL
+ echo " Current topics:"
+ curlString="$MR_DMAAP_PATH/topics"
+ result=$(__do_curl "$curlString")
+ echo $result | indent2
fi
if [ $retcode_included_mr -eq 0 ]; then
- #exporting needed var for deployment
- export MR_STUB_APP_NAME
- export KUBE_ONAP_NAMESPACE
- export MRSTUB_IMAGE
- export MR_INTERNAL_PORT
- export MR_INTERNAL_SECURE_PORT
- export MR_EXTERNAL_PORT
- export MR_EXTERNAL_SECURE_PORT
+
+ __mr_export_vars
if [ $retcode_prestarted_dmaapmr -eq 0 ] || [ $retcode_included_dmaapmr -eq 0 ]; then # Set topics for dmaap
export TOPIC_READ="http://$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE:$MR_INTERNAL_PORT/events/$MR_READ_TOPIC"
export TOPIC_WRITE="http://$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE:$MR_INTERNAL_PORT/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=15000&limit=100"
+ export GENERIC_TOPICS_UPLOAD_BASEURL="http://$MR_DMAAP_APP_NAME.$KUBE_ONAP_NAMESPACE:$MR_INTERNAL_PORT"
else
export TOPIC_READ=""
export TOPIC_WRITE=""
+ export GENERIC_TOPICS_UPLOAD_BASEURL=""
fi
#Check if onap namespace exists, if not create it
fi
-
- echo " Retrieving host and ports for service..."
- MR_STUB_HOST_NAME=$(__kube_get_service_host $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE)
-
- MR_EXT_PORT=$(__kube_get_service_port $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE "http")
- MR_EXT_SECURE_PORT=$(__kube_get_service_port $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE "https")
-
- echo " Host IP, http port, https port: $MR_STUB_APP_NAME $MR_EXT_PORT $MR_EXT_SECURE_PORT"
- if [ $MR_HTTPX == "http" ]; then
- MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT
- if [ -z "$MR_SERVICE_PATH" ]; then
- MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_PORT
- fi
- else
- MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT
- if [ -z "$MR_SERVICE_PATH" ]; then
- MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_SECURE_PORT
- fi
- fi
- MR_ADAPTER_HTTP="http://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT
- MR_ADAPTER_HTTPS="https://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT
-
- MR_STUB_ADAPTER=$MR_STUB_PATH
- MR_STUB_ADAPTER_TYPE="REST"
+ # echo " Retrieving host and ports for service..."
+ # MR_STUB_HOST_NAME=$(__kube_get_service_host $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE)
+
+ # MR_EXT_PORT=$(__kube_get_service_port $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE "http")
+ # MR_EXT_SECURE_PORT=$(__kube_get_service_port $MR_STUB_APP_NAME $KUBE_ONAP_NAMESPACE "https")
+
+ # echo " Host IP, http port, https port: $MR_STUB_APP_NAME $MR_EXT_PORT $MR_EXT_SECURE_PORT"
+ # if [ $MR_HTTPX == "http" ]; then
+ # MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT
+ # if [ -z "$MR_SERVICE_PATH" ]; then
+ # MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_PORT
+ # fi
+ # else
+ # MR_STUB_PATH=$MR_HTTPX"://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT
+ # if [ -z "$MR_SERVICE_PATH" ]; then
+ # MR_SERVICE_PATH=$MR_HTTPX"://"$MR_STUB_APP_NAME"."$KUBE_ONAP_NAMESPACE":"$MR_EXT_SECURE_PORT
+ # fi
+ # fi
+ # MR_ADAPTER_HTTP="http://"$MR_STUB_HOST_NAME":"$MR_EXT_PORT
+ # MR_ADAPTER_HTTPS="https://"$MR_STUB_HOST_NAME":"$MR_EXT_SECURE_PORT
+
+ # MR_STUB_ADAPTER=$MR_STUB_PATH
+ # MR_STUB_ADAPTER_TYPE="REST"
__check_service_start $MR_STUB_APP_NAME $MR_STUB_PATH$MR_STUB_ALIVE_URL
export TOPIC_READ=""
export TOPIC_WRITE=""
+ export GENERIC_TOPICS_UPLOAD_BASEURL=""
if [ $retcode_dmaapmr -eq 0 ]; then # Set topics for dmaap
export TOPIC_READ="http://$MR_DMAAP_APP_NAME:$MR_INTERNAL_PORT/events/$MR_READ_TOPIC"
export TOPIC_WRITE="http://$MR_DMAAP_APP_NAME:$MR_INTERNAL_PORT/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=15000&limit=100"
+ export GENERIC_TOPICS_UPLOAD_BASEURL="http://$MR_DMAAP_APP_NAME:$MR_INTERNAL_PORT"
fi
__dmaapmr_export_vars
if [ $retcode_dmaapmr -eq 0 ]; then
+
+ # copy config files
+ MR_MNT_CONFIG_BASEPATH=$SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR$MR_DMAAP_HOST_MNT_DIR
+ cp -r $SIM_GROUP"/"$MR_DMAAP_COMPOSE_DIR$MR_DMAAP_HOST_CONFIG_DIR/* $MR_MNT_CONFIG_BASEPATH
+
+ # substitute vars
+ configfile=$MR_MNT_CONFIG_BASEPATH/mr/MsgRtrApi.properties
+ cp $configfile $configfile"_tmp"
+ envsubst < $configfile"_tmp" > $configfile
+
__start_container $MR_DMAAP_COMPOSE_DIR "" NODOCKERARGS 1 $MR_DMAAP_APP_NAME
__check_service_start $MR_DMAAP_APP_NAME $MR_DMAAP_PATH$MR_DMAAP_ALIVE_URL
- __create_topic $MR_READ_TOPIC "Topic for reading policy messages"
+ # Cannot create topics, returns 400 forever.....topics will be created during pipeclean below
+ #__create_topic $MR_READ_TOPIC "Topic for reading policy messages"
+
+ #__create_topic $MR_WRITE_TOPIC "Topic for writing policy messages"
+
+ #__dmaap_pipeclean $MR_READ_TOPIC "/events/$MR_READ_TOPIC" "/events/$MR_READ_TOPIC/users/policy-agent?timeout=1000&limit=100"
- __create_topic $MR_WRITE_TOPIC "Topic for writing policy messages"
+ #__dmaap_pipeclean $MR_WRITE_TOPIC "/events/$MR_WRITE_TOPIC" "/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=1000&limit=100"
- __dmaap_pipeclean $MR_READ_TOPIC "/events/$MR_READ_TOPIC" "/events/$MR_READ_TOPIC/users/policy-agent?timeout=1000&limit=100"
+ if [ $# -gt 0 ]; then
+ if [ $(($#%3)) -eq 0 ]; then
+ while [ $# -gt 0 ]; do
+ __dmaap_pipeclean "$1" "$2/$1" "$2/$1/$3?timeout=1000&limit=100"
+ shift; shift; shift;
+ done
+ else
+ echo -e $RED" args: start_mr [<topic-name> <base-url> <group-and-user-url>]*"$ERED
+ echo -e $RED" Got: $@"$ERED
+ exit 1
+ fi
+ fi
- __dmaap_pipeclean $MR_WRITE_TOPIC "/events/$MR_WRITE_TOPIC" "/events/$MR_WRITE_TOPIC/users/mr-stub?timeout=1000&limit=100"
+ #__dmaap_pipeclean "unauthenticated.dmaapmed.json" "/events/unauthenticated.dmaapmed.json" "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages?timeout=1000&limit=100"
+ #__dmaap_pipeclean "unauthenticated.dmaapadp.json" "/events/unauthenticated.dmaapadp.json" "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs?timeout=1000&limit=100"
echo " Current topics:"
curlString="$MR_DMAAP_PATH/topics"
# Create a dmaap mr topic
# args: <topic name> <topic-description>
__create_topic() {
-	echo -ne " Creating read topic: $1"$SAMELINE
+	echo -ne " Creating topic: $1"$SAMELINE
	json_topic="{\"topicName\":\"$1\",\"partitionCount\":\"2\", \"replicationCount\":\"3\", \"transactionEnabled\":\"false\",\"topicDescription\":\"$2\"}"
-	echo $json_topic > ./tmp/$1.json
+	# Write the topic json to a tmp file and POST it through the api wrapper
+	fname="./tmp/$1.json"
+	echo $json_topic > $fname
-	curlString="$MR_DMAAP_PATH/topics/create -X POST -H Content-Type:application/json -d@./tmp/$1.json"
-	topic_retries=5
+	query="/topics/create"
+	topic_retries=10
	while [ $topic_retries -gt 0 ]; do
		let topic_retries=topic_retries-1
-		result=$(__do_curl "$curlString")
-		if [ $? -eq 0 ]; then
+		res="$(__do_curl_to_api DMAAPMR POST $query $fname)"
+		# The last three chars of the wrapper response is the http status code
+		status=${res:${#res}-3}
+
+		# Any 2xx status counts as successfully created - stop retrying
+		if [[ $status == "2"* ]]; then
			topic_retries=0
-			echo -e " Creating read topic: $1 $GREEN OK $EGREEN"
-		fi
-		if [ $? -ne 0 ]; then
+			echo -e " Creating topic: $1 $GREEN OK $EGREEN"
+		else
			if [ $topic_retries -eq 0 ]; then
-				echo -e " Creating read topic: $1 $RED Failed $ERED"
+				echo -e " Creating topic: $1 $RED Failed $ERED"
				((RES_CONF_FAIL++))
				return 1
			else
			fi
		fi
	done
+	echo
	return 0
}
# Do a pipeclean of a topic - to overcome dmaap mr bug...
-# args: <topic> <post-url> <read-url>
+# args: <topic> <post-url> <read-url> [<num-retries>]
__dmaap_pipeclean() {
pipeclean_retries=50
+ if [ $# -eq 4 ]; then
+ pipeclean_retries=$4
+ fi
echo -ne " Doing dmaap-mr pipe cleaning on topic: $1"$SAMELINE
while [ $pipeclean_retries -gt 0 ]; do
- echo "{\"pipeclean-$1\":$pipeclean_retries}" > ./tmp/pipeclean.json
+ if [[ $1 == *".text" ]]; then
+ echo "pipeclean-$1:$pipeclean_retries" > ./tmp/__dmaap_pipeclean.txt
+ curlString="$MR_DMAAP_PATH$2 -X POST -H Content-Type:text/plain -d@./tmp/__dmaap_pipeclean.txt"
+ else
+ echo "{\"pipeclean-$1\":$pipeclean_retries}" > ./tmp/__dmaap_pipeclean.json
+ curlString="$MR_DMAAP_PATH$2 -X POST -H Content-Type:application/json -d@./tmp/__dmaap_pipeclean.json"
+ fi
let pipeclean_retries=pipeclean_retries-1
- curlString="$MR_DMAAP_PATH$2 -X POST -H Content-Type:application/json -d@./tmp/pipeclean.json"
result=$(__do_curl "$curlString")
if [ $? -ne 0 ]; then
sleep 1
# arg: <topic-url> <json-msg>
# (Function for test scripts)
mr_api_send_json() {
- __log_test_start $@
+ __log_conf_start $@
if [ $# -ne 2 ]; then
__print_err "<topic-url> <json-msg>" $@
return 1
status=${res:${#res}-3}
if [ $status -ne 200 ]; then
- __log_test_fail_status_code 200 $status
+ __log_conf_fail_status_code 200 $status
+ return 1
+ fi
+
+ __log_conf_ok
+ return 0
+}
+
+# Send text to topic in mr-stub.
+# arg: <topic-url> <text-msg>
+# (Function for test scripts)
+mr_api_send_text() {
+	__log_conf_start $@
+	if [ $# -ne 2 ]; then
+		__print_err "<topic-url> <text-msg>" $@
+		return 1
+	fi
+	query=$1
+	# Stage the message in a tmp file and post it with content type text/plain
+	fname=$PWD/tmp/text_payload_to_mr.txt
+	echo $2 > $fname
+	res="$(__do_curl_to_api MRSTUB POST $query $fname text/plain)"
+
+	# The last three chars of the wrapper response is the http status code
+	status=${res:${#res}-3}
+	if [ $status -eq 200 ]; then
+		__log_conf_ok
+		return 0
+	fi
+
+	__log_conf_fail_status_code 200 $status
+	return 1
+}
+
+# Send json file to topic in mr-stub.
+# arg: <topic-url> <json-file>
+# (Function for test scripts)
+mr_api_send_json_file() {
+	__log_conf_start $@
+	if [ $# -ne 2 ]; then
+		__print_err "<topic-url> <json-file>" $@
+		return 1
+	fi
+	query=$1
+	if [ ! -f $2 ]; then
+		# Use the conf-level fail logger - this function logs with __log_conf_start/__log_conf_ok
+		__log_conf_fail_general "File $2 does not exist"
+		return 1
+	fi
+	#Create json array for mr - mr expects an array of (string encoded) json messages
+	datafile="tmp/mr_api_send_json_file.json"
+	{ echo -n "[" ; cat $2 ; echo -n "]" ;} > $datafile
+
+	res="$(__do_curl_to_api MRSTUB POST $query $datafile)"
+
+	# The last three chars of the wrapper response is the http status code
+	status=${res:${#res}-3}
+	if [ $status -ne 200 ]; then
+		__log_conf_fail_status_code 200 $status
+		return 1
+	fi
+
+	__log_conf_ok
+	return 0
+}
+
+# Send text file to topic in mr-stub.
+# arg: <topic-url> <text-file>
+# (Function for test scripts)
+mr_api_send_text_file() {
+	__log_conf_start $@
+	if [ $# -ne 2 ]; then
+		__print_err "<topic-url> <text-file>" $@
+		return 1
+	fi
+	query=$1
+	if [ ! -f $2 ]; then
+		# Use the conf-level fail logger - this function logs with __log_conf_start/__log_conf_ok
+		__log_conf_fail_general "File $2 does not exist"
+		return 1
+	fi
+
+	# Post the file as-is with content type text/plain
+	res="$(__do_curl_to_api MRSTUB POST $query $2 text/plain)"
+
+	# The last three chars of the wrapper response is the http status code
+	status=${res:${#res}-3}
+	if [ $status -ne 200 ]; then
+		__log_conf_fail_status_code 200 $status
	return 1
	fi
-	__log_test_pass
+	__log_conf_ok
+	return 0
+}
+
+# Create json file for payload
+# arg: <size-in-kb> <filename>
+mr_api_generate_json_payload_file() {
+	__log_conf_start $@
+	if [ $# -ne 2 ]; then
+		__print_err "<size-in-kb> <filename>" $@
+		return 1
+	fi
+	if [ $1 -lt 1 ] || [ $1 -gt 10000 ]; then
+		__log_conf_fail_general "Only size between 1k and 10000k supported"
+		return 1
+	fi
+	# Build a json object holding an array of short strings - approx 150 items per kb requested
+	echo -n "{\"a\":[" > $2
+	LEN=$(($1*150))
+	echo -n "\"a0\"" >> $2
+	for ((idx=1; idx<$LEN; idx++))
+	do
+		echo -n ",\"a$idx\"" >> $2
+	done
+	echo -n "]}" >> $2
+
+	__log_conf_ok
+	return 0
+}
+
+# Create text file for payload
+# arg: <size-in-kb> <filename>
+mr_api_generate_text_payload_file() {
+	__log_conf_start $@
+	if [ $# -ne 2 ]; then
+		__print_err "<size-in-kb> <filename>" $@
+		return 1
+	fi
+	if [ $1 -lt 1 ] || [ $1 -gt 10000 ]; then
+		__log_conf_fail_general "Only size between 1k and 10000k supported"
+		return 1
+	fi
+	# Write 10-char chunks, 100 per kb requested
+	echo -n "" > $2
+	LEN=$(($1*100))
+	for ((idx=0; idx<$LEN; idx++))
+	do
+		echo -n "ABCDEFGHIJ" >> $2
+	done
+
+	__log_conf_ok
	return 0
}
use_prod_stub_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of Kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__PRODSTUB_statisics_setup() {
+	# Docker runs exclude the namespace; kube runs append it
+	if [ $RUNMODE != "KUBE" ]; then
+		echo "PRODSTUB $PROD_STUB_APP_NAME"
+	else
+		echo "PRODSTUB $PROD_STUB_APP_NAME $KUBE_SIM_NAMESPACE"
+	fi
+}
+
#######################################################
# Set http as the protocol to use for all communication to the Prod stub sim
:
}
+# Set app short-name, app name and namespace for logging runtime statistics of Kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__PVCCLEANER_statisics_setup() {
+	echo ""  # No statistics are collected for this system app - return an empty string
+}
+
#######################################################
# This is a system app, all usage in testcase_common.sh
\ No newline at end of file
use_rapp_catalogue_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of Kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__RC_statisics_setup() {
+	# Docker runs exclude the namespace; kube runs append it
+	if [ $RUNMODE != "KUBE" ]; then
+		echo "RC $RAPP_CAT_APP_NAME"
+	else
+		echo "RC $RAPP_CAT_APP_NAME $KUBE_NONRTRIC_NAMESPACE"
+	fi
+}
+
#######################################################
# Set http as the protocol to use for all communication to the Rapp catalogue
use_simulator_http
}
+# Set app short-name, app name and namespace for logging runtime statistics of Kubernetes pods or docker containers
+# For docker, the namespace shall be excluded
+# This function is called for apps managed by the test script as well as for prestarted apps.
+# args: -
+__RICSIM_statisics_setup() {
+	# No statistics are collected for the a1 simulators. Both run modes
+	# returned the same empty string, so the RUNMODE branch was dead code
+	# and has been collapsed to a single statement.
+	echo ""
+}
+
#######################################################
KUBE_NONRTRIC_NAMESPACE="nonrtric" # Namespace for all nonrtric components
KUBE_SIM_NAMESPACE="nonrtric-ft" # Namespace for simulators (except MR and RICSIM)
-KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
KUBE_ONAP_NAMESPACE="onap" # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap" # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap" # Namespace for sdnc
POLICY_AGENT_EXTERNAL_PORT=8081 # Policy Agent container external port (host -> container)
POLICY_AGENT_INTERNAL_PORT=8081 # Policy Agent container internal port (container -> container)
POLICY_AGENT_DATA_FILE="application_configuration.json" # Container data file name
POLICY_AGENT_CONTAINER_MNT_DIR="/var/policy-management-service" # Mounted dir in the container
-MR_DMAAP_APP_NAME="dmaap-mr" # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router" # Name for the Dmaap MR
MR_STUB_APP_NAME="mr-stub" # Name of the MR stub
MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
MR_STUB_DISPLAY_NAME="Message Router stub"
MR_DMAAP_ALIVE_URL="/topics" # Base path for dmaap-mr alive check
MR_DMAAP_COMPOSE_DIR="dmaapmr" # Dir in simulator_group for dmaap mr for - docker-compose
MR_STUB_COMPOSE_DIR="mrstub" # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka" # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka" # Kafka app name, if just named "kafka" the image will not start...
+MR_KAFKA_PORT=9092 # Kafka port number
MR_ZOOKEEPER_APP_NAME="zookeeper" # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt" # Config files dir on localhost
-
+MR_ZOOKEEPER_PORT="2181" # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt" # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs" # Config files dir on localhost
CR_APP_NAME="callback-receiver" # Name for the Callback receiver
CR_DISPLAY_NAME="Callback Reciever"
CR_EXTERNAL_SECURE_PORT=8091 # Callback receiver container external secure port (host -> container)
CR_INTERNAL_SECURE_PORT=8091 # Callback receiver container internal secure port (container -> container)
CR_APP_CALLBACK="/callbacks" # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr" # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text" # Url for callbacks (data containing text data)
CR_ALIVE_URL="/" # Base path for alive check
CR_COMPOSE_DIR="cr" # Dir in simulator_group for docker-compose
KUBE_PROXY_WEB_INTERNAL_PORT=8081 # Kube Http Proxy container internal port (container -> container)
KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783 # Kube Proxy container external secure port (host -> container)
KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784 # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785 # Kube Proxy container external secure port, docker (host -> container)
+
KUBE_PROXY_PATH="" # Proxy url path, will be set if proxy is started
KUBE_PROXY_ALIVE_URL="/" # Base path for alive check
KUBE_PROXY_COMPOSE_DIR="kubeproxy" # Dir in simulator_group for docker-compose
KUBE_NONRTRIC_NAMESPACE="nonrtric" # Namespace for all nonrtric components
KUBE_SIM_NAMESPACE="nonrtric-ft" # Namespace for simulators (except MR and RICSIM)
-KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
KUBE_ONAP_NAMESPACE="onap" # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap" # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap" # Namespace for sdnc
POLICY_AGENT_EXTERNAL_PORT=8081 # Policy Agent container external port (host -> container)
POLICY_AGENT_INTERNAL_PORT=8081 # Policy Agent container internal port (container -> container)
ECS_VERSION="V1-2" # Version where the types are added in the producer registration
ECS_FEATURE_LEVEL="" # Space separated list of features
-MR_DMAAP_APP_NAME="dmaap-mr" # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router" # Name for the Dmaap MR
MR_STUB_APP_NAME="mr-stub" # Name of the MR stub
MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
MR_STUB_DISPLAY_NAME="Message Router stub"
MR_DMAAP_ALIVE_URL="/topics" # Base path for dmaap-mr alive check
MR_DMAAP_COMPOSE_DIR="dmaapmr" # Dir in simulator_group for dmaap mr for - docker-compose
MR_STUB_COMPOSE_DIR="mrstub" # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka" # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka" # Kafka app name, if just named "kafka" the image will not start...
+MR_KAFKA_PORT=9092 # Kafka port number
MR_ZOOKEEPER_APP_NAME="zookeeper" # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt" # Config files dir on localhost
+MR_ZOOKEEPER_PORT="2181" # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt" # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs" # Config files dir on localhost
CR_APP_NAME="callback-receiver" # Name for the Callback receiver
CR_DISPLAY_NAME="Callback Reciever"
CR_INTERNAL_SECURE_PORT=8091 # Callback receiver container internal secure port (container -> container)
CR_APP_NAME="callback-receiver" # Name for the Callback receiver
CR_APP_CALLBACK="/callbacks" # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr" # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text" # Url for callbacks (data containing text data)
CR_ALIVE_URL="/" # Base path for alive check
CR_COMPOSE_DIR="cr" # Dir in simulator_group for docker-compose
KUBE_PROXY_WEB_INTERNAL_PORT=8081 # Kube Http Proxy container internal port (container -> container)
KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783 # Kube Proxy container external secure port (host -> container)
KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784 # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785 # Kube Proxy container external secure port, docker (host -> container)
+
KUBE_PROXY_PATH="" # Proxy url path, will be set if proxy is started
KUBE_PROXY_ALIVE_URL="/" # Base path for alive check
KUBE_PROXY_COMPOSE_DIR="kubeproxy" # Dir in simulator_group for docker-compose
# Policy Agent image and tags
POLICY_AGENT_IMAGE_BASE="onap/ccsdk-oran-a1policymanagementservice"
-POLICY_AGENT_IMAGE_TAG_LOCAL="1.3.0-SNAPSHOT"
-POLICY_AGENT_IMAGE_TAG_REMOTE_SNAPSHOT="1.3.0-SNAPSHOT"
-POLICY_AGENT_IMAGE_TAG_REMOTE="1.3.0-STAGING-latest" #Will use snapshot repo
-POLICY_AGENT_IMAGE_TAG_REMOTE_RELEASE="1.3.0"
+POLICY_AGENT_IMAGE_TAG_LOCAL="1.2.4-SNAPSHOT"
+POLICY_AGENT_IMAGE_TAG_REMOTE_SNAPSHOT="1.2.4-SNAPSHOT"
+POLICY_AGENT_IMAGE_TAG_REMOTE="1.2.4-STAGING-latest" #Will use snapshot repo
+POLICY_AGENT_IMAGE_TAG_REMOTE_RELEASE="1.2.3"
# SDNC A1 Controller remote image and tag
SDNC_A1_CONTROLLER_IMAGE_BASE="onap/sdnc-image"
#ONAP Zookeeper remote image and tag
ONAP_ZOOKEEPER_IMAGE_BASE="onap/dmaap/zookeeper"
-ONAP_ZOOKEEPER_IMAGE_TAG_REMOTE_RELEASE_ONAP="6.0.3"
+ONAP_ZOOKEEPER_IMAGE_TAG_REMOTE_RELEASE_ONAP="6.1.0"
#No local image for ONAP Zookeeper, remote image always used
#ONAP Kafka remote image and tag
ONAP_KAFKA_IMAGE_BASE="onap/dmaap/kafka111"
-ONAP_KAFKA_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.0.4"
+ONAP_KAFKA_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.1.1"
#No local image for ONAP Kafka, remote image always used
#ONAP DMAAP-MR remote image and tag
ONAP_DMAAPMR_IMAGE_BASE="onap/dmaap/dmaap-mr"
-ONAP_DMAAPMR_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.1.18"
+ONAP_DMAAPMR_IMAGE_TAG_REMOTE_RELEASE_ONAP="1.3.0"
#No local image for ONAP DMAAP-MR, remote image always used
#Kube proxy remote image and tag
KUBE_NONRTRIC_NAMESPACE="nonrtric" # Namespace for all nonrtric components
KUBE_SIM_NAMESPACE="nonrtric-ft" # Namespace for simulators (except MR and RICSIM)
-KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
KUBE_ONAP_NAMESPACE="onap" # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap" # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap" # Namespace for sdnc
POLICY_AGENT_EXTERNAL_PORT=8081 # Policy Agent container external port (host -> container)
POLICY_AGENT_INTERNAL_PORT=8081 # Policy Agent container internal port (container -> container)
ECS_VERSION="V1-2" # Version where the types are added in the producer registration
ECS_FEATURE_LEVEL="INFO-TYPES" # Space separated list of features
-MR_DMAAP_APP_NAME="dmaap-mr" # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router" # Name for the Dmaap MR
MR_STUB_APP_NAME="mr-stub" # Name of the MR stub
MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
MR_STUB_DISPLAY_NAME="Message Router stub"
MR_DMAAP_ALIVE_URL="/topics" # Base path for dmaap-mr alive check
MR_DMAAP_COMPOSE_DIR="dmaapmr" # Dir in simulator_group for dmaap mr for - docker-compose
MR_STUB_COMPOSE_DIR="mrstub" # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka" # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka" # Kafka app name, if just named "kafka" the image will not start...
+MR_KAFKA_PORT=9092 # Kafka port number
MR_ZOOKEEPER_APP_NAME="zookeeper" # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt2" # Config files dir on localhost
+MR_ZOOKEEPER_PORT="2181" # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt" # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs" # Config files dir on localhost
CR_APP_NAME="callback-receiver" # Name for the Callback receiver
CR_DISPLAY_NAME="Callback Reciever"
CR_INTERNAL_SECURE_PORT=8091 # Callback receiver container internal secure port (container -> container)
CR_APP_NAME="callback-receiver" # Name for the Callback receiver
CR_APP_CALLBACK="/callbacks" # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr" # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text" # Url for callbacks (data containing text data)
CR_ALIVE_URL="/" # Base path for alive check
CR_COMPOSE_DIR="cr" # Dir in simulator_group for docker-compose
KUBE_PROXY_WEB_INTERNAL_PORT=8081 # Kube Http Proxy container internal port (container -> container)
KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783 # Kube Proxy container external secure port (host -> container)
KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784 # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785 # Kube Proxy container external secure port, docker (host -> container)
+
KUBE_PROXY_PATH="" # Proxy url path, will be set if proxy is started
KUBE_PROXY_ALIVE_URL="/" # Base path for alive check
KUBE_PROXY_COMPOSE_DIR="kubeproxy" # Dir in simulator_group for docker-compose
KUBE_NONRTRIC_NAMESPACE="nonrtric" # Namespace for all nonrtric components
KUBE_SIM_NAMESPACE="nonrtric-ft" # Namespace for simulators (except MR and RICSIM)
-KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
KUBE_ONAP_NAMESPACE="onap" # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap" # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap" # Namespace for sdnc
POLICY_AGENT_EXTERNAL_PORT=8081 # Policy Agent container external port (host -> container)
POLICY_AGENT_INTERNAL_PORT=8081 # Policy Agent container internal port (container -> container)
ECS_VERSION="V1-2" # Version where the types are added in the producer registration
ECS_FEATURE_LEVEL="" # Space separated list of features
-MR_DMAAP_APP_NAME="dmaap-mr" # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router" # Name for the Dmaap MR
MR_STUB_APP_NAME="mr-stub" # Name of the MR stub
MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
MR_STUB_DISPLAY_NAME="Message Router stub"
MR_DMAAP_ALIVE_URL="/topics" # Base path for dmaap-mr alive check
MR_DMAAP_COMPOSE_DIR="dmaapmr" # Dir in simulator_group for dmaap mr for - docker-compose
MR_STUB_COMPOSE_DIR="mrstub" # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka" # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka" # Kafka app name, if just named "kafka" the image will not start...
+MR_KAFKA_PORT=9092 # Kafka port number
MR_ZOOKEEPER_APP_NAME="zookeeper" # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt" # Config files dir on localhost
-
+MR_ZOOKEEPER_PORT="2181" # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt" # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs" # Config files dir on localhost
CR_APP_NAME="callback-receiver" # Name for the Callback receiver
CR_DISPLAY_NAME="Callback Reciever"
CR_EXTERNAL_SECURE_PORT=8091 # Callback receiver container external secure port (host -> container)
CR_INTERNAL_SECURE_PORT=8091 # Callback receiver container internal secure port (container -> container)
CR_APP_CALLBACK="/callbacks" # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr" # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text" # Url for callbacks (data containing text data)
CR_ALIVE_URL="/" # Base path for alive check
CR_COMPOSE_DIR="cr" # Dir in simulator_group for docker-compose
KUBE_PROXY_WEB_INTERNAL_PORT=8081 # Kube Http Proxy container internal port (container -> container)
KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783 # Kube Proxy container external secure port (host -> container)
KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784 # Kube Proxy container external secure port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733 # Kube Http Proxy container external port, docker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785 # Kube Proxy container external secure port, docker (host -> container)
+
KUBE_PROXY_PATH="" # Proxy url path, will be set if proxy is started
KUBE_PROXY_ALIVE_URL="/" # Base path for alive check
KUBE_PROXY_COMPOSE_DIR="kubeproxy" # Dir in simulator_group for docker-compose
KUBE_NONRTRIC_NAMESPACE="nonrtric" # Namespace for all nonrtric components
KUBE_SIM_NAMESPACE="nonrtric-ft" # Namespace for simulators (except MR and RICSIM)
-KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
+KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
KUBE_ONAP_NAMESPACE="onap" # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap" # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap" # Namespace for sdnc
POLICY_AGENT_EXTERNAL_PORT=8081 # Policy Agent container external port (host -> container)
POLICY_AGENT_INTERNAL_PORT=8081 # Policy Agent container internal port (container -> container)
ECS_VERSION="V1-2" # Version where the types are decoupled from the producer registration
ECS_FEATURE_LEVEL="INFO-TYPES" # Space separated list of features
-MR_DMAAP_APP_NAME="dmaap-mr" # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router" # Name for the Dmaap MR
MR_STUB_APP_NAME="mr-stub" # Name of the MR stub
MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
MR_STUB_DISPLAY_NAME="Message Router stub"
MR_DMAAP_ALIVE_URL="/topics" # Base path for dmaap-mr alive check
MR_DMAAP_COMPOSE_DIR="dmaapmr" # Dir in simulator_group for dmaap mr for - docker-compose
MR_STUB_COMPOSE_DIR="mrstub" # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka" # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka" # Kafka app name, if just named "kafka" the image will not start...
+MR_KAFKA_PORT=9092 # Kafka port number
MR_ZOOKEEPER_APP_NAME="zookeeper" # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt" # Config files dir on localhost
-
+MR_ZOOKEEPER_PORT="2181" # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt" # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs" # Config files dir on localhost
CR_APP_NAME="callback-receiver" # Name for the Callback receiver
CR_DISPLAY_NAME="Callback receiver"
CR_EXTERNAL_SECURE_PORT=8091 # Callback receiver container external secure port (host -> container)
CR_INTERNAL_SECURE_PORT=8091 # Callback receiver container internal secure port (container -> container)
CR_APP_CALLBACK="/callbacks" # Url for callbacks
+CR_APP_CALLBACK_MR="/callbacks-mr" # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text" # Url for callbacks (data containing text data)
CR_ALIVE_URL="/" # Base path for alive check
CR_COMPOSE_DIR="cr" # Dir in simulator_group for docker-compose
KUBE_PROXY_WEB_INTERNAL_PORT=8081 # Kube Http Proxy container internal port (container -> container)
KUBE_PROXY_WEB_EXTERNAL_SECURE_PORT=8783 # Kube Proxy container external secure port (host -> container)
KUBE_PROXY_WEB_INTERNAL_SECURE_PORT=8434 # Kube Proxy container internal secure port (container -> container
+
+KUBE_PROXY_DOCKER_EXTERNAL_PORT=8732 # Kube Http Proxy container external port, doocker (host -> container)
+KUBE_PROXY_DOCKER_EXTERNAL_SECURE_PORT=8784 # Kube Proxy container external secure port, doocker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_PORT=8733 # Kube Http Proxy container external port, doocker (host -> container)
+KUBE_PROXY_WEB_DOCKER_EXTERNAL_SECURE_PORT=8785 # Kube Proxy container external secure port, doocker (host -> container)
+
KUBE_PROXY_PATH="" # Proxy url path, will be set if proxy is started
KUBE_PROXY_ALIVE_URL="/" # Base path for alive check
KUBE_PROXY_COMPOSE_DIR="kubeproxy" # Dir in simulator_group for docker-compose
KUBE_SIM_NAMESPACE="nonrtric-ft" # Namespace for simulators (except MR and RICSIM)
KUBE_A1SIM_NAMESPACE="a1-sim" # Namespace for a1-p simulators (RICSIM)
KUBE_ONAP_NAMESPACE="onap" # Namespace for onap (only message router)
-KUBE_SNDC_NAMESPACE="onap" # Namespace for sdnc
+KUBE_SDNC_NAMESPACE="onap" # Namespace for sdnc
POLICY_AGENT_EXTERNAL_PORT=8081 # Policy Agent container external port (host -> container)
POLICY_AGENT_INTERNAL_PORT=8081 # Policy Agent container internal port (container -> container)
ECS_VERSION="V1-2" # Version where the types are decoupled from the producer registration
ECS_FEATURE_LEVEL="INFO-TYPES TYPE-SUBSCRIPTIONS INFO-TYPE-INFO" # Space separated list of features
-MR_DMAAP_APP_NAME="dmaap-mr" # Name for the Dmaap MR
+MR_DMAAP_APP_NAME="message-router" # Name for the Dmaap MR
MR_STUB_APP_NAME="mr-stub" # Name of the MR stub
MR_DMAAP_DISPLAY_NAME="DMAAP Message Router"
MR_STUB_DISPLAY_NAME="Message Router stub"
MR_DMAAP_ALIVE_URL="/topics" # Base path for dmaap-mr alive check
MR_DMAAP_COMPOSE_DIR="dmaapmr" # Dir in simulator_group for dmaap mr for - docker-compose
MR_STUB_COMPOSE_DIR="mrstub" # Dir in simulator_group for mr stub for - docker-compose
-MR_KAFKA_APP_NAME="kafka" # Kafka app name
+MR_KAFKA_APP_NAME="message-router-kafka" # Kafka app name, if just named "kafka" the image will not start...
+MR_KAFKA_PORT=9092 # Kafka port number
MR_ZOOKEEPER_APP_NAME="zookeeper" # Zookeeper app name
-MR_DMAAP_HOST_MNT_DIR="/mnt2" # Config files dir on localhost
-
+MR_ZOOKEEPER_PORT="2181" # Zookeeper port number
+MR_DMAAP_HOST_MNT_DIR="/mnt" # Basedir localhost for mounted files
+MR_DMAAP_HOST_CONFIG_DIR="/configs" # Config files dir on localhost
CR_APP_NAME="callback-receiver" # Name for the Callback receiver
CR_DISPLAY_NAME="Callback receiver"
CR_INTERNAL_SECURE_PORT=8091 # Callback receiver container internal secure port (container -> container)
CR_APP_CALLBACK="/callbacks" # Url for callbacks
CR_APP_CALLBACK_MR="/callbacks-mr" # Url for callbacks (data from mr which contains string encoded jsons in a json arr)
+CR_APP_CALLBACK_TEXT="/callbacks-text" # Url for callbacks (data containing text data)
CR_ALIVE_URL="/" # Base path for alive check
CR_COMPOSE_DIR="cr" # Dir in simulator_group for docker-compose
KUBE_PROXY_ALIVE_URL="/" # Base path for alive check
KUBE_PROXY_COMPOSE_DIR="kubeproxy" # Dir in simulator_group for docker-compose
+PVC_CLEANER_APP_NAME="pvc-cleaner" # Name for Persistent Volume Cleaner container
+PVC_CLEANER_DISPLAY_NAME="Persistent Volume Cleaner" # Display name for Persistent Volume Cleaner
+PVC_CLEANER_COMPOSE_DIR="pvc-cleaner" # Dir in simulator_group for yamls
+
DMAAP_ADP_APP_NAME="dmaapadapterservice" # Name for Dmaap Adapter container
DMAAP_ADP_DISPLAY_NAME="Dmaap Adapter Service" # Display name for Dmaap Adapter container
DMAAP_ADP_EXTERNAL_PORT=9087 # Dmaap Adapter container external port (host -> container)
#DMAAP_MED_CERT_MOUNT_DIR="./cert"
DMAAP_MED_ALIVE_URL="/status" # Base path for alive check
DMAAP_MED_COMPOSE_DIR="dmaapmed" # Dir in simulator_group for docker-compose
-#MAAP_MED_CONFIG_MOUNT_PATH="/app" # Internal container path for configuration
-DMAAP_MED_DATA_MOUNT_PATH="/configs" # Path in container for data file
-DMAAP_MED_DATA_FILE="type_config.json" # Container data file name
-#DMAAP_MED_CONFIG_FILE=application.yaml # Config file name
-
-PVC_CLEANER_APP_NAME="pvc-cleaner" # Name for Persistent Volume Cleaner container
-PVC_CLEANER_DISPLAY_NAME="Persistent Volume Cleaner" # Display name for Persistent Volume Cleaner
-PVC_CLEANER_COMPOSE_DIR="pvc-cleaner" # Dir in simulator_group for yamls
+#MAAP_MED_CONFIG_MOUNT_PATH="/app" # Internal container path for configuration
+DMAAP_MED_DATA_MOUNT_PATH="/configs" # Path in container for data file
+DMAAP_MED_DATA_FILE="type_config.json" # Container data file name
########################################
# Settings for common curl-based functions
########################################
-UUID="" # UUID used as prefix to the policy id to simulate a real UUID
- # Testscript need to set the UUID otherwise this empty prefix is used
+UUID="" # UUID used as prefix to the policy id to simulate a real UUID
+ # Testscript need to set the UUID otherwise this empty prefix is used
echo " [--ricsim-prefix <prefix> ] [--use-local-image <app-nam>+] [--use-snapshot-image <app-nam>+]"
echo " [--use-staging-image <app-nam>+] [--use-release-image <app-nam>+] [--image-repo <repo-address]"
echo " [--repo-policy local|remote] [--cluster-timeout <timeout-in seconds>] [--print-stats]"
- echo " [--override <override-environment-filename> --pre-clean]"
+ echo " [--override <override-environment-filename> --pre-clean --gen-stats]"
}
if [ $# -eq 1 ] && [ "$1" == "help" ]; then
echo "--print-stats - Print current test stats after each test."
echo "--override <file> - Override setting from the file supplied by --env-file"
echo "--pre-clean - Will clean kube resouces when running docker and vice versa"
+ echo "--gen-stats - Collect container/pod runtime statistics"
echo ""
echo "List of app short names supported: "$APP_SHORT_NAMES
#Var to control if current stats shall be printed
PRINT_CURRENT_STATS=0
+#Var to control if container/pod runtime statistics shall be collected
+COLLECT_RUNTIME_STATS=0
+
#File to keep deviation messages
DEVIATION_FILE=".tmp_deviations"
rm $DEVIATION_FILE &> /dev/null
}
trap trap_fnc ERR
+# Trap to kill subprocesses
+trap "kill 0" EXIT
+
# Counter for tests
TEST_SEQUENCE_NR=1
foundparm=0
fi
fi
+ if [ $paramerror -eq 0 ]; then
+ if [ "$1" == "--gen-stats" ]; then
+ COLLECT_RUNTIME_STATS=1
+ echo "Option set - Collect runtime statistics"
+ shift;
+ foundparm=0
+ fi
+ fi
+
done
echo ""
fi
fi
if [ $RUNMODE == "DOCKER" ]; then
- tmp=$(docker-compose version | grep -i 'Docker Compose version')
+ tmp=$(docker-compose version | grep -i 'docker' | grep -i 'compose' | grep -i 'version')
if [[ "$tmp" == *'v2'* ]]; then
echo -e $RED"docker-compose is using docker-compose version 2"$ERED
echo -e $RED"The test environment only support version 1"$ERED
echo -e $BOLD"======================================================="$EBOLD
echo ""
+ LOG_STAT_ARGS=""
+
for imagename in $APP_SHORT_NAMES; do
__check_included_image $imagename
retcode_i=$?
function_pointer="__"$imagename"_initial_setup"
$function_pointer
+
+ function_pointer="__"$imagename"_statisics_setup"
+ LOG_STAT_ARGS=$LOG_STAT_ARGS" "$($function_pointer)
fi
done
+ if [ $COLLECT_RUNTIME_STATS -eq 1 ]; then
+ ../common/genstat.sh $RUNMODE $SECONDS $TESTLOGS/$ATC/stat_data.csv $LOG_STAT_ARGS &
+ fi
+
}
# Function to print the test result, shall be the last cmd in a test script
echo "Timer measurement in the test script"
echo "===================================="
column -t -s $'\t' $TIMER_MEASUREMENTS
+ if [ $RES_PASS != $RES_TEST ]; then
+ echo -e $RED"Measurement may not be reliable when there are failed test - script timeouts may cause long measurement values"$ERED
+ fi
echo ""
+ if [ $COLLECT_RUNTIME_STATS -eq 1 ]; then
+ echo "Runtime statistics collected in file: "$TESTLOGS/$ATC/stat_data.csv
+ echo ""
+ fi
+
total=$((RES_PASS+RES_FAIL))
if [ $RES_TEST -eq 0 ]; then
echo -e "\033[1mNo tests seem to have been executed. Check the script....\033[0m"
return 0
}
-# Function to create a configmap in kubernetes
-# args: <configmap-name> <namespace> <labelname> <labelid> <path-to-data-file> <path-to-output-yaml>
-# (Not for test scripts)
-__kube_create_configmapXXXXXXXXXXXXX() {
- echo -ne " Creating configmap $1 "$SAMELINE
- #envsubst < $5 > $5"_tmp"
- #cp $5"_tmp" $5 #Need to copy back to orig file name since create configmap neeed the original file name
- kubectl create configmap $1 -n $2 --from-file=$5 --dry-run=client -o yaml > $6
- if [ $? -ne 0 ]; then
- echo -e " Creating configmap $1 $RED Failed $ERED"
- ((RES_CONF_FAIL++))
- return 1
- fi
-
- kubectl apply -f $6 1> /dev/null 2> ./tmp/kubeerr
- if [ $? -ne 0 ]; then
- echo -e " Creating configmap $1 $RED Apply failed $ERED"
- echo " Message: $(<./tmp/kubeerr)"
- ((RES_CONF_FAIL++))
- return 1
- fi
- kubectl label configmap $1 -n $2 $3"="$4 --overwrite 1> /dev/null 2> ./tmp/kubeerr
- if [ $? -ne 0 ]; then
- echo -e " Creating configmap $1 $RED Labeling failed $ERED"
- echo " Message: $(<./tmp/kubeerr)"
- ((RES_CONF_FAIL++))
- return 1
- fi
- # Log the resulting map
- kubectl get configmap $1 -n $2 -o yaml > $6
-
- echo -e " Creating configmap $1 $GREEN OK $EGREEN"
- return 0
-}
-
# This function runs a kubectl cmd where a single output value is expected, for example get ip with jsonpath filter.
# The function retries up to the timeout given in the cmd flag '--cluster-timeout'
# args: <full kubectl cmd with parameters>
if [ $PRE_CLEAN -eq 1 ]; then
echo " Clean docker resouces to free up resources, may take time..."
../common/clean_docker.sh 2&>1 /dev/null
+ echo ""
fi
else
__clean_containers
if [ $PRE_CLEAN -eq 1 ]; then
- echo " Clean kubernetes resouces to free up resources, may take time..."
+ echo " Cleaning kubernetes resouces to free up resources, may take time..."
../common/clean_kube.sh 2&>1 /dev/null
+ echo ""
fi
fi
}
import logging
import socket
from threading import RLock
+from hashlib import md5
# Disable all logging of GET on reading counters and db
class AjaxFilter(logging.Filter):
# Request and response constants
CALLBACK_URL="/callbacks/<string:id>"
CALLBACK_MR_URL="/callbacks-mr/<string:id>" #Json list with string encoded items
+CALLBACK_TEXT_URL="/callbacks-text/<string:id>" # Callback for string of text
APP_READ_URL="/get-event/<string:id>"
APP_READ_ALL_URL="/get-all-events/<string:id>"
DUMP_ALL_URL="/db"
cntr_callbacks[id][1]+=1
msg=msg_callbacks[id][0]
print("Fetching msg for id: "+id+", msg="+str(msg))
- del msg[TIME_STAMP]
+
+ if (isinstance(msg,dict)):
+ del msg[TIME_STAMP]
+ if ("md5" in msg.keys()):
+ print("EXTRACTED MD5")
+ msg=msg["md5"]
+ print("MD5: "+str(msg))
+
del msg_callbacks[id][0]
return json.dumps(msg),200
print("No messages for id: "+id)
msg=msg_callbacks[id]
print("Fetching all msgs for id: "+id+", msg="+str(msg))
for sub_msg in msg:
- del sub_msg[TIME_STAMP]
+ if (isinstance(sub_msg, dict)):
+ del sub_msg[TIME_STAMP]
del msg_callbacks[id]
return json.dumps(msg),200
print("No messages for id: "+id)
with lock:
cntr_msg_callbacks += 1
- msg[TIME_STAMP]=str(datetime.now())
+ if (isinstance(msg, dict)):
+ msg[TIME_STAMP]=str(datetime.now())
if (id in msg_callbacks.keys()):
msg_callbacks[id].append(msg)
else:
return 'OK',200
-# Receive a json callback message with payload fromatted accoirding to output frm the message router
-# URI and payload, (PUT or POST): /callbacks/<id> <json messages>
+# Receive a json callback message with payload formatted according to output from the message router
+# Array of stringified json objects
+# URI and payload, (PUT or POST): /callbacks-mr/<id> <json messages>
# json is a list of string encoded json items
# response: OK 200 or 500 for other errors
@app.route(CALLBACK_MR_URL,
global msg_callbacks
global cntr_msg_callbacks
+ storeas=request.args.get('storeas') #If set, store payload as an md5 hashcode and don't log the payload
+ #Large payloads will otherwise overload the server
try:
print("Received callback (mr) for id: "+id +", content-type="+request.content_type)
- remote_host_logging(request)
print("raw data: str(request.data): "+str(request.data))
+ if (storeas is None):
+ print("raw data: str(request.data): "+str(request.data))
do_delay()
try:
#if (request.content_type == MIME_JSON):
if (MIME_JSON in request.content_type):
data = request.data
msg_list = json.loads(data)
- print("Payload(json): "+str(msg_list))
+ if (storeas is None):
+ print("Payload(json): "+str(msg_list))
else:
msg_list=[]
print("Payload(content-type="+request.content_type+"). Setting empty json as payload")
with lock:
remote_host_logging(request)
for msg in msg_list:
- print("msg (str): "+str(msg))
- msg=json.loads(msg)
- print("msg (json): "+str(msg))
+ if (storeas is None):
+ msg=json.loads(msg)
+ else:
+ #Convert to compact json without ws between parameter and value...
+ #It seems that whitespace is added somewhere along the way to this server
+ msg=json.loads(msg)
+ msg=json.dumps(msg, separators=(',', ':'))
+
+ md5msg={}
+ md5msg["md5"]=md5(msg.encode('utf-8')).hexdigest()
+ msg=md5msg
+ print("msg (json converted to md5 hash): "+str(msg["md5"]))
cntr_msg_callbacks += 1
- msg[TIME_STAMP]=str(datetime.now())
+ if (isinstance(msg, dict)):
+ msg[TIME_STAMP]=str(datetime.now())
if (id in msg_callbacks.keys()):
msg_callbacks[id].append(msg)
else:
return 'OK',200
+# Receive a callback message of a single text message (content type ignored)
+# or a json array of strings (content type json)
+# URI and payload, (PUT or POST): /callbacks-text/<id> <text message>
+# response: OK 200 or 500 for other errors
+@app.route(CALLBACK_TEXT_URL,
+ methods=['PUT','POST'])
+def events_write_text(id):
+ global msg_callbacks
+ global cntr_msg_callbacks
+
+ storeas=request.args.get('storeas') #If set, store payload as a md5 hascode and dont log the payload
+ #Large payloads will otherwise overload the server
+ try:
+ print("Received callback for id: "+id +", content-type="+request.content_type)
+ remote_host_logging(request)
+ if (storeas is None):
+ print("raw data: str(request.data): "+str(request.data))
+ do_delay()
+
+ try:
+ msg_list=None
+ if (MIME_JSON in request.content_type): #Json array of strings
+ msg_list=json.loads(request.data)
+ else:
+ data=request.data.decode("utf-8") #Assuming string
+ msg_list=[]
+ msg_list.append(data)
+
+ for msg in msg_list:
+ if (storeas == "md5"):
+ md5msg={}
+ print("msg: "+str(msg))
+ print("msg (endcode str): "+str(msg.encode('utf-8')))
+ md5msg["md5"]=md5(msg.encode('utf-8')).hexdigest()
+ msg=md5msg
+ print("msg (data converted to md5 hash): "+str(msg["md5"]))
+
+ if (isinstance(msg, dict)):
+ msg[TIME_STAMP]=str(datetime.now())
+
+ with lock:
+ cntr_msg_callbacks += 1
+ if (id in msg_callbacks.keys()):
+ msg_callbacks[id].append(msg)
+ else:
+ msg_callbacks[id]=[]
+ msg_callbacks[id].append(msg)
+
+ if (id in cntr_callbacks.keys()):
+ cntr_callbacks[id][0] += 1
+ else:
+ cntr_callbacks[id]=[]
+ cntr_callbacks[id].append(1)
+ cntr_callbacks[id].append(0)
+ except Exception as e:
+ print(CAUGHT_EXCEPTION+str(e))
+ traceback.print_exc()
+ return 'NOTOK',500
+
+
+ except Exception as e:
+ print(CAUGHT_EXCEPTION+str(e))
+ traceback.print_exc()
+ return 'NOTOK',500
+
+ return 'OK',200
+
### Functions for test ###
# Dump the whole db of current callbacks
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass http://localhost:2222;
+
+ client_max_body_size 0;
}
+
}
##
# SSL Settings
topic_write=""
topic_read=""
+generic_topics_upload_baseurl=""
uploader_thread=None
downloader_thread=None
+generic_uploader_thread=None
-# Function to download messages from dmaap
+# Function to upload PMS messages to dmaap
def dmaap_uploader():
global msg_requests
global cntr_msg_requests_fetched
sleep(0.01)
-# Function to upload messages to dmaap
+# Function to download PMS messages from dmaap
def dmaap_downloader():
global msg_responses
global cntr_msg_responses_submitted
except Exception as e:
sleep(1)
+# Function to upload generic messages to dmaap
+def dmaap_generic_uploader():
+ global msg_requests
+ global cntr_msg_requests_fetched
+
+ print("Starting generic uploader")
+
+ headers_json = {'Content-type': 'application/json', 'Accept': '*/*'}
+ headers_text = {'Content-type': 'text/plain', 'Accept': '*/*'}
+
+ while True:
+ if (len(generic_messages)):
+ for topicname in generic_messages.keys(): #topicname contains the path of the topics, eg. "/event/<topic>"
+ topic_queue=generic_messages[topicname]
+ if (len(topic_queue)>0):
+ if (topicname.endswith(".text")):
+ msg=topic_queue[0]
+ headers=headers_text
+ else:
+ msg=topic_queue[0]
+ msg=json.dumps(msg)
+ headers=headers_json
+ url=generic_topics_upload_baseurl+topicname
+ print("Sending to dmaap : "+ url)
+ print("Sending to dmaap : "+ msg)
+ print("Sending to dmaap : "+ str(headers))
+ try:
+ resp=requests.post(url, data=msg, headers=headers, timeout=10)
+ if (resp.status_code<199 & resp.status_code > 299):
+ print("Failed, response code: " + str(resp.status_code))
+ sleep(1)
+ else:
+ print("Dmaap response code: " + str(resp.status_code))
+ print("Dmaap response text: " + str(resp.text))
+ with lock:
+ topic_queue.pop(0)
+ cntr_msg_requests_fetched += 1
+ except Exception as e:
+ print("Failed, exception: "+ str(e))
+ sleep(1)
+ sleep(0.01)
+
#I'm alive function
@app.route('/',
methods=['GET'])
return 'OK', 200
-# Helper function to create a Dmaap request message
+# Helper function to create a Dmaap PMS request message
# args : <GET|PUT|DELETE> <correlation-id> <json-string-payload - may be None> <url>
# response: json formatted string of a complete Dmaap message
def create_message(operation, correlation_id, payload, url):
### MR-stub interface, for MR control
-# Send a message to MR
+# Send a PMS message to MR
# URI and parameters (PUT or POST): /send-request?operation=<GET|PUT|POST|DELETE>&url=<url>
# response: <correlation-id> (http 200) o4 400 for parameter error or 500 for other errors
@app.route(APP_WRITE_URL,
print(APP_WRITE_URL+"-"+CAUGHT_EXCEPTION+" "+str(e) + " "+traceback.format_exc())
return Response(SERVER_ERROR+" "+str(e), status=500, mimetype=MIME_TEXT)
-# Receive a message response for MR for the included correlation id
+# Receive a PMS message response for MR for the included correlation id
# URI and parameter, (GET): /receive-response?correlationid=<correlation-id>
# response: <json-array of 1 response> 200 or empty 204 or other errors 500
@app.route(APP_READ_URL,
### Dmaap interface ###
-# Read messages stream. URI according to agent configuration.
+# Read PMS messages stream. URI according to agent configuration.
# URI, (GET): /events/A1-POLICY-AGENT-READ/users/policy-agent
# response: 200 <json array of request messages>, or 500 for other errors
@app.route(AGENT_READ_URL,
print("timeout: "+str(timeout)+", start_time: "+str(start_time)+", current_time: "+str(current_time))
return Response("[]", status=200, mimetype=MIME_JSON)
-# Write messages stream. URI according to agent configuration.
+# Write PMS messages stream. URI according to agent configuration.
# URI and payload, (PUT or POST): /events/A1-POLICY-AGENT-WRITE <json array of response messages>
# response: OK 200 or 400 for missing json parameters, 500 for other errors
@app.route(AGENT_WRITE_URL,
return Response(json.dumps(res), status=200, mimetype=MIME_JSON)
return Response("[]", status=200, mimetype=MIME_JSON)
-# Generic POST/PUT catching all urls starting with /events/<topic>.
+# Generic POST catching all urls starting with /events/<topic>.
# Writes the message in a que for that topic
@app.route("/events/<path>",
- methods=['PUT','POST'])
+ methods=['POST'])
def generic_write(path):
global generic_messages
global cntr_msg_responses_submitted
write_method=str(request.method)
with lock:
try:
- payload=request.json
- print(write_method+" on "+urlkey+" json=" + json.dumps(payload))
+ if (urlkey.endswith(".text")):
+ payload=str(request.data.decode('UTF-8'))
+ print(write_method+" on "+urlkey+" text=" + payload)
+ else:
+ payload=request.json
+ print(write_method+" on "+urlkey+" json=" + json.dumps(payload))
topicmsgs=[]
if (urlkey in generic_messages.keys()):
topicmsgs=generic_messages[urlkey]
global generic_messages
global cntr_msg_requests_fetched
+ if generic_topics_upload_baseurl:
+ return Response('Url not available when running as mrstub frontend', status=404, mimetype=MIME_TEXT)
+
urlpath="/events/"+str(path)
urlkey="/events/"+str(path).split("/")[0] #Extract topic
print("GET on topic"+urlkey)
uploader_thread=Thread(target=dmaap_uploader)
uploader_thread.start()
-else:
+if os.environ['GENERIC_TOPICS_UPLOAD_BASEURL'] is not None:
+ print("GENERIC_TOPICS_UPLOAD_BASEURL:"+os.environ['GENERIC_TOPICS_UPLOAD_BASEURL'])
+ generic_topics_upload_baseurl=os.environ['GENERIC_TOPICS_UPLOAD_BASEURL']
+ if generic_topics_upload_baseurl and generic_uploader_thread is None:
+ generic_uploader_thread=Thread(target=dmaap_generic_uploader)
+ generic_uploader_thread.start()
+
+if os.getenv("TOPIC_READ") is None or os.environ['GENERIC_TOPICS_UPLOAD_BASEURL'] is None:
print("No env variables - OK")
if __name__ == "__main__":
# serve dynamic requests
location / {
- proxy_pass http://localhost:2222;
+ proxy_pass http://localhost:2222;
+ client_max_body_size 0;
}
}
##
configuration-filepath: /opt/app/dmaap-adaptor-service/data/application_configuration.json
dmaap-base-url: $MR_SERVICE_PATH
# The url used to adress this component. This is used as a callback url sent to other components.
- dmaap-adapter-base-url: $DMAAP_ADP_SERVICE_PATH
\ No newline at end of file
+ dmaap-adapter-base-url: $DMAAP_ADP_SERVICE_PATH
+ # KAFKA bootstrap server. This is only needed if there are Information Types that use a kafkaInputTopic
+ kafka:
+ bootstrap-servers: $MR_KAFKA_SERVICE_PATH
"types": [
{
"id": "ExampleInformationType",
- "dmaapTopicUrl": "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs",
+ "dmaapTopicUrl": "/events/unauthenticated.dmaapadp.json/dmaapadapterproducer/msgs?timeout=15000&limit=100",
"useHttpProxy": ${DMMAAP_ADP_PROXY_FLAG}
- }
+ },
+ {
+ "id": "ExampleInformationTypeKafka",
+ "kafkaInputTopic": "unauthenticated.dmaapadp_kafka.text",
+ "useHttpProxy": ${DMMAAP_ADP_PROXY_FLAG}
+ }
]
}
\ No newline at end of file
--- /dev/null
+################################################################################
+# Copyright (c) 2021 Nordix Foundation. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+*
+!.gitignore
- name: DMAAP_MR_ADDR
value: "$MR_SERVICE_PATH"
- name: LOG_LEVEL
- value: "Debug"
+ value: Debug
volumes:
- configMap:
defaultMode: 420
- INFO_PRODUCER_PORT=${DMAAP_MED_CONF_SELF_PORT}
- INFO_COORD_ADDR=${ECS_SERVICE_PATH}
- DMAAP_MR_ADDR=${MR_SERVICE_PATH}
- - LOG_LEVEL="Debug"
+ - LOG_LEVEL=Debug
volumes:
- ${DMAAP_MED_HOST_MNT_DIR}/$DMAAP_MED_DATA_FILE:${DMAAP_MED_DATA_MOUNT_PATH}/$DMAAP_MED_DATA_FILE
labels:
--- /dev/null
+################################################################################
+# Copyright (c) 2021 Nordix Foundation. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+*
+!.gitignore
\ No newline at end of file
[
{
"id": "STD_Fault_Messages",
- "dmaapTopicUrl": "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages"
+ "dmaapTopicUrl": "/events/unauthenticated.dmaapmed.json/dmaapmediatorproducer/STD_Fault_Messages?timeout=15000&limit=100"
}
]
}
\ No newline at end of file
apiVersion: apps/v1
kind: Deployment
metadata:
- name: $MR_DMAAP_KUBE_APP_NAME
+ name: $MR_DMAAP_APP_NAME
namespace: $KUBE_ONAP_NAMESPACE
labels:
- run: $MR_DMAAP_KUBE_APP_NAME
+ run: $MR_DMAAP_APP_NAME
autotest: DMAAPMR
spec:
replicas: 1
selector:
matchLabels:
- run: $MR_DMAAP_KUBE_APP_NAME
+ run: $MR_DMAAP_APP_NAME
template:
metadata:
labels:
- run: $MR_DMAAP_KUBE_APP_NAME
+ run: $MR_DMAAP_APP_NAME
autotest: DMAAPMR
spec:
containers:
- - name: $MR_DMAAP_KUBE_APP_NAME
+ - name: $MR_DMAAP_APP_NAME
image: $ONAP_DMAAPMR_IMAGE
imagePullPolicy: $KUBE_IMAGE_PULL_POLICY
ports:
- mountPath: /appl/dmaapMR1/bundleconfig/etc/appprops/MsgRtrApi.properties
subPath: MsgRtrApi.properties
name: dmaapmr-msg-rtr-api
- volumeMounts:
- mountPath: /appl/dmaapMR1/bundleconfig/etc/logback.xml
subPath: logback.xml
name: dmaapmr-log-back
- volumeMounts:
- mountPath: /appl/dmaapMR1/etc/cadi.properties
subPath: cadi.properties
name: dmaapmr-cadi
apiVersion: apps/v1
kind: Deployment
metadata:
- name: $MR_KAFKA_BWDS_NAME
+ name: $MR_KAFKA_APP_NAME
namespace: $KUBE_ONAP_NAMESPACE
labels:
- run: $MR_KAFKA_BWDS_NAME
+ run: $MR_KAFKA_APP_NAME
autotest: DMAAPMR
spec:
replicas: 1
selector:
matchLabels:
- run: $MR_KAFKA_BWDS_NAME
+ run: $MR_KAFKA_APP_NAME
template:
metadata:
labels:
- run: $MR_KAFKA_BWDS_NAME
+ run: $MR_KAFKA_APP_NAME
autotest: DMAAPMR
spec:
containers:
- - name: $MR_KAFKA_BWDS_NAME
+ - name: $MR_KAFKA_APP_NAME
image: $ONAP_KAFKA_IMAGE
imagePullPolicy: $KUBE_IMAGE_PULL_POLICY
ports:
- name: http
- containerPort: 9095
+ containerPort: $MR_KAFKA_PORT
env:
- name: enableCadi
value: 'false'
- name: KAFKA_ZOOKEEPER_CONNECT
- value: 'zookeeper.onap:2181'
+ value: '$MR_ZOOKEEPER_APP_NAME:$MR_ZOOKEEPER_PORT'
- name: KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS
value: '40000'
- name: KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS
- name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP
value: 'INTERNAL_PLAINTEXT:PLAINTEXT,EXTERNAL_PLAINTEXT:PLAINTEXT'
- name: KAFKA_ADVERTISED_LISTENERS
- value: 'INTERNAL_PLAINTEXT://kaka:9092'
-# - name: KAFKA_ADVERTISED_LISTENERS
-# value: 'INTERNAL_PLAINTEXT://localhost:9092'
+ value: 'INTERNAL_PLAINTEXT://$MR_KAFKA_APP_NAME:$MR_KAFKA_PORT'
- name: KAFKA_LISTENERS
- value: 'EXTERNAL_PLAINTEXT://0.0.0.0:9095,INTERNAL_PLAINTEXT://0.0.0.0:9092'
+ value: 'INTERNAL_PLAINTEXT://0.0.0.0:$MR_KAFKA_PORT'
+ # - name: KAFKA_LISTENERS
+ # value: 'EXTERNAL_PLAINTEXT://0.0.0.0:9091,INTERNAL_PLAINTEXT://0.0.0.0:$MR_KAFKA_PORT'
- name: KAFKA_INTER_BROKER_LISTENER_NAME
value: INTERNAL_PLAINTEXT
- name: KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE
- name: KAFKA_OPTS
value: '-Djava.security.auth.login.config=/etc/kafka/secrets/jaas/zk_client_jaas.conf'
- name: KAFKA_ZOOKEEPER_SET_ACL
- value: 'true'
+ value: 'false'
- name: KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR
value: '1'
- name: KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS
value: '1'
-
volumeMounts:
- mountPath: /etc/kafka/secrets/jaas/zk_client_jaas.conf
subPath: zk_client_jaas.conf
imagePullPolicy: $KUBE_IMAGE_PULL_POLICY
ports:
- name: http
- containerPort: 2181
+ containerPort: $MR_ZOOKEEPER_PORT
env:
- name: ZOOKEEPER_REPLICAS
value: '1'
- name: ZOOKEEPER_AUTOPURGE_PURGE_INTERVAL
value: '24'
- name: ZOOKEEPER_CLIENT_PORT
- value: '2181'
+ value: '$MR_ZOOKEEPER_PORT'
- name: KAFKA_OPTS
value: '-Djava.security.auth.login.config=/etc/zookeeper/secrets/jaas/zk_server_jaas.conf -Dzookeeper.kerberos.removeHostFromPrincipal=true -Dzookeeper.kerberos.removeRealmFromPrincipal=true -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider -Dzookeeper.requireClientAuthScheme=sasl'
- name: ZOOKEEPER_SERVER_ID
# LICENSE_START=======================================================
# org.onap.dmaap
# ================================================================================
+# Copyright © 2021 Nordix Foundation. All rights reserved.
# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
##
## Both Cambria and Kafka make use of Zookeeper.
##
-config.zk.servers=zookeeper:2181
+config.zk.servers=$MR_ZOOKEEPER_APP_NAME:$MR_ZOOKEEPER_PORT
###############################################################################
##
## if you want to change request.required.acks it can take this one value
#kafka.metadata.broker.list=localhost:9092,localhost:9093
#kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
-kafka.metadata.broker.list=kafka:9092
+kafka.metadata.broker.list=$MR_KAFKA_APP_NAME:$MR_KAFKA_PORT
##kafka.request.required.acks=-1
#kafka.client.zookeeper=${config.zk.servers}
consumer.timeout.ms=100
cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
consumer.timeout=17
default.partitions=3
-default.replicas=3
+default.replicas=1
##############################################################################
#100mb
maxcontentlength=10000
--- /dev/null
+# ============LICENSE_START===============================================
+# Copyright (C) 2021 Nordix Foundation. All rights reserved.
+# ========================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=================================================
+#
+
+#Removed to disable AAF in test env
+#aaf_locate_url=https://aaf-onap-test.osaaf.org:8095\
+aaf_url=https://AAF_LOCATE_URL/onap.org.osaaf.aaf.service:2.1
+aaf_env=DEV
+aaf_lur=org.onap.aaf.cadi.aaf.v2_0.AAFLurPerm
+
+#Removed to disable AAF in test env
+# cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks
+# cadi_truststore_password=8FyfX+ar;0$uZQ0h9*oXchNX
+
+cadi_keyfile=/appl/dmaapMR1/etc/org.onap.dmaap.mr.keyfile
+
+cadi_alias=dmaapmr@mr.dmaap.onap.org
+cadi_keystore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.p12
+cadi_keystore_password=GDQttV7)BlOvWMf6F7tz&cjy
+cadi_x509_issuers=CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_7, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US
+
+cadi_loglevel=INFO
+cadi_protocols=TLSv1.1,TLSv1.2
+cadi_latitude=37.78187
+cadi_longitude=-122.26147
\ No newline at end of file
<!--
============LICENSE_START=======================================================
+ Copyright © 2021 Nordix Foundation. All rights reserved.
Copyright © 2019 AT&T Intellectual Property. All rights reserved.
================================================================================
Licensed under the Apache License, Version 2.0 (the "License");
image: $ONAP_ZOOKEEPER_IMAGE
container_name: $MR_ZOOKEEPER_APP_NAME
ports:
- - "2181:2181"
+ - "$MR_ZOOKEEPER_PORT:$MR_ZOOKEEPER_PORT"
environment:
ZOOKEEPER_REPLICAS: 1
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_MAX_CLIENT_CNXNS: 200
ZOOKEEPER_AUTOPURGE_SNAP_RETAIN_COUNT: 3
ZOOKEEPER_AUTOPURGE_PURGE_INTERVAL: 24
- ZOOKEEPER_CLIENT_PORT: 2181
+ ZOOKEEPER_CLIENT_PORT: $MR_ZOOKEEPER_PORT
KAFKA_OPTS: -Djava.security.auth.login.config=/etc/zookeeper/secrets/jaas/zk_server_jaas.conf -Dzookeeper.kerberos.removeHostFromPrincipal=true -Dzookeeper.kerberos.removeRealmFromPrincipal=true -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider -Dzookeeper.requireClientAuthScheme=sasl -Dzookeeper.4lw.commands.whitelist=*
ZOOKEEPER_SERVER_ID: 1
volumes:
image: $ONAP_KAFKA_IMAGE
container_name: $MR_KAFKA_APP_NAME
ports:
- - "9092:9092"
+ - "$MR_KAFKA_PORT:$MR_KAFKA_PORT"
environment:
enableCadi: 'false'
- KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+ KAFKA_ZOOKEEPER_CONNECT: $MR_ZOOKEEPER_APP_NAME:$MR_ZOOKEEPER_PORT
KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 40000
KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: 40000
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL_PLAINTEXT:PLAINTEXT,EXTERNAL_PLAINTEXT:PLAINTEXT
- KAFKA_ADVERTISED_LISTENERS: INTERNAL_PLAINTEXT://kafka:9092
- KAFKA_LISTENERS: INTERNAL_PLAINTEXT://0.0.0.0:9092
+ KAFKA_ADVERTISED_LISTENERS: INTERNAL_PLAINTEXT://$MR_KAFKA_APP_NAME:$MR_KAFKA_PORT
+ KAFKA_LISTENERS: INTERNAL_PLAINTEXT://0.0.0.0:$MR_KAFKA_PORT
KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL_PLAINTEXT
KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE: 'false'
KAFKA_OPTS: -Djava.security.auth.login.config=/etc/kafka/secrets/jaas/zk_client_jaas.conf
--- /dev/null
+################################################################################
+# Copyright (c) 2021 Nordix Foundation. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); #
+# you may not use this file except in compliance with the License. #
+# You may obtain a copy of the License at #
+# #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+################################################################################
+*
+!.gitignore
\ No newline at end of file
Client {
- org.apache.zookeeper.server.auth.DigestLoginModule required
- username="kafka"
- password="kafka_secret";
- };
-
+ org.apache.zookeeper.server.auth.DigestLoginModule required
+ username="kafka"
+ password="kafka_secret";
+ };
\ No newline at end of file
+++ /dev/null
-# LICENSE_START=======================================================
-# org.onap.dmaap
-# ================================================================================
-# Copyright © 2020 Nordix Foundation. All rights reserved.
-# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============LICENSE_END=========================================================
-#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
-#
-###############################################################################
-###############################################################################
-##
-## Cambria API Server config
-##
-## Default values are shown as commented settings.
-##
-###############################################################################
-##
-## HTTP service
-##
-## 3904 is standard as of 7/29/14.
-#
-## Zookeeper Connection
-##
-## Both Cambria and Kafka make use of Zookeeper.
-##
-#config.zk.servers=172.18.1.1
-#config.zk.servers={{.Values.zookeeper.name}}:{{.Values.zookeeper.port}}
-config.zk.servers=zookeeper.onap:2181
-
-#config.zk.root=/fe3c/cambria/config
-
-
-###############################################################################
-##
-## Kafka Connection
-##
-## Items below are passed through to Kafka's producer and consumer
-## configurations (after removing "kafka.")
-## if you want to change request.required.acks it can take this one value
-#kafka.metadata.broker.list=localhost:9092,localhost:9093
-#kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
-kafka.metadata.broker.list=akfak-bwds.onap:9092
-##kafka.request.required.acks=-1
-#kafka.client.zookeeper=${config.zk.servers}
-consumer.timeout.ms=100
-zookeeper.connection.timeout.ms=6000
-zookeeper.session.timeout.ms=20000
-zookeeper.sync.time.ms=2000
-auto.commit.interval.ms=1000
-fetch.message.max.bytes =1000000
-auto.commit.enable=false
-
-#(backoff*retries > zksessiontimeout)
-kafka.rebalance.backoff.ms=10000
-kafka.rebalance.max.retries=6
-
-
-###############################################################################
-##
-## Secured Config
-##
-## Some data stored in the config system is sensitive -- API keys and secrets,
-## for example. to protect it, we use an encryption layer for this section
-## of the config.
-##
-## The key is a base64 encode AES key. This must be created/configured for
-## each installation.
-#cambria.secureConfig.key=
-##
-## The initialization vector is a 16 byte value specific to the secured store.
-## This must be created/configured for each installation.
-#cambria.secureConfig.iv=
-
-## Southfield Sandbox
-cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
-cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
-authentication.adminSecret=fe3cCompound
-#cambria.secureConfig.key[pc569h]=YT3XPyxEmKCTLI2NK+Sjbw==
-#cambria.secureConfig.iv[pc569h]=rMm2jhR3yVnU+u2V9Ugu3Q==
-
-
-###############################################################################
-##
-## Consumer Caching
-##
-## Kafka expects live connections from the consumer to the broker, which
-## obviously doesn't work over connectionless HTTP requests. The Cambria
-## server proxies HTTP requests into Kafka consumer sessions that are kept
-## around for later re-use. Not doing so is costly for setup per request,
-## which would substantially impact a high volume consumer's performance.
-##
-## This complicates Cambria server failover, because we often need server
-## A to close its connection before server B brings up the replacement.
-##
-
-## The consumer cache is normally enabled.
-#cambria.consumer.cache.enabled=true
-
-## Cached consumers are cleaned up after a period of disuse. The server inspects
-## consumers every sweepFreqSeconds and will clean up any connections that are
-## dormant for touchFreqMs.
-#cambria.consumer.cache.sweepFreqSeconds=15
-cambria.consumer.cache.touchFreqMs=120000
-##stickforallconsumerrequests=false
-## The cache is managed through ZK. The default value for the ZK connection
-## string is the same as config.zk.servers.
-#cambria.consumer.cache.zkConnect=${config.zk.servers}
-
-##
-## Shared cache information is associated with this node's name. The default
-## name is the hostname plus the HTTP service port this host runs on. (The
-## hostname is determined via InetAddress.getLocalHost ().getCanonicalHostName(),
-## which is not always adequate.) You can set this value explicitly here.
-##
-#cambria.api.node.identifier=<use-something-unique-to-this-instance>
-
-#cambria.rateLimit.maxEmptyPollsPerMinute=30
-#cambria.rateLimitActual.delay.ms=10
-
-###############################################################################
-##
-## Metrics Reporting
-##
-## This server can report its metrics periodically on a topic.
-##
-#metrics.send.cambria.enabled=true
-#metrics.send.cambria.topic=cambria.apinode.metrics #msgrtr.apinode.metrics.dmaap
-#metrics.send.cambria.sendEverySeconds=60
-
-cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
-consumer.timeout=17
-default.partitions=3
-default.replicas=3
-##############################################################################
-#100mb
-maxcontentlength=10000
-
-
-##############################################################################
-#AAF Properties
-msgRtr.namespace.aaf=org.onap.dmaap.mr.topic
-msgRtr.topicfactory.aaf=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
-enforced.topic.name.AAF=org.onap.dmaap.mr
-forceAAF=false
-transidUEBtopicreqd=false
-defaultNSforUEB=org.onap.dmaap.mr
-##############################################################################
-#Mirror Maker Agent
-
-msgRtr.mirrormakeradmin.aaf=org.onap.dmaap.mr.mirrormaker|*|admin
-msgRtr.mirrormakeruser.aaf=org.onap.dmaap.mr.mirrormaker|*|user
-msgRtr.mirrormakeruser.aaf.create=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
-msgRtr.mirrormaker.timeout=15000
-msgRtr.mirrormaker.topic=org.onap.dmaap.mr.mirrormakeragent
-msgRtr.mirrormaker.consumergroup=mmagentserver
-msgRtr.mirrormaker.consumerid=1
-
-kafka.max.poll.interval.ms=300000
-kafka.heartbeat.interval.ms=60000
-kafka.session.timeout.ms=240000
-kafka.max.poll.records=1000
-
# LICENSE_START=======================================================
# org.onap.dmaap
# ================================================================================
-# Copyright © 2020 Nordix Foundation. All rights reserved.
-# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
##
## Both Cambria and Kafka make use of Zookeeper.
##
-#config.zk.servers=172.18.1.1
-#config.zk.servers={{.Values.zookeeper.name}}:{{.Values.zookeeper.port}}
config.zk.servers=zookeeper:2181
-#config.zk.root=/fe3c/cambria/config
-
-
###############################################################################
##
## Kafka Connection
## if you want to change request.required.acks it can take this one value
#kafka.metadata.broker.list=localhost:9092,localhost:9093
#kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
-kafka.metadata.broker.list=kafka:9092
+kafka.metadata.broker.list=message-router-kafka:9092
##kafka.request.required.acks=-1
#kafka.client.zookeeper=${config.zk.servers}
consumer.timeout.ms=100
cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
authentication.adminSecret=fe3cCompound
-#cambria.secureConfig.key[pc569h]=YT3XPyxEmKCTLI2NK+Sjbw==
-#cambria.secureConfig.iv[pc569h]=rMm2jhR3yVnU+u2V9Ugu3Q==
###############################################################################
## This server can report its metrics periodically on a topic.
##
#metrics.send.cambria.enabled=true
-#metrics.send.cambria.topic=cambria.apinode.metrics #msgrtr.apinode.metrics.dmaap
+#metrics.send.cambria.topic=cambria.apinode.metrics
+#msgrtr.apinode.metrics.dmaap
#metrics.send.cambria.sendEverySeconds=60
cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
consumer.timeout=17
default.partitions=3
-default.replicas=3
+default.replicas=1
##############################################################################
#100mb
maxcontentlength=10000
kafka.max.poll.interval.ms=300000
kafka.heartbeat.interval.ms=60000
kafka.session.timeout.ms=240000
-kafka.max.poll.records=1000
-
+kafka.max.poll.records=1000
\ No newline at end of file
-aaf_locate_url=https://aaf-locate.{{ include "common.namespace" . }}:8095
+#Removed to disable AAF in test env
+#aaf_locate_url=https://aaf-onap-test.osaaf.org:8095\
aaf_url=https://AAF_LOCATE_URL/onap.org.osaaf.aaf.service:2.1
aaf_env=DEV
aaf_lur=org.onap.aaf.cadi.aaf.v2_0.AAFLurPerm
-cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks
-cadi_truststore_password=enc:mN6GiIzFQxKGDzAXDOs7b4j8DdIX02QrZ9QOWNRpxV3rD6whPCfizSMZkJwxi_FJ
+#Removed to disable AAF in test env
+# cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks
+# cadi_truststore_password=8FyfX+ar;0$uZQ0h9*oXchNX
cadi_keyfile=/appl/dmaapMR1/etc/org.onap.dmaap.mr.keyfile
cadi_alias=dmaapmr@mr.dmaap.onap.org
cadi_keystore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.p12
-cadi_keystore_password=enc:_JJT2gAEkRzXla5xfDIHal8pIoIB5iIos3USvZQT6sL-l14LpI5fRFR_QIGUCh5W
+cadi_keystore_password=GDQttV7)BlOvWMf6F7tz&cjy
cadi_x509_issuers=CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_7, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US
cadi_loglevel=INFO
cadi_protocols=TLSv1.1,TLSv1.2
cadi_latitude=37.78187
-cadi_longitude=-122.26147
-
+cadi_longitude=-122.26147
\ No newline at end of file
<!--
============LICENSE_START=======================================================
- Copyright © 2020 Nordix Foundation. All rights reserved.
- Copyright © 2019 AT&T Intellectual Property. All rights reserved.
+ Copyright © 2019 AT&T Intellectual Property. All rights reserved.
================================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
</root>
</configuration>
-
Server {
org.apache.zookeeper.server.auth.DigestLoginModule required
- user_kafka=kafka_secret;
-};
-
+ user_kafka="kafka_secret";
+};
\ No newline at end of file
+++ /dev/null
-# LICENSE_START=======================================================
-# org.onap.dmaap
-# ================================================================================
-# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============LICENSE_END=========================================================
-#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
-#
-###############################################################################
-###############################################################################
-##
-## Cambria API Server config
-##
-## Default values are shown as commented settings.
-##
-###############################################################################
-##
-## HTTP service
-##
-## 3904 is standard as of 7/29/14.
-#
-## Zookeeper Connection
-##
-## Both Cambria and Kafka make use of Zookeeper.
-##
-config.zk.servers=zookeeper:2181
-
-###############################################################################
-##
-## Kafka Connection
-##
-## Items below are passed through to Kafka's producer and consumer
-## configurations (after removing "kafka.")
-## if you want to change request.required.acks it can take this one value
-#kafka.metadata.broker.list=localhost:9092,localhost:9093
-#kafka.metadata.broker.list={{.Values.kafka.name}}:{{.Values.kafka.port}}
-kafka.metadata.broker.list=kaka:9092
-##kafka.request.required.acks=-1
-#kafka.client.zookeeper=${config.zk.servers}
-consumer.timeout.ms=100
-zookeeper.connection.timeout.ms=6000
-zookeeper.session.timeout.ms=20000
-zookeeper.sync.time.ms=2000
-auto.commit.interval.ms=1000
-fetch.message.max.bytes =1000000
-auto.commit.enable=false
-
-#(backoff*retries > zksessiontimeout)
-kafka.rebalance.backoff.ms=10000
-kafka.rebalance.max.retries=6
-
-
-###############################################################################
-##
-## Secured Config
-##
-## Some data stored in the config system is sensitive -- API keys and secrets,
-## for example. to protect it, we use an encryption layer for this section
-## of the config.
-##
-## The key is a base64 encode AES key. This must be created/configured for
-## each installation.
-#cambria.secureConfig.key=
-##
-## The initialization vector is a 16 byte value specific to the secured store.
-## This must be created/configured for each installation.
-#cambria.secureConfig.iv=
-
-## Southfield Sandbox
-cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
-cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
-authentication.adminSecret=fe3cCompound
-
-
-###############################################################################
-##
-## Consumer Caching
-##
-## Kafka expects live connections from the consumer to the broker, which
-## obviously doesn't work over connectionless HTTP requests. The Cambria
-## server proxies HTTP requests into Kafka consumer sessions that are kept
-## around for later re-use. Not doing so is costly for setup per request,
-## which would substantially impact a high volume consumer's performance.
-##
-## This complicates Cambria server failover, because we often need server
-## A to close its connection before server B brings up the replacement.
-##
-
-## The consumer cache is normally enabled.
-#cambria.consumer.cache.enabled=true
-
-## Cached consumers are cleaned up after a period of disuse. The server inspects
-## consumers every sweepFreqSeconds and will clean up any connections that are
-## dormant for touchFreqMs.
-#cambria.consumer.cache.sweepFreqSeconds=15
-cambria.consumer.cache.touchFreqMs=120000
-##stickforallconsumerrequests=false
-## The cache is managed through ZK. The default value for the ZK connection
-## string is the same as config.zk.servers.
-#cambria.consumer.cache.zkConnect=${config.zk.servers}
-
-##
-## Shared cache information is associated with this node's name. The default
-## name is the hostname plus the HTTP service port this host runs on. (The
-## hostname is determined via InetAddress.getLocalHost ().getCanonicalHostName(),
-## which is not always adequate.) You can set this value explicitly here.
-##
-#cambria.api.node.identifier=<use-something-unique-to-this-instance>
-
-#cambria.rateLimit.maxEmptyPollsPerMinute=30
-#cambria.rateLimitActual.delay.ms=10
-
-###############################################################################
-##
-## Metrics Reporting
-##
-## This server can report its metrics periodically on a topic.
-##
-#metrics.send.cambria.enabled=true
-#metrics.send.cambria.topic=cambria.apinode.metrics
-#msgrtr.apinode.metrics.dmaap
-#metrics.send.cambria.sendEverySeconds=60
-
-cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
-consumer.timeout=17
-default.partitions=3
-default.replicas=3
-##############################################################################
-#100mb
-maxcontentlength=10000
-
-
-##############################################################################
-#AAF Properties
-msgRtr.namespace.aaf=org.onap.dmaap.mr.topic
-msgRtr.topicfactory.aaf=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
-enforced.topic.name.AAF=org.onap.dmaap.mr
-forceAAF=false
-transidUEBtopicreqd=false
-defaultNSforUEB=org.onap.dmaap.mr
-##############################################################################
-#Mirror Maker Agent
-
-msgRtr.mirrormakeradmin.aaf=org.onap.dmaap.mr.mirrormaker|*|admin
-msgRtr.mirrormakeruser.aaf=org.onap.dmaap.mr.mirrormaker|*|user
-msgRtr.mirrormakeruser.aaf.create=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
-msgRtr.mirrormaker.timeout=15000
-msgRtr.mirrormaker.topic=org.onap.dmaap.mr.mirrormakeragent
-msgRtr.mirrormaker.consumergroup=mmagentserver
-msgRtr.mirrormaker.consumerid=1
-
-kafka.max.poll.interval.ms=300000
-kafka.heartbeat.interval.ms=60000
-kafka.session.timeout.ms=240000
-kafka.max.poll.records=1000
\ No newline at end of file
+++ /dev/null
-#aaf_locate_url=https://aaf-onap-test.osaaf.org:8095\
-aaf_url=https://AAF_LOCATE_URL/onap.org.osaaf.aaf.service:2.1
-aaf_env=DEV
-aaf_lur=org.onap.aaf.cadi.aaf.v2_0.AAFLurPerm
-
-cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks
-cadi_truststore_password=8FyfX+ar;0$uZQ0h9*oXchNX
-
-cadi_keyfile=/appl/dmaapMR1/etc/org.onap.dmaap.mr.keyfile
-
-cadi_alias=dmaapmr@mr.dmaap.onap.org
-cadi_keystore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.p12
-cadi_keystore_password=GDQttV7)BlOvWMf6F7tz&cjy
-cadi_x509_issuers=CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_7, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US
-
-cadi_loglevel=INFO
-cadi_protocols=TLSv1.1,TLSv1.2
-cadi_latitude=37.78187
-cadi_longitude=-122.26147
\ No newline at end of file
apiVersion: v1
kind: Service
metadata:
- name: $MR_DMAAP_KUBE_APP_NAME
+ name: $MR_DMAAP_APP_NAME
namespace: $KUBE_ONAP_NAMESPACE
labels:
- run: $MR_DMAAP_KUBE_APP_NAME
+ run: $MR_DMAAP_APP_NAME
autotest: DMAAPMR
spec:
type: ClusterIP
protocol: TCP
name: https
selector:
- run: $MR_DMAAP_KUBE_APP_NAME
+ run: $MR_DMAAP_APP_NAME
---
apiVersion: v1
kind: Service
metadata:
- name: $MR_KAFKA_BWDS_NAME
+ name: $MR_KAFKA_APP_NAME
namespace: $KUBE_ONAP_NAMESPACE
labels:
- run: $MR_KAFKA_BWDS_NAME
+ run: $MR_KAFKA_APP_NAME
autotest: DMAAPMR
spec:
type: ClusterIP
ports:
- - port: 9092
- targetPort: 9095
+ - port: $MR_KAFKA_PORT
+ targetPort: $MR_KAFKA_PORT
protocol: TCP
name: http
selector:
- run: $MR_KAFKA_BWDS_NAME
+ run: $MR_KAFKA_APP_NAME
---
apiVersion: v1
kind: Service
spec:
type: ClusterIP
ports:
- - port: 2181
- targetPort: 2181
+ - port: $MR_ZOOKEEPER_PORT
+ targetPort: $MR_ZOOKEEPER_PORT
protocol: TCP
name: http
selector:
run: $MR_ZOOKEEPER_APP_NAME
-
-
-# ---
-# apiVersion: v1
-# kind: Service
-# metadata:
-# name: dmaap-mr
-# namespace: $KUBE_ONAP_NAMESPACE
-# labels:
-# run: $MR_DMAAP_KUBE_APP_NAME
-# autotest: DMAAPMR
-# spec:
-# type: ClusterIP
-# ports:
-# - port: $MR_EXTERNAL_PORT
-# targetPort: $MR_INTERNAL_PORT
-# protocol: TCP
-# name: http
-# - port: $MR_EXTERNAL_SECURE_PORT
-# targetPort: $MR_INTERNAL_SECURE_PORT
-# protocol: TCP
-# name: https
-# selector:
-# run: $MR_DMAAP_KUBE_APP_NAME
-# ---
-# apiVersion: v1
-# kind: Service
-# metadata:
-# name: dmaap-kafka
-# namespace: $KUBE_ONAP_NAMESPACE
-# labels:
-# run: $MR_KAFKA_BWDS_NAME
-# autotest: DMAAPMR
-# spec:
-# type: ClusterIP
-# ports:
-# - port: 9092
-# targetPort: 9092
-# protocol: TCP
-# name: http
-# selector:
-# run: $MR_KAFKA_BWDS_NAME
-# ---
-# apiVersion: v1
-# kind: Service
-# metadata:
-# name: kafka
-# namespace: $KUBE_ONAP_NAMESPACE
-# labels:
-# run: $MR_KAFKA_BWDS_NAME
-# autotest: DMAAPMR
-# spec:
-# type: ClusterIP
-# ports:
-# - port: 9092
-# targetPort: 9092
-# protocol: TCP
-# name: http
-# selector:
-# run: $MR_KAFKA_BWDS_NAME
-# ---
-# apiVersion: v1
-# kind: Service
-# metadata:
-# name: dmaap-zookeeper
-# namespace: $KUBE_ONAP_NAMESPACE
-# labels:
-# run: $MR_ZOOKEEPER_APP_NAME
-# autotest: DMAAPMR
-# spec:
-# type: ClusterIP
-# ports:
-# - port: 2181
-# targetPort: 2181
-# protocol: TCP
-# name: http
-# selector:
- run: $MR_ZOOKEEPER_APP_NAME
\ No newline at end of file
- name: TOPIC_READ
value: $TOPIC_READ
- name: TOPIC_WRITE
- value: $TOPIC_WRITE
\ No newline at end of file
+ value: $TOPIC_WRITE
+ - name: GENERIC_TOPICS_UPLOAD_BASEURL
+ value: $GENERIC_TOPICS_UPLOAD_BASEURL
\ No newline at end of file
environment:
- TOPIC_READ=${TOPIC_READ}
- TOPIC_WRITE=${TOPIC_WRITE}
+ - GENERIC_TOPICS_UPLOAD_BASEURL=${GENERIC_TOPICS_UPLOAD_BASEURL}
labels:
- "nrttest_app=MR"
- "nrttest_dp=${MR_STUB_DISPLAY_NAME}"
kind: Deployment
metadata:
name: $SDNC_APP_NAME
- namespace: $KUBE_SNDC_NAMESPACE
+ namespace: $KUBE_SDNC_NAMESPACE
labels:
run: $SDNC_APP_NAME
autotest: SDNC
kind: Deployment
metadata:
name: $SDNC_DB_APP_NAME
- namespace: $KUBE_SNDC_NAMESPACE
+ namespace: $KUBE_SDNC_NAMESPACE
labels:
run: $SDNC_DB_APP_NAME
autotest: SDNC
kind: Deployment
metadata:
name: $SDNC_APP_NAME
- namespace: $KUBE_SNDC_NAMESPACE
+ namespace: $KUBE_SDNC_NAMESPACE
labels:
run: $SDNC_APP_NAME
autotest: SDNC
kind: Deployment
metadata:
name: $SDNC_DB_APP_NAME
- namespace: $KUBE_SNDC_NAMESPACE
+ namespace: $KUBE_SDNC_NAMESPACE
labels:
run: $SDNC_DB_APP_NAME
autotest: SDNC
kind: Service
metadata:
name: $SDNC_APP_NAME
- namespace: $KUBE_SNDC_NAMESPACE
+ namespace: $KUBE_SDNC_NAMESPACE
labels:
run: $SDNC_APP_NAME
autotest: SDNC
kind: Service
metadata:
name: dbhost
- namespace: $KUBE_SNDC_NAMESPACE
+ namespace: $KUBE_SDNC_NAMESPACE
labels:
run: $SDNC_DB_APP_NAME
autotest: SDNC
kind: Service
metadata:
name: sdnctldb01
- namespace: $KUBE_SNDC_NAMESPACE
+ namespace: $KUBE_SDNC_NAMESPACE
labels:
run: $SDNC_DB_APP_NAME
autotest: SDNC
.history
oruclosedloop
-simulator
+producer
+sdnr
This consumer creates a job of type `STD_Fault_Messages` in the Information Coordinator Service (ICS). When it receives messages, it checks if they are link failure messages. If they are, it checks if the event severity is other than normal. If so, it looks up the O-DU ID mapped to the O-RU the message originates from and sends a configuration message to the O-DU through SDNC. If the event severity is normal, then it logs, on `Debug` level, that the link failure has been cleared.
-The producer takes a number of environment variables, described below, as configuration.
+## Configuration
+
+The consumer takes a number of environment variables, described below, as configuration.
>- CONSUMER_HOST **Required**. The host for the consumer. Example: `http://mrproducer`
->- CONSUMER_HOST **Required**. The port for the consumer. Example: `8095`
->- LOG_LEVEL Optional. The log level, which can be `Error`, `Warn`, `Info` or `Debug`. Defaults to `Info`.
+>- CONSUMER_PORT **Required**. The port for the consumer. Example: `8095`
+>- CONSUMER_CERT_PATH Optional. The path to the certificate to use for https. Defaults to `security/consumer.crt`.
+>- CONSUMER_KEY_PATH Optional. The path to the key to the certificate to use for https. Defaults to `security/consumer.key`.
>- INFO_COORD_ADDR Optional. The address of the Information Coordinator. Defaults to `http://enrichmentservice:8083`.
->- SDNR_HOST Optional. The host for SDNR. Defaults to `http://localhost`.
->- SDNR_PORT Optional. The port for SDNR. Defaults to `3904`.
+>- SDNR_ADDR Optional. The address for SDNR. Defaults to `http://localhost:3904`.
>- SDNR_USER Optional. The user for the SDNR. Defaults to `admin`.
>- SDNR_PASSWORD Optional. The password for the SDNR user. Defaults to `Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U`.
>- ORU_TO_ODU_MAP_FILE Optional. The file containing the mapping from O-RU ID to O-DU ID. Defaults to `o-ru-to-o-du-map.csv`.
+>- LOG_LEVEL Optional. The log level, which can be `Error`, `Warn`, `Info` or `Debug`. Defaults to `Info`.
+
+Any of the addresses used by this product can be configured to use https, by specifying it as the scheme of the address URI. The client will not use server certificate verification. The consumer's own callback will only listen to the scheme configured in the consumer host address.
+
+The configured public key and certificate shall be PEM-encoded. A self signed certificate and key are provided in the `security` folder of the project. These files should be replaced for production. To generate a self signed key and certificate, use the example code below:
+
+    openssl req -new -x509 -sha256 -newkey rsa:2048 -nodes -keyout server.key -out server.crt -days 3650
-The creation of the job is not done when the consumer is started. Instead the consumer provides a REST API where it can be started and stopped, described below.
+## Functionality
+
+The creation of the job is not done when the consumer is started. Instead the consumer provides a REST API where it can be started and stopped, described below.
>- /start Creates the job in ICS.
>- /stop Deletes the job in ICS.
If the consumer is shut down with a SIGTERM, it will also delete the job before exiting.
+## Development
+
+To make it easy to test during development of the consumer, two stubs are provided in the `stub` folder.
+
+One, under the `producer` folder, called `producer` that stubs the producer and pushes an array with one message with `eventSeverity` alternating between `NORMAL` and `CRITICAL`. To build and start the stub, do the following:
+>1. cd stub/producer
+>2. go build
+>3. ./producer
+
+One, under the `sdnr` folder, called `sdnr` that at startup will listen for REST calls and print the body of them. By default, it listens to the port `3904`, but this can be overridden by passing a `-port [PORT]` flag when starting the stub. To build and start the stub, do the following:
+>1. cd stub/sdnr
+>2. go build
+>3. ./sdnr
+
+Mocks needed for unit tests have been generated using `github.com/stretchr/testify/mock` and are checked in under the `mocks` folder. **Note!** Keep in mind that if any of the mocked interfaces change, a new mock for that interface must be generated and checked in.
+
## License
Copyright (C) 2021 Nordix Foundation.
require (
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/google/uuid v1.3.0 // indirect
- github.com/gorilla/mux v1.8.0 // indirect
+ github.com/gorilla/mux v1.8.0
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/stretchr/objx v0.1.1 // indirect
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 // indirect
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
)
+
+require (
+ github.com/hashicorp/go-cleanhttp v0.5.1 // indirect
+ github.com/hashicorp/go-retryablehttp v0.7.0 // indirect
+)
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-retryablehttp v0.7.0 h1:eu1EI/mbirUgP5C8hVsTNaGZreBDlYiwC1FZWkvQPQ4=
+github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
package config
import (
+ "fmt"
"os"
"strconv"
)
type Config struct {
- LogLevel log.Level
ConsumerHost string
ConsumerPort int
InfoCoordinatorAddress string
- SDNRHost string
- SDNRPort int
+ SDNRAddress string
SDNRUser string
SDNPassword string
ORUToODUMapFile string
+ ConsumerCertPath string
+ ConsumerKeyPath string
+ LogLevel log.Level
}
func New() *Config {
return &Config{
- LogLevel: getLogLevel(),
ConsumerHost: getEnv("CONSUMER_HOST", ""),
ConsumerPort: getEnvAsInt("CONSUMER_PORT", 0),
InfoCoordinatorAddress: getEnv("INFO_COORD_ADDR", "http://enrichmentservice:8083"),
- SDNRHost: getEnv("SDNR_HOST", "http://localhost"),
- SDNRPort: getEnvAsInt("SDNR_PORT", 3904),
+ SDNRAddress: getEnv("SDNR_ADDR", "http://localhost:3904"),
SDNRUser: getEnv("SDNR_USER", "admin"),
SDNPassword: getEnv("SDNR_PASSWORD", "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U"),
ORUToODUMapFile: getEnv("ORU_TO_ODU_MAP_FILE", "o-ru-to-o-du-map.csv"),
+ ConsumerCertPath: getEnv("CONSUMER_CERT_PATH", "security/consumer.crt"),
+ ConsumerKeyPath: getEnv("CONSUMER_KEY_PATH", "security/consumer.key"),
+ LogLevel: getLogLevel(),
}
}
+func (c Config) String() string {
+ return fmt.Sprintf("ConsumerHost: %v, ConsumerPort: %v, InfoCoordinatorAddress: %v, SDNRAddress: %v, SDNRUser: %v, SDNRPassword: %v, ORUToODUMapFile: %v, ConsumerCertPath: %v, ConsumerKeyPath: %v, LogLevel: %v", c.ConsumerHost, c.ConsumerPort, c.InfoCoordinatorAddress, c.SDNRAddress, c.SDNRUser, c.SDNPassword, c.ORUToODUMapFile, c.ConsumerCertPath, c.ConsumerKeyPath, c.LogLevel)
+}
+
func getEnv(key string, defaultVal string) string {
if value, exists := os.LookupEnv(key); exists {
return value
func TestNew_envVarsSetConfigContainSetValues(t *testing.T) {
assertions := require.New(t)
- os.Setenv("LOG_LEVEL", "Debug")
os.Setenv("CONSUMER_HOST", "consumerHost")
os.Setenv("CONSUMER_PORT", "8095")
os.Setenv("INFO_COORD_ADDR", "infoCoordAddr")
- os.Setenv("SDNR_HOST", "sdnrHost")
- os.Setenv("SDNR_PORT", "3908")
+ os.Setenv("SDNR_ADDR", "sdnrHost:3908")
os.Setenv("SDNR_USER", "admin")
os.Setenv("SDNR_PASSWORD", "pwd")
os.Setenv("ORU_TO_ODU_MAP_FILE", "file")
+ os.Setenv("CONSUMER_CERT_PATH", "cert")
+ os.Setenv("CONSUMER_KEY_PATH", "key")
+ os.Setenv("LOG_LEVEL", "Debug")
t.Cleanup(func() {
os.Clearenv()
})
wantConfig := Config{
- LogLevel: log.DebugLevel,
ConsumerHost: "consumerHost",
ConsumerPort: 8095,
InfoCoordinatorAddress: "infoCoordAddr",
- SDNRHost: "sdnrHost",
- SDNRPort: 3908,
+ SDNRAddress: "sdnrHost:3908",
SDNRUser: "admin",
SDNPassword: "pwd",
ORUToODUMapFile: "file",
+ ConsumerCertPath: "cert",
+ ConsumerKeyPath: "key",
+ LogLevel: log.DebugLevel,
}
got := New()
os.Clearenv()
})
wantConfig := Config{
- LogLevel: log.InfoLevel,
ConsumerHost: "",
ConsumerPort: 0,
InfoCoordinatorAddress: "http://enrichmentservice:8083",
- SDNRHost: "http://localhost",
- SDNRPort: 3904,
+ SDNRAddress: "http://localhost:3904",
SDNRUser: "admin",
SDNPassword: "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U",
ORUToODUMapFile: "o-ru-to-o-du-map.csv",
+ ConsumerCertPath: "security/consumer.crt",
+ ConsumerKeyPath: "security/consumer.key",
+ LogLevel: log.InfoLevel,
}
got := New()
os.Clearenv()
})
wantConfig := Config{
- LogLevel: log.InfoLevel,
ConsumerHost: "",
ConsumerPort: 0,
InfoCoordinatorAddress: "http://enrichmentservice:8083",
- SDNRHost: "http://localhost",
- SDNRPort: 3904,
+ SDNRAddress: "http://localhost:3904",
SDNRUser: "admin",
SDNPassword: "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U",
ORUToODUMapFile: "o-ru-to-o-du-map.csv",
+ ConsumerCertPath: "security/consumer.crt",
+ ConsumerKeyPath: "security/consumer.key",
+ LogLevel: log.InfoLevel,
}
got := New()
assertions.Equal(&wantConfig, got)
if error := restclient.Put(lfh.config.SDNRAddress+sdnrPath, unlockMessage, lfh.client, lfh.config.SDNRUser, lfh.config.SDNRPassword); error == nil {
log.Debugf("Sent unlock message for O-RU: %v to O-DU: %v.", oRuId, oDuId)
} else {
- log.Warn(error)
+ log.Warn("Send of unlock message failed due to ", error)
}
} else {
- log.Warn(err)
+ log.Warn("Send of unlock message failed due to ", err)
}
}
import (
"bytes"
+ "crypto/tls"
"fmt"
"io"
+ "math"
"net/http"
+ "net/url"
+ "time"
+
+ "github.com/hashicorp/go-retryablehttp"
)
type RequestError struct {
}
func (e RequestError) Error() string {
- return fmt.Sprintf("Request failed due to error response with status: %v and body: %v", e.StatusCode, string(e.Body))
+ return fmt.Sprintf("error response with status: %v and body: %v", e.StatusCode, string(e.Body))
}
// HTTPClient interface
return do(http.MethodDelete, url, nil, client)
}
+func CreateClientCertificate(certPath string, keyPath string) (tls.Certificate, error) {
+ if cert, err := tls.LoadX509KeyPair(certPath, keyPath); err == nil {
+ return cert, nil
+ } else {
+ return tls.Certificate{}, fmt.Errorf("cannot create x509 keypair from cert file %s and key file %s due to: %v", certPath, keyPath, err)
+ }
+}
+
+func CreateRetryClient(cert tls.Certificate) *http.Client {
+ rawRetryClient := retryablehttp.NewClient()
+ rawRetryClient.RetryWaitMax = time.Minute
+ rawRetryClient.RetryMax = math.MaxInt
+ rawRetryClient.HTTPClient.Transport = getSecureTransportWithoutVerify(cert)
+
+ client := rawRetryClient.StandardClient()
+ return client
+}
+
+func IsUrlSecure(configUrl string) bool {
+ u, _ := url.Parse(configUrl)
+ return u.Scheme == "https"
+}
+
+func getSecureTransportWithoutVerify(cert tls.Certificate) *http.Transport {
+ return &http.Transport{
+ TLSClientConfig: &tls.Config{
+ Certificates: []tls.Certificate{
+ cert,
+ },
+ InsecureSkipVerify: true,
+ },
+ }
+}
+
func do(method string, url string, body []byte, client HTTPClient, userInfo ...string) error {
if req, reqErr := http.NewRequest(method, url, bytes.NewBuffer(body)); reqErr == nil {
if body != nil {
import (
"bytes"
+ "crypto/tls"
"fmt"
"io/ioutil"
+ "math"
"net/http"
+ "reflect"
"testing"
+ "time"
+ "github.com/hashicorp/go-retryablehttp"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"oransc.org/usecase/oruclosedloop/mocks"
StatusCode: http.StatusBadRequest,
Body: []byte("error"),
}
- assertions.Equal("Request failed due to error response with status: 400 and body: error", actualError.Error())
+ assertions.Equal("error response with status: 400 and body: error", actualError.Error())
}
func TestPutWithoutAuth(t *testing.T) {
})
}
}
+
+func Test_createClientCertificate(t *testing.T) {
+ assertions := require.New(t)
+ wantedCert, _ := tls.LoadX509KeyPair("../../security/consumer.crt", "../../security/consumer.key")
+ type args struct {
+ certPath string
+ keyPath string
+ }
+ tests := []struct {
+ name string
+ args args
+ wantCert tls.Certificate
+ wantErr error
+ }{
+ {
+ name: "Paths to cert info ok should return cerftificate",
+ args: args{
+ certPath: "../../security/consumer.crt",
+ keyPath: "../../security/consumer.key",
+ },
+ wantCert: wantedCert,
+ },
+ {
+ name: "Paths to cert info not ok should return error with info about error",
+ args: args{
+ certPath: "wrong_cert",
+ keyPath: "wrong_key",
+ },
+ wantErr: fmt.Errorf("cannot create x509 keypair from cert file wrong_cert and key file wrong_key due to: open wrong_cert: no such file or directory"),
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cert, err := CreateClientCertificate(tt.args.certPath, tt.args.keyPath)
+ assertions.Equal(tt.wantCert, cert, tt.name)
+ assertions.Equal(tt.wantErr, err, tt.name)
+ })
+ }
+}
+
+func Test_CreateRetryClient(t *testing.T) {
+ assertions := require.New(t)
+
+ client := CreateRetryClient(tls.Certificate{})
+
+ transport := client.Transport
+ assertions.Equal("*retryablehttp.RoundTripper", reflect.TypeOf(transport).String())
+ retryableTransport := transport.(*retryablehttp.RoundTripper)
+ retryableClient := retryableTransport.Client
+ assertions.Equal(time.Minute, retryableClient.RetryWaitMax)
+ assertions.Equal(math.MaxInt, retryableClient.RetryMax)
+}
+
+func TestIsUrlSecured(t *testing.T) {
+ assertions := require.New(t)
+
+ assertions.True(IsUrlSecure("https://url"))
+
+ assertions.False(IsUrlSecure("http://url"))
+}
package main
import (
+ "crypto/tls"
"encoding/json"
"fmt"
"net/http"
"os"
"os/signal"
"syscall"
- "time"
"github.com/gorilla/mux"
log "github.com/sirupsen/logrus"
ListenAndServe() error
}
-const timeoutHTTPClient = time.Second * 5
const jobId = "14e7bb84-a44d-44c1-90b7-6995a92ad43c"
var jobRegistrationInfo = struct {
configuration = config.New()
log.SetLevel(configuration.LogLevel)
-
- client = &http.Client{
- Timeout: timeoutHTTPClient,
- }
+ log.Debug("Using configuration: ", configuration)
consumerPort = fmt.Sprint(configuration.ConsumerPort)
jobRegistrationInfo.JobResultUri = configuration.ConsumerHost + ":" + consumerPort
linkfailureConfig = linkfailure.Configuration{
- SDNRAddress: configuration.SDNRHost + ":" + fmt.Sprint(configuration.SDNRPort),
+ SDNRAddress: configuration.SDNRAddress,
SDNRUser: configuration.SDNRUser,
SDNRPassword: configuration.SDNPassword,
}
log.Fatalf("Unable to create LookupService due to inability to get O-RU-ID to O-DU-ID map. Cause: %v", initErr)
}
+ var cert tls.Certificate
+ if c, err := restclient.CreateClientCertificate(configuration.ConsumerCertPath, configuration.ConsumerKeyPath); err == nil {
+ cert = c
+ } else {
+ log.Fatalf("Stopping producer due to error: %v", err)
+ }
+ client = restclient.CreateRetryClient(cert)
+
go func() {
- startServer(&http.Server{
- Addr: ":" + consumerPort,
- Handler: getRouter(),
- })
- deleteJob()
+ startServer()
os.Exit(1) // If the startServer function exits, it is because there has been a failure in the server, so we exit.
}()
if configuration.ConsumerHost == "" || configuration.ConsumerPort == 0 {
return fmt.Errorf("consumer host and port must be provided")
}
+
+ if configuration.ConsumerCertPath == "" || configuration.ConsumerKeyPath == "" {
+ return fmt.Errorf("missing CONSUMER_CERT and/or CONSUMER_KEY")
+ }
+
return nil
}
return r
}
-func startServer(server Server) {
- if err := server.ListenAndServe(); err != nil {
+func startServer() {
+ var err error
+ if restclient.IsUrlSecure(configuration.ConsumerHost) {
+ err = http.ListenAndServeTLS(fmt.Sprintf(":%v", configuration.ConsumerPort), configuration.ConsumerCertPath, configuration.ConsumerKeyPath, getRouter())
+ } else {
+ err = http.ListenAndServe(fmt.Sprintf(":%v", configuration.ConsumerPort), getRouter())
+ }
+ if err != nil {
log.Errorf("Server stopped unintentionally due to: %v. Deleteing job.", err)
if deleteErr := deleteJob(); deleteErr != nil {
log.Error(fmt.Sprintf("Unable to delete consumer job due to: %v. Please remove job %v manually.", deleteErr, jobId))
doInit()
wantedConfiguration := &config.Config{
- LogLevel: log.InfoLevel,
ConsumerHost: "consumerHost",
ConsumerPort: 8095,
InfoCoordinatorAddress: "http://enrichmentservice:8083",
- SDNRHost: "http://localhost",
- SDNRPort: 3904,
+ SDNRAddress: "http://localhost:3904",
SDNRUser: "admin",
SDNPassword: "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U",
ORUToODUMapFile: "o-ru-to-o-du-map.csv",
+ ConsumerCertPath: "security/consumer.crt",
+ ConsumerKeyPath: "security/consumer.key",
+ LogLevel: log.InfoLevel,
}
assertions.Equal(wantedConfiguration, configuration)
assertions.Equal(wantedConfiguration.ConsumerHost+":"+fmt.Sprint(wantedConfiguration.ConsumerPort), jobRegistrationInfo.JobResultUri)
wantedLinkFailureConfig := linkfailure.Configuration{
- SDNRAddress: wantedConfiguration.SDNRHost + ":" + fmt.Sprint(wantedConfiguration.SDNRPort),
+ SDNRAddress: wantedConfiguration.SDNRAddress,
SDNRUser: wantedConfiguration.SDNRUser,
SDNRPassword: wantedConfiguration.SDNPassword,
}
name: "Valid config, should return nil",
args: args{
configuration: &config.Config{
- ConsumerHost: "host",
- ConsumerPort: 80,
+ ConsumerHost: "host",
+ ConsumerPort: 80,
+ ConsumerCertPath: "security/consumer.crt",
+ ConsumerKeyPath: "security/consumer.key",
},
},
},
assertions.Equal("/admin/stop", path)
}
-func Test_startServer_shouldDeleteJobWhenServerStopsWithErrorAndLog(t *testing.T) {
- assertions := require.New(t)
-
- var buf bytes.Buffer
- log.SetOutput(&buf)
-
- os.Setenv("CONSUMER_PORT", "wrong")
- t.Cleanup(func() {
- log.SetOutput(os.Stderr)
- })
-
- mockServer := &mocks.Server{}
- mockServer.On("ListenAndServe").Return(errors.New("Server failure"))
-
- startServer(mockServer)
-
- log := buf.String()
- assertions.Contains(log, "level=error")
- assertions.Contains(log, "Server stopped unintentionally due to: Server failure. Deleteing job.")
- assertions.Contains(log, "Please remove job 14e7bb84-a44d-44c1-90b7-6995a92ad43c manually")
-}
-
func Test_startHandler(t *testing.T) {
assertions := require.New(t)
+++ /dev/null
-// Code generated by mockery v1.0.0. DO NOT EDIT.
-
-package mocks
-
-import mock "github.com/stretchr/testify/mock"
-
-// Server is an autogenerated mock type for the Server type
-type Server struct {
- mock.Mock
-}
-
-// ListenAndServe provides a mock function with given fields:
-func (_m *Server) ListenAndServe() error {
- ret := _m.Called()
-
- var r0 error
- if rf, ok := ret.Get(0).(func() error); ok {
- r0 = rf()
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
-}
--- /dev/null
+-----BEGIN CERTIFICATE-----
+MIIDXzCCAkegAwIBAgIUEbuDTP0ixwxCxCQ9tR5DijGCbtkwDQYJKoZIhvcNAQEL
+BQAwPzELMAkGA1UEBhMCc2UxDDAKBgNVBAoMA0VTVDERMA8GA1UECwwIRXJpY3Nz
+b24xDzANBgNVBAMMBnNlcnZlcjAeFw0yMTEwMTkxNDA1MzVaFw0zMTEwMTcxNDA1
+MzVaMD8xCzAJBgNVBAYTAnNlMQwwCgYDVQQKDANFU1QxETAPBgNVBAsMCEVyaWNz
+c29uMQ8wDQYDVQQDDAZzZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQDnH4imV8kx/mXz6BDbq8e4oZGqGgv7V837iNspj/zIZXhEMP9311fdsZEE
+Y6VWU47bSYRn2xJOP+wmfKewbw0OcEWu/RkdvO7Y0VIVrlbEJYu88ZjK14dMUpfe
+72iMbTc5q2uYi0ImB5/m3jyMSXgso6NDWuvXrp2VSWjb1tG++des9rhvyrZyNrua
+I4iOnMvvuc71gvHol7appRu3+LRTQFYsAizdfHEQ9k949MZH4fiIu5NmCT/wNJVo
+uUZYYJseFhOlIANaXn6qmz7kKVYfxfV+Z5EccaRixaClCFwyRdmjgLyyeuI4/QPD
+x9PjmGmf6eOEC2ZHBi4OHwjIzmLnAgMBAAGjUzBRMB0GA1UdDgQWBBRjeDLPpLm2
+W623wna7xBCbHxtxVjAfBgNVHSMEGDAWgBRjeDLPpLm2W623wna7xBCbHxtxVjAP
+BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAbFUAWFZaIMXmd5qv/
+xJYr1oPJpsmbgWGRWZWDZqbUabvWObyXlDJWIau60BerfcC5TmyElBjTyONSGwCT
+tq+SVB0PXpgqa8ZQ25Ytn2AMDFWhrGbOefCXs6te3HGq6BNubTWrOVIvJypCwC95
++iXVuDd4eg+n2fWv7h7fZRZHum/zLoRxB2lKoMMbc/BQX9hbtP6xyvIVvaYdhcJw
+VzJJGIDqpMiMH6IBaOFSmgfOyGblGKAicj3E3kpGBfadLx3R+9V6aG7zyBnVbr2w
+YJbV2Ay4PrF+PTpCMB/mNwC5RBTYHpSNdrCMSyq3I+QPVJq8dPJr7fd1Uwl3WHqX
+FV0h
+-----END CERTIFICATE-----
--- /dev/null
+-----BEGIN PRIVATE KEY-----
+MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDnH4imV8kx/mXz
+6BDbq8e4oZGqGgv7V837iNspj/zIZXhEMP9311fdsZEEY6VWU47bSYRn2xJOP+wm
+fKewbw0OcEWu/RkdvO7Y0VIVrlbEJYu88ZjK14dMUpfe72iMbTc5q2uYi0ImB5/m
+3jyMSXgso6NDWuvXrp2VSWjb1tG++des9rhvyrZyNruaI4iOnMvvuc71gvHol7ap
+pRu3+LRTQFYsAizdfHEQ9k949MZH4fiIu5NmCT/wNJVouUZYYJseFhOlIANaXn6q
+mz7kKVYfxfV+Z5EccaRixaClCFwyRdmjgLyyeuI4/QPDx9PjmGmf6eOEC2ZHBi4O
+HwjIzmLnAgMBAAECggEBAMq1lZyPkh8PCUyLVX3VhC4jRybyAWBI+piKx+4EI6l/
+laP5dZcegCoo+w/mdbTpRHqAWGjec4e9+Nkoq8rLG6B2SCfaRJUYiEQSEvSBHAid
+BZqKK4B82GXQavNU91Vy1OT3vD7mpPXF6jEK6gAA0C4Wt7Lzo7ZfqEavRBDMsNnV
+jOxLwWJCFSKhfeA6grJCnagmEDKSxxazlNBgCahjPf/+IRJZ7Vk4Zjq+I/5nWKf8
+lYaQExKDIANuM/jMRnYVq5k4g2MKHUADWGTSvG1DMJiMHzdxb2miZovpIkEE86bC
+wKBuele9IR6Rb/wygYj7WdaWysZ081OT7mNyju08e4ECgYEA8+q7vv4Nlz8bAcmY
+Ip5517s15M5D9iLsE2Q5m9Zs99rUyQv0E8ekpChhtTSdvj+eNl39O4hji46Gyceu
+MYPfNL7+YWaFDxuyaXEe/OFuKbFqgE1p08HXFcQJCvgqD1MWO5b9BRDc0qpNFIA8
+eN9xFBMQ2UFaALBMAup7Ef85q4kCgYEA8pKOAIsgmlnO8P9cPzkMC1oozslraAti
+1JnOJjwPLoHFubtH2u7WoIkSvNfeNwfrsVXwAP0m7C8p7qhYppS+0XGjKpYNSezK
+1GCqCVv8R1m+AsSseSUUaQCmEydd+gQbBq3r4u3wU3ylrgAoR3m+7SVyhvD+vbwI
+7+zfj+O3zu8CgYEAqaAsQH5c5Tm1hmCztB+RjD1dFWl8ScevdSzWA1HzJcrA/6+Y
+ZckI7kBG8sVMjemgFR735FbNI1hS1DBRK44Rw5SvQv0Qu5j/UeShMCt1ePkwn1k2
+p1S+Rxy1TTOXzGBzra0q+ELpzncwc3lalJSPBu7bYLrZ5HC167E1NSbQ7EECgYBo
+e/IIj+TyNz7pFcVhQixK84HiWGYYQddHJhzi4TnU2XcWonG3/uqZ6ZEVoJIJ+DJw
+h0jC1EggscwJDaBp2GY9Bwq2PD3rGsDfK+fx8ho/jYtH2/lCkVMyS2I9m9Zh68TM
+YrvZWo4LGASxZ0XyS6GOunOTZlkD1uuulMRTUU4KJwKBgQCwyjs0/ElVFvO0lPIC
+JJ//B5rqI7hNMJuTBvr4yiqVZdbgFukaU7FBVyNYDMpZi/nRbpglm+psFcwXtL8n
+bHOIGLkh8vB7OuETRYhXs567lPYtO4BmHZlXW70Sq/0xqi/Mmz1RuEg4SQ1Ug5oy
+wG6IV5EWSQAhsGirdybQ+bY7Kw==
+-----END PRIVATE KEY-----
--- /dev/null
+// -
+// ========================LICENSE_START=================================
+// O-RAN-SC
+// %%
+// Copyright (C) 2021: Nordix Foundation
+// %%
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// ========================LICENSE_END===================================
+//
+
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/gorilla/mux"
+)
+
+func main() {
+ port := flag.Int("port", 3904, "The port this SDNR stub will listen on")
+ flag.Parse()
+
+ r := mux.NewRouter()
+ r.HandleFunc("/rests/data/network-topology:network-topology/topology=topology-netconf/node={O-DU-ID}/yang-ext:mount/o-ran-sc-du-hello-world:network-function/du-to-ru-connection={O-RU-ID}", handleData)
+
+ fmt.Println("Starting SDNR on port: ", *port)
+ http.ListenAndServe(fmt.Sprintf(":%v", *port), r)
+
+}
+
+func handleData(w http.ResponseWriter, req *http.Request) {
+ defer req.Body.Close()
+ if reqData, err := io.ReadAll(req.Body); err == nil {
+ fmt.Println("SDNR received body: ", string(reqData))
+ }
+}