gerrit-change-number: ${{ inputs.GERRIT_CHANGE_NUMBER }}
gerrit-patchset-number: ${{ inputs.GERRIT_PATCHSET_NUMBER }}
vote-type: clear
- comment-only: true
- name: Allow replication
run: sleep 10s
gerrit-change-number: ${{ inputs.GERRIT_CHANGE_NUMBER }}
gerrit-patchset-number: ${{ inputs.GERRIT_PATCHSET_NUMBER }}
vote-type: ${{ env.WORKFLOW_CONCLUSION }}
- comment-only: true
throw new CapifAccessException("Unexpected error");\r
}\r
\r
- //TODO The below should be uncommented once SME Manager provides an accessible URI\r
-\r
-// String helloWorldEndpoint = "";\r
-// List<String> apiSetEndpoints = getApiSetEndpoints(apiResponse, baseUrl);\r
-// if (apiSetEndpoints != null && !apiSetEndpoints.isEmpty()) {\r
-// helloWorldEndpoint = apiSetEndpoints.get(0);\r
-// }\r
-//\r
-// if (helloWorldEndpoint != null && !helloWorldEndpoint.isEmpty()) {\r
-// try {\r
-// String responseHelloWorld = restTemplate.getForObject(helloWorldEndpoint, String.class);\r
-// logger.info("Response :- ", responseHelloWorld);\r
-// } catch (IllegalArgumentException e) {\r
-// throw new CapifAccessException("Error accessing the URL :- " + helloWorldEndpoint);\r
-// } catch (Exception e) {\r
-// throw new CapifAccessException("Unexpected error");\r
-// }\r
-// }\r
+ String helloWorldEndpoint = "";\r
+ List<String> apiSetEndpoints = getApiSetEndpoints(apiResponse, baseUrl);\r
+ if (apiSetEndpoints != null && !apiSetEndpoints.isEmpty()) {\r
+ helloWorldEndpoint = apiSetEndpoints.get(0);\r
+ }\r
+\r
+ if (helloWorldEndpoint != null && !helloWorldEndpoint.isEmpty()) {\r
+ try {\r
+ String responseHelloWorld = restTemplate.getForObject(helloWorldEndpoint, String.class);\r
+ logger.info("rApp SME Provider Response : {}", responseHelloWorld);\r
+ } catch (IllegalArgumentException e) {\r
+ throw new CapifAccessException("Error accessing the URL :- " + helloWorldEndpoint);\r
+ } catch (Exception e) {\r
+ throw new CapifAccessException("Unexpected error");\r
+ }\r
+ }\r
}\r
}\r
\r
*.zip
*.tar.gz
*.rar
+*.tgz
# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
hs_err_pid*
// make the consumer available for graceful shutdown
setKafkaConsumer(consumer);
consumer.assign(Collections.singleton(new TopicPartition(topicName, 0)));
- //consumer.seekToBeginning(consumer.assignment()); //--from-beginning
int recNum = numOfRecs;
while (recNum > 0) {
ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(TIME_OUT_MS));
import com.demo.consumer.repository.InfoType;
import com.demo.consumer.repository.InfoTypes;
import com.demo.consumer.repository.Job.Parameters;
+import com.demo.consumer.repository.Job.Parameters.KafkaDeliveryInfo;
import com.demo.consumer.dme.ConsumerJobInfo;
-import com.demo.consumer.dme.ConsumerStatusInfo;
+import com.demo.consumer.dme.JobDataSchema;
import com.demo.consumer.repository.Jobs;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
this.jobs.addJob(request.infoTypeId, types.getType(request.infoTypeId), request.owner,
toJobParameters(request.jobDefinition));
} catch (Exception e) {
- log.error("Error adding the job" + infoJobId, e.getMessage());
+ log.error("Error adding the job " + infoJobId + "{}", e.getMessage());
}
}
@PostMapping("/info-type-status")
public void statusChange(@RequestBody String requestBody) {
- ConsumerStatusInfo request = gson.fromJson(requestBody, ConsumerStatusInfo.class);
- log.info("Add Status Job Info", request);
+ JobDataSchema request = gson.fromJson(requestBody, JobDataSchema.class);
+ log.debug("Body Received: {}" , requestBody);
+ try {
+ this.jobs.addJob(request.getInfo_type_id(), types.getType(request.getInfo_type_id()), "",
+ new Parameters(new KafkaDeliveryInfo(
+ request.getJob_data_schema().getTopic(),
+ request.getJob_data_schema().getBootStrapServers(), 0)));
+ } catch (Exception e) {
+ log.error("Error adding the info type " + request.getInfo_type_id() + "{}", e.getMessage());
+ }
}
private Parameters toJobParameters(Object jobData) {
--- /dev/null
+/*-
+ * ========================LICENSE_START=================================
+ * O-RAN-SC
+ *
+ * Copyright (C) 2024: OpenInfra Foundation Europe
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ========================LICENSE_END===================================
+ */
+
+package com.demo.consumer.dme;
+
+import com.google.gson.annotations.SerializedName;
+import lombok.Data;
+import lombok.Getter;
+import lombok.Setter;
+
+import com.google.gson.Gson;
+
+/**
+ * Gson deserialization target for the info-type status callback body
+ * (POST /info-type-status). Field names deliberately mirror the snake_case
+ * JSON keys; Lombok generates the matching accessors (e.g. getInfo_type_id)
+ * that callers rely on, so they must not be renamed.
+ */
+@Data
+public class JobDataSchema {
+
+    /** Registration state reported for the info type. */
+    public enum InfoJobStatusValues {
+        REGISTERED, UNREGISTERED
+    }
+
+    @SerializedName("info_type_id")
+    private String info_type_id;
+    @SerializedName("job_data_schema")
+    private DataSchema job_data_schema;
+    @SerializedName("status")
+    private InfoJobStatusValues status;
+
+    @Override
+    public String toString() {
+        return new Gson().toJson(this);
+    }
+
+    /**
+     * Nested "job_data_schema" object carrying the Kafka delivery details.
+     * Declared static: it holds no reference to the enclosing instance, and
+     * Gson instantiates static nested classes cleanly during deserialization
+     * (non-static inner classes require an enclosing instance).
+     */
+    @Getter
+    @Setter
+    public static class DataSchema {
+        private String title;
+        private String description;
+        @SerializedName("topic")
+        private String topic;
+        @SerializedName("bootStrapServers")
+        private String bootStrapServers;
+    }
+}
@Component
public class PropertiesHelper {
private static final Logger log = LoggerFactory.getLogger(PropertiesHelper.class);
+ private static String kafkaServers = null;
public static Properties getProperties() throws Exception {
- Properties props = null;
+ Properties props = new Properties();
try (InputStream input = SimpleConsumer.class.getClassLoader().getResourceAsStream("config.properties")) {
- props = new Properties();
if (input == null) {
- log.error("Found no configuration file in resources");
- throw new Exception("Sorry, unable to find config.properties");
+ log.error("Failed to load configuration file 'config.properties'");
+ throw new IOException("Configuration file 'config.properties' not found");
}
props.load(input);
- String kafkaServers = System.getenv("KAFKA_SERVERS");
- if (kafkaServers != null) {
+ setBootstrapServers(props);
+ } catch (IOException e) {
+ log.error("Error reading configuration file: ", e);
+ throw e;
+ }
+ return props;
+ }
+
+ private static void setBootstrapServers(Properties props) {
+ if (kafkaServers != null && !kafkaServers.isEmpty()) {
+ props.setProperty("bootstrap.servers", kafkaServers);
+ log.info("Using actively bootstrap servers: {}", kafkaServers);
+ } else {
+ String kafkaServersEnv = System.getenv("KAFKA_SERVERS");
+ if (kafkaServersEnv != null && !kafkaServersEnv.isEmpty()) {
+ kafkaServers = kafkaServersEnv;
props.setProperty("bootstrap.servers", kafkaServers);
- log.info("Env variable KAFKA_SERVERS found, adding: " + kafkaServers);
+ log.info("Using environment variable KAFKA_SERVERS: {}", kafkaServers);
} else {
- log.info("Env variable KAFKA_SERVERS not found, defaulting to config file");
+ log.info("Environment variable KAFKA_SERVERS not found, defaulting to config file");
}
- } catch (IOException e) {
- log.error("Error reading configuration file: ", e.getMessage());
}
- return props;
+ }
+
+ public static void setKafkaServers(String servers) {
+ kafkaServers = servers;
}
}
import com.fasterxml.jackson.annotation.JsonProperty;
+import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
@ToString
public class Job {
+ @AllArgsConstructor
@Builder
public static class Parameters {
+ @AllArgsConstructor
@Builder
@EqualsAndHashCode
public static class KafkaDeliveryInfo {
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
+import com.demo.consumer.messages.PropertiesHelper;
import com.demo.consumer.repository.Job.Parameters;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
public void addJob(String id, InfoType type, String owner, Parameters parameters) {
Job job = new Job(id, type, owner, parameters);
+ setKafkaServersEnvironment(job);
this.put(job);
}
+ private void setKafkaServersEnvironment(Job job) {
+ String kafkaServers = job.getParameters().getDeliveryInfo().getBootStrapServers();
+ if (kafkaServers != null && !kafkaServers.isEmpty()) {
+ PropertiesHelper.setKafkaServers(kafkaServers);
+ logger.info("Setting variable bootStrapServers: {}", kafkaServers);
+ } else {
+ logger.warn("bootStrapServers is not set for job: {}", job.getId());
+ }
+ }
+
private synchronized void put(Job job) {
logger.debug("Put job: {}", job.getId());
allJobs.put(job.getId(), job);
vars:
time: 1000
- autostart: true
+ autostart: false
topic: mytopic # This topic is used only when autostart is enabled
spring:
--- /dev/null
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
--- /dev/null
+apiVersion: v2
+name: ics-consumer
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+appVersion: "1.16.0"
--- /dev/null
+#
+# ========================LICENSE_START=================================
+# O-RAN-SC
+#
+# Copyright (C) 2024: OpenInfra Foundation Europe
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========================LICENSE_END===================================
+#
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ .Values.consumer.name }}
+spec:
+ replicas: {{ .Values.consumer.replicaCount }}
+ selector:
+ matchLabels:
+ app: {{ .Values.consumer.name }}
+ template:
+ metadata:
+ labels:
+ app: {{ .Values.consumer.name }}
+ spec:
+ containers:
+ - name: {{ .Values.consumer.name }}
+ image: "{{ .Values.consumer.image.repository }}:{{ .Values.consumer.image.tag }}"
+ ports:
+ - containerPort: {{ .Values.consumer.service.port }}
+ env:
+ - name: KAFKA_SERVERS
+ value: "{{ .Values.kafka.host }}:{{ .Values.kafka.port }}"
--- /dev/null
+#
+# ========================LICENSE_START=================================
+# O-RAN-SC
+#
+# Copyright (C) 2024: OpenInfra Foundation Europe
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========================LICENSE_END===================================
+#
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Values.consumer.name }}
+spec:
+ type: NodePort
+ ports:
+ - port: {{ .Values.consumer.service.port }}
+ targetPort: {{ .Values.consumer.service.port }}
+ nodePort: {{ .Values.consumer.service.nodePort }}
+ selector:
+ app: {{ .Values.consumer.name }}
--- /dev/null
+#
+# ========================LICENSE_START=================================
+# O-RAN-SC
+#
+# Copyright (C) 2024: OpenInfra Foundation Europe
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========================LICENSE_END===================================
+#
+consumer:
+ name: kafka-consumer
+ replicaCount: 1
+ image:
+ repository: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-sample-icsconsumer
+ tag: 0.0.1
+ service:
+ port: 8081
+ nodePort: 30081
+
+kafka:
+ host: kafka-service
+ port: 9092
--- /dev/null
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
--- /dev/null
+apiVersion: v2
+name: ics-producer
+description: A Helm chart for Kubernetes
+type: application
+version: 0.1.0
+appVersion: "1.16.0"
--- /dev/null
+#
+# ========================LICENSE_START=================================
+# O-RAN-SC
+#
+# Copyright (C) 2024: OpenInfra Foundation Europe
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========================LICENSE_END===================================
+#
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ .Values.producer.name }}
+spec:
+ replicas: {{ .Values.producer.replicaCount }}
+ selector:
+ matchLabels:
+ app: {{ .Values.producer.name }}
+ template:
+ metadata:
+ labels:
+ app: {{ .Values.producer.name }}
+ spec:
+ containers:
+ - name: {{ .Values.producer.name }}
+ image: "{{ .Values.producer.image.repository }}:{{ .Values.producer.image.tag }}"
+ ports:
+ - containerPort: {{ .Values.producer.service.port }}
+ env:
+ - name: KAFKA_SERVERS
+ value: "{{ .Values.kafka.host }}:{{ .Values.kafka.port }}"
--- /dev/null
+#
+# ========================LICENSE_START=================================
+# O-RAN-SC
+#
+# Copyright (C) 2024: OpenInfra Foundation Europe
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========================LICENSE_END===================================
+#
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ .Values.producer.name }}
+spec:
+ type: NodePort
+ ports:
+ - port: {{ .Values.producer.service.port }}
+ targetPort: {{ .Values.producer.service.port }}
+ nodePort: {{ .Values.producer.service.nodePort }}
+ selector:
+ app: {{ .Values.producer.name }}
--- /dev/null
+#
+# ========================LICENSE_START=================================
+# O-RAN-SC
+#
+# Copyright (C) 2024: OpenInfra Foundation Europe
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========================LICENSE_END===================================
+#
+producer:
+ name: kafka-producer
+ replicaCount: 1
+ image:
+ repository: nexus3.o-ran-sc.org:10004/o-ran-sc/nonrtric-sample-icsproducer
+ tag: 0.0.1
+ service:
+ port: 8080
+ nodePort: 30080
+
+kafka:
+ host: kafka-service
+ port: 9092
@RequestMapping(path = "/producer", produces = "application/json")
public class ProducerController {
private static final Logger log = LoggerFactory.getLogger(ProducerController.class);
-
private static Gson gson = new GsonBuilder().create();
-
private final Jobs jobs;
private final InfoTypes types;
private String topicName = "mytopic";
-
public ProducerController(@Autowired Jobs jobs, @Autowired InfoTypes types) {
this.jobs = jobs;
this.types = types;
@Component
public class PropertiesHelper {
private static final Logger log = LoggerFactory.getLogger(PropertiesHelper.class);
+ private static String kafkaServers = null;
public static Properties getProperties() throws Exception {
- Properties props = null;
+ Properties props = new Properties();
try (InputStream input = SimpleProducer.class.getClassLoader().getResourceAsStream("config.properties")) {
- props = new Properties();
if (input == null) {
- log.error("Found no configuration file in resources");
- throw new Exception("Sorry, unable to find config.properties");
+ log.error("Failed to load configuration file 'config.properties'");
+ throw new IOException("Configuration file 'config.properties' not found");
}
props.load(input);
- String kafkaServers = System.getenv("KAFKA_SERVERS");
- if (kafkaServers != null) {
+ setBootstrapServers(props);
+ } catch (IOException e) {
+ log.error("Error reading configuration file: ", e);
+ throw e;
+ }
+ return props;
+ }
+
+ private static void setBootstrapServers(Properties props) {
+ if (kafkaServers != null && !kafkaServers.isEmpty()) {
+ props.setProperty("bootstrap.servers", kafkaServers);
+ log.info("Using actively bootstrap servers: {}", kafkaServers);
+ } else {
+ String kafkaServersEnv = System.getenv("KAFKA_SERVERS");
+ if (kafkaServersEnv != null && !kafkaServersEnv.isEmpty()) {
+ kafkaServers = kafkaServersEnv;
props.setProperty("bootstrap.servers", kafkaServers);
- log.info("Env variable KAFKA_SERVERS found, adding: " + kafkaServers);
+ log.info("Using environment variable KAFKA_SERVERS: {}", kafkaServers);
} else {
- log.info("Env variable KAFKA_SERVERS not found, defaulting to config file");
+ log.info("Environment variable KAFKA_SERVERS not found, defaulting to config file");
}
- } catch (IOException e) {
- log.error("Error reading configuration file: ", e.getMessage());
}
- return props;
+ }
+
+ public static void setKafkaServers(String servers) {
+ kafkaServers = servers;
}
}
@Getter
@Setter
private Object inputJobDefinition;
-
}
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
+import com.demo.producer.messages.PropertiesHelper;
import com.demo.producer.repository.Job.Parameters;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
public void addJob(String id, InfoType type, String owner, Parameters parameters) {
Job job = new Job(id, type, owner, parameters);
+ setKafkaServersEnvironment(job);
this.put(job);
}
+ private void setKafkaServersEnvironment(Job job) {
+ String kafkaServers = job.getParameters().getDeliveryInfo().getBootStrapServers();
+ if (kafkaServers != null && !kafkaServers.isEmpty()) {
+ PropertiesHelper.setKafkaServers(kafkaServers);
+ logger.info("Setting variable bootStrapServers: {}", kafkaServers);
+ } else {
+ logger.warn("bootStrapServers is not set for job: {}", job.getId());
+ }
+ }
+
private synchronized void put(Job job) {
logger.debug("Put job: {}", job.getId());
allJobs.put(job.getId(), job);
#
vars:
time: 1000
- autostart: true
+ autostart: false
topic: mytopic # This topic is used only when autostart is enabled
spring:
echo "Kafka container is up and running. Starting producer and consumer..."
space
-echo "Start 1 Producer on mytopic"
-curl -X GET http://localhost:8080/startProducer/mytopic
-space
-
-echo "Start 1 Consumer on mytopic"
-curl -X GET http://localhost:8081/startConsumer/mytopic
-space
-
-sleep 10
-
echo "Sending type1 to ICS"
curl -X 'PUT' \
'http://localhost:8083/data-producer/v1/info-types/type1' \
"$schema":"http://json-schema.org/draft-07/schema#",
"title":"STD_Type1_1.0.0",
"description":"Type 1",
- "type":"object"
- }
+ "topic": "mytopic",
+ "bootStrapServers": "kafka-zkless:9092"
+ }
}'
echo "Getting types from ICS"
"job_definition": {
"deliveryInfo": {
"topic": "mytopic",
- "bootStrapServers": "http://kafka-zkless:9092",
- "numberOfMessages": 0
+ "bootStrapServers": "kafka-zkless:9092",
+ "numberOfMessages": 100
}
},
"job_result_uri": "http://kafka-producer:8080/producer/job",
-H 'accept: application/json' \
-H 'Content-Type: application/json' \
-d '{
- "status_result_uri": "http://kafka-consumer:8081/info-type-status",
- "owner": "owner"
+ "status_result_uri": "http://kafka-consumer:8081/consumer/info-type-status",
+ "owner": "demo"
}'
echo "Getting Consumer Subscription Job infos from ICS"
curl -X 'GET' 'http://localhost:8083/data-consumer/v1/info-type-subscription/1' -H 'accept: application/json'
space
-sleep 5
+# Re-send type1 to ICS so the status callback sets the Kafka broker in the consumer
+echo "Sending type1 to ICS to use the callback"
+curl -X 'PUT' \
+ 'http://localhost:8083/data-producer/v1/info-types/type1' \
+ -H 'accept: application/json' \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "info_job_data_schema": {
+ "$schema":"http://json-schema.org/draft-07/schema#",
+ "title":"STD_Type1_1.0.0",
+ "description":"Type 1",
+ "topic": "mytopic",
+ "bootStrapServers": "kafka-zkless:9092"
+ }
+}'
+
+# Start the producer and consumer manually (see the autostart flag in application.yaml)
+echo "Start 1 Producer on mytopic"
+curl -X GET http://localhost:8080/startProducer/mytopic
+space
+
+echo "Start 1 Consumer on mytopic"
+curl -X GET http://localhost:8081/startConsumer/mytopic
+space
+
+sleep 10
+
echo "ICS Producer Docker logs "
docker logs informationcoordinatorservice | grep -E 'o.o.i.c.r1producer.ProducerCallbacks|o.o.i.repository.InfoTypeSubscriptions'
space
for container in "${containers[@]}"; do
if docker logs "$container" | grep -q ERROR; then
echo "Errors found in logs of $container"
+ docker logs "$container" | grep ERROR
echo "FAIL"
exit 1
else
fi
}
-# Function to wait for a Docker container to be running and log a specific string
+# Function to wait for a Docker container to be running and log a specific string with a maximum timeout of 20 minutes
wait_for_container() {
local container_name="$1"
local log_string="$2"
+ local timeout=1200 # Timeout set to 20 minutes (20 minutes * 60 seconds)
+
+ local start_time=$(date +%s)
+ local end_time=$((start_time + timeout))
while ! docker inspect "$container_name" &>/dev/null; do
echo "Waiting for container '$container_name' to be created..."
sleep 5
+ if [ "$(date +%s)" -ge "$end_time" ]; then
+ echo "Timeout: Container creation exceeded 20 minutes."
+ exit 1
+ fi
done
while [ "$(docker inspect -f '{{.State.Status}}' "$container_name")" != "running" ]; do
echo "Waiting for container '$container_name' to be running..."
sleep 5
+ if [ "$(date +%s)" -ge "$end_time" ]; then
+ echo "Timeout: Container start exceeded 20 minutes."
+ exit 1
+ fi
done
# Check container logs for the specified string
while ! docker logs "$container_name" 2>&1 | grep "$log_string"; do
echo "Waiting for '$log_string' in container logs of '$container_name'..."
sleep 5
+ if [ "$(date +%s)" -ge "$end_time" ]; then
+ echo "Timeout: Log string not found within 20 minutes."
+ exit 1
+ fi
done
}
+
space() {
echo ""
echo "++++++++++++++++++++++++++++++++++++++++++++++++++++"
a1pms_api_get_policy_status 404 1
a1pms_api_get_policy_status 404 2
- VAL='NOT IN EFFECT'
- a1pms_api_get_policy_status 200 5000 OSC "$VAL" "false"
+ if [[ $TEST_ENV_PROFILE =~ ^ORAN-[A-H] ]] || [[ $TEST_ENV_PROFILE =~ ^ONAP-[A-L] ]]; then
+ VAL='NOT IN EFFECT'
+ VAL2="false"
+ VAL3=EMPTY
+ VAL4=EMPTY
+ else
+ VAL="NOT_ENFORCED"
+ VAL2="OTHER_REASON"
+ VAL3="NOT_ENFORCED"
+ VAL4="OTHER_REASON"
+ fi
+ a1pms_api_get_policy_status 200 5000 OSC "$VAL" "$VAL2"
a1pms_api_get_policy_status 200 5100 STD "UNDEFINED"
- a1pms_api_get_policy_status 200 5200 STD2 EMPTY EMPTY
+ a1pms_api_get_policy_status 200 5200 STD2 $VAL3 $VAL4
deviation "TR10 - a1pms allows policy creation on unregistered service (side effect of orig. problem)- test combo $interface and $__httpx"
controller_api_get_A1_policy_status 200 OSC ricsim_g1_1 1 4000
controller_api_get_A1_policy_status 200 STD ricsim_g2_1 5000
-
- VAL='NOT_ENFORCED'
- controller_api_get_A1_policy_status 200 OSC ricsim_g1_1 1 4000 "$VAL" "OTHER_REASON"
+ if [[ $TEST_ENV_PROFILE =~ ^ORAN-[A-H] ]] || [[ $TEST_ENV_PROFILE =~ ^ONAP-[A-L] ]]; then
+ VAL='NOT IN EFFECT'
+ VAL2="false"
+ else
+ VAL='NOT_ENFORCED'
+ VAL2="OTHER_REASON"
+ fi
+ controller_api_get_A1_policy_status 200 OSC ricsim_g1_1 1 4000 "$VAL" "$VAL2"
controller_api_get_A1_policy_status 200 STD ricsim_g2_1 5000 "UNDEFINED"
RESP=202
controller_api_get_A1_policy_status 200 OSC ricsim_g1_1 1 4000
controller_api_get_A1_policy_status 200 STD ricsim_g2_1 5000
- VAL='NOT IN EFFECT'
+ if [[ $TEST_ENV_PROFILE =~ ^ORAN-[A-H]$ || $TEST_ENV_PROFILE =~ ^ONAP-[A-L]$ ]]; then
+ VAL='NOT IN EFFECT'
+ VAL2="false"
+ else
+ VAL='NOT_ENFORCED'
+ VAL2="OTHER_REASON"
+ fi
controller_api_get_A1_policy_status 200 OSC ricsim_g1_1 1 4000 "$VAL" "false"
controller_api_get_A1_policy_status 200 STD ricsim_g2_1 5000 "UNDEFINED"
done
# Check status OSC
- VAL='NOT IN EFFECT'
+ if [[ $TEST_ENV_PROFILE =~ ^ORAN-[A-H] ]] || [[ $TEST_ENV_PROFILE =~ ^ONAP-[A-L] ]]; then
+ VAL='NOT IN EFFECT'
+ VAL2="false"
+ else
+ VAL='NOT_ENFORCED'
+ VAL2="OTHER_REASON"
+ fi
for ((i=1; i<=$OSC_NUM_RICS; i++))
do
- a1pms_api_get_policy_status 200 $((3000+$i)) OSC "$VAL" "false"
- a1pms_api_get_policy_status 200 $((4000+$i)) OSC "$VAL" "false"
+ a1pms_api_get_policy_status 200 $((3000+$i)) OSC "$VAL" "$VAL2"
+ a1pms_api_get_policy_status 200 $((4000+$i)) OSC "$VAL" "$VAL2"
done
# Note: Status callback is not tested since this callback (http POST) is made from the
##########################################
-suite_complete
\ No newline at end of file
+suite_complete
+
+exit
TEST_DIRECTORY="test/auto-test"
TEST_SCRIPT="./Suite-Verify-jobs.sh"
DOCKER_COMPOSE_VERSION="v2.21.0"
+PULL_IMAGE_TYPE="remote-remove"
+RUN_MODE="docker"
+IMAGE_VERSION="release"
+ENV_FLAG="--env-file"
+ENV_FILE="../common/test_env-oran-h-release.sh"
# Check if jq is installed, and install it if not
if ! command -v jq &> /dev/null; then
cd "$TEST_DIRECTORY"
sudo chmod 775 "$TEST_SCRIPT"
-"$TEST_SCRIPT" remote-remove docker release --env-file ../common/test_env-oran-h-release.sh
+"$TEST_SCRIPT" $PULL_IMAGE_TYPE $RUN_MODE $IMAGE_VERSION $ENV_FLAG $ENV_FILE
+exit_val=$?
# Remove docker-compose after tests are done
if command -v docker-compose &> /dev/null; then
echo "Removing jq..."
sudo apt-get remove -y jq
fi
+
+exit $exit_val
fi
targetJson=$targetJson"}"
elif [ "$3" == "OSC" ]; then
- targetJson="{\"instance_status\":\"$4\""
- if [ $# -eq 5 ]; then
- targetJson=$targetJson",\"has_been_deleted\":\"$5\""
- fi
- targetJson=$targetJson",\"created_at\":\"????\"}"
+ if [[ $TEST_ENV_PROFILE =~ ^ORAN-[A-H] ]] || [[ $TEST_ENV_PROFILE =~ ^ONAP-[A-L] ]]; then
+ targetJson="{\"instance_status\":\"$4\""
+ if [ $# -eq 5 ]; then
+ targetJson=$targetJson",\"has_been_deleted\":\"$5\""
+ fi
+ targetJson=$targetJson",\"created_at\":\"????\"}"
+ else
+ targetJson="{\"enforceStatus\":\"$4\""
+ if [ $# -eq 5 ]; then
+ targetJson=$targetJson",\"enforceReason\":\"$5\"}"
+ fi
+ fi
else
__print_err "<response-code> (STD <enforce-status> [<reason>])|(OSC <instance-status> <has-been-deleted>)" $@
return 1
if [ $# -ge 5 ] && [ $2 == "OSC" ]; then
url="$ric_id/a1-p/policytypes/$4/policies/$UUID$5/status"
if [ $# -gt 5 ]; then
- targetJson="{\"enforceStatus\":\"$6\""
- targetJson=$targetJson",\"enforceReason\":\"$7\"}"
+ if [[ $TEST_ENV_PROFILE =~ ^ORAN-[A-H] ]] || [[ $TEST_ENV_PROFILE =~ ^ONAP-[A-L] ]]; then
+ targetJson="{\"instance_status\":\"$6\""
+ targetJson=$targetJson",\"has_been_deleted\":\"$7\""
+ targetJson=$targetJson",\"created_at\":\"????\"}"
+ else
+ targetJson="{\"enforceStatus\":\"$6\""
+ targetJson=$targetJson",\"enforceReason\":\"$7\"}"
+ fi
fi
paramError=0
elif [ $# -ge 4 ] && [ $2 == "STD" ]; then
#If response is too long, truncate
result="...response text too long, omitted"
fi
- echo -ne " Waiting for {ENTITY} $BOLD${appname}$EBOLD service status on ${3}, result: $result${SAMELINE}"
+ echo -ne " Waiting for ${ENTITY} $BOLD${appname}$EBOLD service status on ${url}, result: $result${SAMELINE}"
echo -ne " The ${ENTITY} $BOLD${appname}$EBOLD$GREEN is alive$EGREEN, responds to service status:$GREEN $result $EGREEN on ${url} after $(($SECONDS-$TSTART)) seconds"
a1pmsst=true
break
total=$((TCSUITE_PASS_CTR+TCSUITE_FAIL_CTR))
if [ $TCSUITE_CTR -eq 0 ]; then
- echo -e "\033[1mNo test cases seem to have executed. Check the script....\033[0m"
- elif [ $total != $TCSUITE_CTR ]; then
+ echo -e "\033[1mNo test cases seem to have executed. Check the script....\033[0m"
+ elif [ $total != $TCSUITE_CTR ]; then
echo -e "\033[1mTotal number of test cases does not match the sum of passed and failed test cases. Check the script....\033[0m"
fi
echo "Number of test cases : " $TCSUITE_CTR
echo "FAIL test cases"
cat .tmp_tcsuite_fail
echo ""
+ if [ $TCSUITE_FAIL_CTR -ne 0 ]; then
+ echo "################################### Test suite completed with Tests FAIL ##############################"
+ echo "#################################################################################################"
+ else
+ echo "################################### Test suite completed ##############################"
+ echo "#################################################################################################"
+ fi
- echo "################################### Test suite completed ##############################"
- echo "#################################################################################################"
+ exit $TCSUITE_FAIL_CTR
}
\ No newline at end of file