/*-
 * ============LICENSE_START======================================================================
 * Copyright (C) 2018, 2020 NOKIA Intellectual Property, 2018-2023 Nordix Foundation. All rights reserved.
 * ===============================================================================================
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 * ============LICENSE_END========================================================================
 */
package org.oran.datafile.tasks;

import com.google.gson.Gson;
import com.google.gson.GsonBuilder;

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Duration;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.serialization.StringSerializer;
import org.oran.datafile.configuration.AppConfig;
import org.oran.datafile.configuration.CertificateConfig;
import org.oran.datafile.datastore.DataStore;
import org.oran.datafile.datastore.DataStore.Bucket;
import org.oran.datafile.exceptions.DatafileTaskException;
import org.oran.datafile.http.HttpsClientConnectionManagerUtil;
import org.oran.datafile.model.Counters;
import org.oran.datafile.model.FileData;
import org.oran.datafile.model.FilePublishInformation;
import org.oran.datafile.model.FileReadyMessage;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;

import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;
import reactor.kafka.sender.KafkaSender;
import reactor.kafka.sender.SenderOptions;
import reactor.kafka.sender.SenderRecord;
import reactor.kafka.sender.SenderResult;
import reactor.util.retry.Retry;

/**
 * This implements the main flow of the data file collector: fetch file ready
 * messages from the message router, fetch new files from the PNF and publish
 * these in the data store.
 */
@Component
public class CollectAndReportFiles {

    private static Gson gson = new GsonBuilder() //
        .disableHtmlEscaping() //
        .create();

    private static final long FILE_TRANSFER_MAX_RETRIES = 2;
    private static final Duration FILE_TRANSFER_INITIAL_RETRY_TIMEOUT = Duration.ofSeconds(2);

    private static final Logger logger = LoggerFactory.getLogger(CollectAndReportFiles.class);

    private final AppConfig appConfig;

    private Counters counters = new Counters();

    private final KafkaSender<String, String> kafkaSender;

    private final DataStore dataStore;

    /**
     * Constructor for task registration in Datafile Workflow.
     *
     * @param applicationConfiguration - application configuration
     */
    public CollectAndReportFiles(AppConfig applicationConfiguration) {
        this.appConfig = applicationConfiguration;
        this.kafkaSender = KafkaSender.create(kafkaSenderOptions());
        this.dataStore = DataStore.create(applicationConfiguration);
        initCerts();
    }

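    // Installs the configured key/trust material in the shared HTTPS connection manager
    // so that subsequent file fetches over HTTPS can be authenticated.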
    private void initCerts() {
        try {
            CertificateConfig certificateConfig = appConfig.getCertificateConfiguration();
            HttpsClientConnectionManagerUtil.setupOrUpdate(certificateConfig.keyCert, certificateConfig.keyPasswordPath,
                certificateConfig.trustedCa, certificateConfig.trustedCaPasswordPath, true);
        } catch (DatafileTaskException e) {
            logger.error("Could not setup HttpsClient certs, reason: {}", e.getMessage());
        }
    }

    /**
     * Main function for scheduling of the file collection workflow.
     */
    public void start() {
        start(0);
    }

    private void start(int delayMillis) {
119 logger.trace("Starting");
120 if (appConfig.isS3Enabled()) {
121 this.dataStore.create(Bucket.FILES).subscribe();
122 this.dataStore.create(Bucket.LOCKS).subscribe();
124 Thread.sleep(delayMillis);
125 createMainTask().subscribe(null, s -> start(2000), null);
126 } catch (Exception e) {
127 logger.error("Unexpected exception: {}", e.toString(), e);
128 Thread.currentThread().interrupt();
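    /**
     * Builds the reactive pipeline: consume file ready events from Kafka, fan them out
     * over a parallel scheduler, drop files that are already collected, fetch the rest
     * and report each collected file on the collected-file topic.
     */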
    Flux<FilePublishInformation> createMainTask() {
        final int noOfWorkerThreads = appConfig.getNoOfWorkerThreads();
        Scheduler scheduler = Schedulers.newParallel("FileCollectorWorker", noOfWorkerThreads);
        return fetchFromKafka() //
            .doOnNext(fileReadyMessage -> counters.threadPoolQueueSize.incrementAndGet()) //
            .doOnNext(fileReadyMessage -> counters.incNoOfReceivedEvents()) //
            .parallel(noOfWorkerThreads) // Each FileReadyMessage in a separate thread
            .runOn(scheduler) //
            .doOnNext(fileReadyMessage -> counters.threadPoolQueueSize.decrementAndGet()) //
            .flatMap(fileReadyMessage -> Flux.fromIterable(FileData.createFileData(fileReadyMessage)), true, 1) //
            .flatMap(this::filterNotFetched, false, 1, 1) //
            .flatMap(this::fetchFile, false, 1, 1) //
            .flatMap(data -> reportFetchedFile(data, appConfig.getCollectedFileTopic()), false, 1) //
            .sequential() //
            .doOnError(t -> logger.error("Received error: {}", t.toString())); //
    }

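    // Releases the lock taken in fetchFile(), passing the FileData through unchanged.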
    private Mono<FileData> deleteLock(FileData info) {
        return dataStore.deleteLock(lockName(info.name())).map(b -> info); //
    }

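    // With S3 enabled, copies the collected file to the FILES bucket (retrying with
    // exponential backoff) and then deletes the local copy; otherwise the file stays
    // on the local file system and the information is passed through unchanged.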
    private Mono<FilePublishInformation> moveFileToS3Bucket(FilePublishInformation info) {
        if (this.appConfig.isS3Enabled()) {
            return dataStore.copyFileTo(locaFilePath(info), info.getName())
                .doOnError(t -> logger.warn("Failed to store file '{}' in S3 {}", info.getName(), t.getMessage())) //
                .retryWhen(Retry.backoff(4, Duration.ofMillis(1000))) //
                .map(f -> info) //
                .doOnError(t -> logger.error("Failed to store file '{}' in S3 after retries {}", info.getName(),
                    t.getMessage())) //
                .doOnNext(n -> logger.debug("Stored file in S3: {}", info.getName())) //
                .doOnNext(sig -> deleteLocalFile(info));
        } else {
            return Mono.just(info);
        }
    }

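    // Emits the FileData only if the file exists neither in the FILES bucket nor on
    // the local file system; already collected files are silently dropped.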
    private Mono<FileData> filterNotFetched(FileData fileData) {
        Path localPath = fileData.getLocalFilePath(this.appConfig);

        return dataStore.fileExists(Bucket.FILES, fileData.name()) //
            .filter(exists -> !exists) //
            .filter(exists -> !localPath.toFile().exists()) //
            .map(f -> fileData); //
    }

    private String lockName(String fileName) {
        return fileName + ".lck";
    }

    private Path locaFilePath(FilePublishInformation info) {
        return Paths.get(appConfig.getCollectedFilesPath(), info.getName());
    }

    private void deleteLocalFile(FilePublishInformation info) {
        Path path = locaFilePath(info);
        try {
            Files.delete(path);
        } catch (Exception e) {
            logger.warn("Could not delete local file: {}, reason: {}", path, e.getMessage());
        }
    }

    private Flux<FilePublishInformation> reportFetchedFile(FilePublishInformation fileData, String topic) {
        String json = gson.toJson(fileData);
        return sendDataToStream(topic, fileData.getSourceName(), json) //
            .map(result -> fileData);
    }

    public Flux<SenderResult<Integer>> sendDataToStream(String topic, String sourceName, String value) {
        return sendDataToKafkaStream(Flux.just(senderRecord(topic, sourceName, value)));
    }

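    // Builds the outgoing record: partition and key are left null so Kafka chooses the
    // partition, and the integer correlation metadata is echoed back in the SenderResult.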
    private SenderRecord<String, String, Integer> senderRecord(String topic, String sourceName, String value) {
        int correlationMetadata = 2;
        String key = null;
        var producerRecord = new ProducerRecord<>(topic, null, null, key, value, kafkaHeaders(sourceName));
        return SenderRecord.create(producerRecord, correlationMetadata);
    }

    private Iterable<Header> kafkaHeaders(String sourceName) {
        ArrayList<Header> result = new ArrayList<>();
        Header h = new RecordHeader("SourceName", sourceName.getBytes());
        result.add(h);
        return result;
    }

    private Flux<SenderResult<Integer>> sendDataToKafkaStream(Flux<SenderRecord<String, String, Integer>> dataToSend) {
        return kafkaSender.send(dataToSend) //
            .doOnError(e -> logger.error("Send to kafka failed", e));
    }

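    // Producer settings: acks=all for durable publishing, String serializers for key and
    // value, plus whatever security properties (e.g. SSL) AppConfig supplies.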
    private SenderOptions<String, String> kafkaSenderOptions() {
        String bootstrapServers = this.appConfig.getKafkaBootStrapServers();

        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        this.appConfig.addKafkaSecurityProps(props);
        return SenderOptions.create(props);
    }

    public Counters getCounters() {
        return this.counters;
    }

    protected FileCollector createFileCollector() {
        return new FileCollector(appConfig, counters);
    }

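    // Takes a lock for the file before collecting it, so the same file is not fetched
    // twice when several instances share the work; createLock is assumed to grant the
    // lock to at most one requester. The lock is released on success and on error.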
    private Mono<FilePublishInformation> fetchFile(FileData fileData) {
        return this.dataStore.createLock(lockName(fileData.name())).filter(granted -> granted) //
            .map(granted -> createFileCollector()) //
            .flatMap(collector -> collector.collectFile(fileData, FILE_TRANSFER_MAX_RETRIES,
                FILE_TRANSFER_INITIAL_RETRY_TIMEOUT)) //
            .flatMap(this::moveFileToS3Bucket) //
            .doOnNext(b -> deleteLock(fileData).subscribe()) //
            .doOnError(b -> deleteLock(fileData).subscribe()) //
            .onErrorResume(exception -> handleFetchFileFailure(fileData, exception)); //
    }

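    // Cleans up after a failed fetch: deletes any partial local file, bumps the FTP or
    // HTTP failure counter depending on the scheme, and resumes with an empty Mono.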
    private Mono<FilePublishInformation> handleFetchFileFailure(FileData fileData, Throwable t) {
        Path localFilePath = fileData.getLocalFilePath(this.appConfig);
        logger.error("File fetching failed, path {}, reason: {}", fileData.remoteFilePath(), t.getMessage());
        deleteFile(localFilePath);
        if (FileData.Scheme.isFtpScheme(fileData.scheme())) {
            counters.incNoOfFailedFtp();
        } else {
            counters.incNoOfFailedHttp();
        }
        return Mono.empty();
    }

    /**
     * Fetch more messages from the message router. This is done in a
     * polling/blocking fashion.
     */
    private Flux<FileReadyMessage> fetchFromKafka() {
        KafkaTopicListener listener = new KafkaTopicListener(this.appConfig);
        return listener.getFlux() //
            .flatMap(this::parseReceivedFileReadyMessage, 1);
    }

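    // Parses a raw Kafka record into a FileReadyMessage; malformed messages are logged
    // and skipped instead of failing the stream.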
    Mono<FileReadyMessage> parseReceivedFileReadyMessage(KafkaTopicListener.DataFromTopic data) {
        try {
            FileReadyMessage msg = gson.fromJson(data.value, FileReadyMessage.class);
            logger.debug("Received: {}", msg);
            return Mono.just(msg);
        } catch (Exception e) {
            logger.warn("Could not parse received: {}, reason: {}", data.value, e.getMessage());
            return Mono.empty();
        }
    }

    private static void deleteFile(Path localFile) {
        logger.trace("Deleting file: {}", localFile);
        try {
            Files.delete(localFile);
        } catch (Exception e) {
            logger.trace("Could not delete file: {}, reason: {}", localFile, e.getMessage());
        }
    }
}