[ric-plt/e2.git] / RIC-E2-TERMINATION / sctpThread.cpp
1 // Copyright 2019 AT&T Intellectual Property
2 // Copyright 2019 Nokia
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //      http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15
16 //  This source code is part of the near-RT RIC (RAN Intelligent Controller)
17 //  platform project (RICP).
18
19 // SCTP worker threads of the E2 Termination: accept SCTP connections from E2 nodes (RAN), decode and encode E2AP PDUs, and relay them over RMR to and from xApps and the E2 Manager.
20
21
22
23 #include <3rdparty/oranE2/RANfunctions-List.h>
24 #include "sctpThread.h"
25 #include "BuildRunName.h"
26
27 //#include "3rdparty/oranE2SM/E2SM-gNB-NRT-RANfunction-Definition.h"
28 //#include "BuildXml.h"
29 //#include "pugixml/src/pugixml.hpp"
30
31 using namespace std;
32 //using namespace std::placeholders;
33 using namespace boost::filesystem;
34 using namespace prometheus;
35
36
37 //#ifdef __cplusplus
38 //extern "C"
39 //{
40 //#endif
41
42 // declared here so the gcov header does not have to be included
43 extern "C" void __gcov_flush(void);
44
45 static void catch_function(int signal) {
46     __gcov_flush();
47     exit(signal);
48 }
49
50
51 BOOST_LOG_INLINE_GLOBAL_LOGGER_DEFAULT(my_logger, src::logger_mt)
52
53 boost::shared_ptr<sinks::synchronous_sink<sinks::text_file_backend>> boostLogger;
54 double cpuClock = 0.0;
55 bool jsonTrace = true;
56
57 void init_log() {
58     mdclog_attr_t *attr;
59     mdclog_attr_init(&attr);
60     mdclog_attr_set_ident(attr, "E2Terminator");
61     mdclog_init(attr);
62     mdclog_attr_destroy(attr);
63 }
64 auto start_time = std::chrono::high_resolution_clock::now();
65 typedef std::chrono::duration<double, std::ratio<1,1>> seconds_t;
66
67 double age() {
68     return seconds_t(std::chrono::high_resolution_clock::now() - start_time).count();
69 }
70
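// Rough estimate of the CPU clock: samples rdtscp() around a sleep of
// sleepTime milliseconds and returns the elapsed cycles per second.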
71 double approx_CPU_MHz(unsigned sleepTime) {
72     using namespace std::chrono_literals;
73     uint32_t aux = 0;
74     uint64_t cycles_start = rdtscp(aux);
75     double time_start = age();
76     std::this_thread::sleep_for(sleepTime * 1ms);
77     uint64_t elapsed_cycles = rdtscp(aux) - cycles_start;
78     double elapsed_time = age() - time_start;
79     return elapsed_cycles / elapsed_time;
80 }
81
82 //std::atomic<int64_t> rmrCounter{0};
83 std::atomic<int64_t> num_of_messages{0};
84 std::atomic<int64_t> num_of_XAPP_messages{0};
85 static long transactionCounter = 0;
86
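// Create the SCTP listening socket (IPv6, one-to-one style), bind it to the configured
// port, make it non-blocking and register it with the shared epoll instance.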
87 int buildListeningPort(sctp_params_t &sctpParams) {
88     sctpParams.listenFD = socket(AF_INET6, SOCK_STREAM, IPPROTO_SCTP);
89     if (sctpParams.listenFD <= 0) {
90         mdclog_write(MDCLOG_ERR, "Error Opening socket, %s", strerror(errno));
91         return -1;
92     }
93
94     struct sockaddr_in6 serverAddress {};
95     serverAddress.sin6_family = AF_INET6;
96     serverAddress.sin6_addr   = in6addr_any;
97     serverAddress.sin6_port = htons(sctpParams.sctpPort);
98     if (bind(sctpParams.listenFD, (SA *)&serverAddress, sizeof(serverAddress)) < 0 ) {
99         mdclog_write(MDCLOG_ERR, "Error binding port %d. %s", sctpParams.sctpPort, strerror(errno));
100         return -1;
101     }
102     if (setSocketNoBlocking(sctpParams.listenFD) == -1) {
103         //mdclog_write(MDCLOG_ERR, "Error binding. %s", strerror(errno));
104         return -1;
105     }
106     if (mdclog_level_get() >= MDCLOG_DEBUG) {
107         struct sockaddr_in6 clientAddress {};
108         socklen_t len = sizeof(clientAddress);
109         getsockname(sctpParams.listenFD, (SA *)&clientAddress, &len);
110         char buff[1024] {};
111         inet_ntop(AF_INET6, &clientAddress.sin6_addr, buff, sizeof(buff));
112         mdclog_write(MDCLOG_DEBUG, "My address: %s, port %d\n", buff, htons(clientAddress.sin6_port));
113     }
114
115     if (listen(sctpParams.listenFD, SOMAXCONN) < 0) {
116         mdclog_write(MDCLOG_ERR, "Error listening. %s\n", strerror(errno));
117         return -1;
118     }
119     struct epoll_event event {};
120     event.events = EPOLLIN | EPOLLET;
121     event.data.fd = sctpParams.listenFD;
122
123     // add listening port to epoll
124     if (epoll_ctl(sctpParams.epoll_fd, EPOLL_CTL_ADD, sctpParams.listenFD, &event)) {
125         printf("Failed to add descriptor to epoll\n");
126         mdclog_write(MDCLOG_ERR, "Failed to add descriptor to epoll. %s\n", strerror(errno));
127         return -1;
128     }
129
130     return 0;
131 }
132
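// Read and validate the E2Term configuration file. Keys consumed below:
//   nano            - RMR listen port
//   loglevel        - debug | info | warning | error
//   volume          - directory that receives the rotated log files
//   local-ip        - IP advertised in the keep-alive message
//   sctp-port       - SCTP listen port
//   external-fqdn   - externally visible FQDN
//   pod_name        - name of the environment variable holding the pod name
//   trace           - start | stop
//   prometheusPort  - optional Prometheus exposer port
// Also builds the keep-alive JSON message and sets up the rotating boost log sink.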
133 int buildConfiguration(sctp_params_t &sctpParams) {
134     path p = (sctpParams.configFilePath + "/" + sctpParams.configFileName).c_str();
135     if (exists(p)) {
136         const int size = 2048;
137         auto fileSize = file_size(p);
138         if (fileSize > size) {
139             mdclog_write(MDCLOG_ERR, "File %s larger than %d", p.string().c_str(), size);
140             return -1;
141         }
142     } else {
143         mdclog_write(MDCLOG_ERR, "Configuration file %s does not exist", p.string().c_str());
144         return -1;
145     }
146
147     ReadConfigFile conf;
148     if (conf.openConfigFile(p.string()) == -1) {
149         mdclog_write(MDCLOG_ERR, "Failed to open config file %s, %s",
150                      p.string().c_str(), strerror(errno));
151         return -1;
152     }
153     int rmrPort = conf.getIntValue("nano");
154     if (rmrPort == -1) {
155         mdclog_write(MDCLOG_ERR, "illegal RMR port ");
156         return -1;
157     }
158     sctpParams.rmrPort = (uint16_t)rmrPort;
159     snprintf(sctpParams.rmrAddress, sizeof(sctpParams.rmrAddress), "%d", (int) (sctpParams.rmrPort));
160
161     auto tmpStr = conf.getStringValue("loglevel");
162     if (tmpStr.length() == 0) {
163         mdclog_write(MDCLOG_ERR, "illegal loglevel. Set loglevel to MDCLOG_INFO");
164         tmpStr = "info";
165     }
166     transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
167
168     if ((tmpStr.compare("debug")) == 0) {
169         sctpParams.logLevel = MDCLOG_DEBUG;
170     } else if ((tmpStr.compare("info")) == 0) {
171         sctpParams.logLevel = MDCLOG_INFO;
172     } else if ((tmpStr.compare("warning")) == 0) {
173         sctpParams.logLevel = MDCLOG_WARN;
174     } else if ((tmpStr.compare("error")) == 0) {
175         sctpParams.logLevel = MDCLOG_ERR;
176     } else {
177         mdclog_write(MDCLOG_ERR, "illegal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
178         sctpParams.logLevel = MDCLOG_INFO;
179     }
180     mdclog_level_set(sctpParams.logLevel);
181
182     tmpStr = conf.getStringValue("volume");
183     if (tmpStr.length() == 0) {
184         mdclog_write(MDCLOG_ERR, "illegal volume.");
185         return -1;
186     }
187
188     char tmpLogFilespec[VOLUME_URL_SIZE];
189     tmpLogFilespec[0] = 0;
190     sctpParams.volume[0] = 0;
191     snprintf(sctpParams.volume, VOLUME_URL_SIZE, "%s", tmpStr.c_str());
192     // copy the name to temp file as well
193     snprintf(tmpLogFilespec, VOLUME_URL_SIZE, "%s", tmpStr.c_str());
194
195
196     // define the file name in the tmp directory under the volume
197     strcat(tmpLogFilespec,"/tmp/E2Term_%Y-%m-%d_%H-%M-%S.%N.tmpStr");
198
199     sctpParams.myIP = conf.getStringValue("local-ip");
200     if (sctpParams.myIP.length() == 0) {
201         mdclog_write(MDCLOG_ERR, "illegal local-ip.");
202         return -1;
203     }
204
205     int sctpPort = conf.getIntValue("sctp-port");
206     if (sctpPort == -1) {
207         mdclog_write(MDCLOG_ERR, "illegal SCTP port ");
208         return -1;
209     }
210     sctpParams.sctpPort = (uint16_t)sctpPort;
211
212     sctpParams.fqdn = conf.getStringValue("external-fqdn");
213     if (sctpParams.fqdn.length() == 0) {
214         mdclog_write(MDCLOG_ERR, "illegal external-fqdn");
215         return -1;
216     }
217
218     std::string pod = conf.getStringValue("pod_name");
219     if (pod.length() == 0) {
220         mdclog_write(MDCLOG_ERR, "illegal pod_name in config file");
221         return -1;
222     }
223     auto *podName = getenv(pod.c_str());
224     if (podName == nullptr) {
225         mdclog_write(MDCLOG_ERR, "illegal pod_name or environment variable does not exist : %s", pod.c_str());
226         return -1;
227
228     } else {
229         sctpParams.podName.assign(podName);
230         if (sctpParams.podName.length() == 0) {
231             mdclog_write(MDCLOG_ERR, "illegal pod_name");
232             return -1;
233         }
234     }
235
236     tmpStr = conf.getStringValue("trace");
237     transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
238     if ((tmpStr.compare("start")) == 0) {
239         mdclog_write(MDCLOG_INFO, "Trace set to: start");
240         sctpParams.trace = true;
241     } else if ((tmpStr.compare("stop")) == 0) {
242         mdclog_write(MDCLOG_INFO, "Trace set to: stop");
243         sctpParams.trace = false;
244     }
245     jsonTrace = sctpParams.trace;
246
247     sctpParams.epollTimeOut = -1;
248
249     tmpStr = conf.getStringValue("prometheusPort");
250     if (tmpStr.length() != 0) {
251         sctpParams.prometheusPort = tmpStr;
252     }
253
254     sctpParams.ka_message_length = snprintf(sctpParams.ka_message, KA_MESSAGE_SIZE, "{\"address\": \"%s:%d\","
255                                                                                     "\"fqdn\": \"%s\","
256                                                                                     "\"pod_name\": \"%s\"}",
257                                             (const char *)sctpParams.myIP.c_str(),
258                                             sctpParams.rmrPort,
259                                             sctpParams.fqdn.c_str(),
260                                             sctpParams.podName.c_str());
261
262     if (mdclog_level_get() >= MDCLOG_INFO) {
263         mdclog_mdc_add("RMR Port", to_string(sctpParams.rmrPort).c_str());
264         mdclog_mdc_add("LogLevel", to_string(sctpParams.logLevel).c_str());
265         mdclog_mdc_add("volume", sctpParams.volume);
266         mdclog_mdc_add("tmpLogFilespec", tmpLogFilespec);
267         mdclog_mdc_add("my ip", sctpParams.myIP.c_str());
268         mdclog_mdc_add("pod name", sctpParams.podName.c_str());
269
270         mdclog_write(MDCLOG_INFO, "running parameters for instance : %s", sctpParams.ka_message);
271     }
272     mdclog_mdc_clean();
273
274     // Files written to the current working directory
275     boostLogger = logging::add_file_log(
276             keywords::file_name = tmpLogFilespec, // to temp directory
277             keywords::rotation_size = 10 * 1024 * 1024,
278             keywords::time_based_rotation = sinks::file::rotation_at_time_interval(posix_time::hours(1)),
279             keywords::format = "%Message%"
280             //keywords::format = "[%TimeStamp%]: %Message%" // prefix each log record with a time stamp
281     );
282
283     // Set up a destination folder for collecting rotated (closed) files; keeping it on the same volume lets the collector use rename()
284     boostLogger->locked_backend()->set_file_collector(sinks::file::make_collector(
285             keywords::target = sctpParams.volume
286     ));
287
288     // Upon restart, scan the directory for files matching the file_name pattern
289     boostLogger->locked_backend()->scan_for_files();
290
291     // Enable auto-flushing after each log record is written
292     if (mdclog_level_get() >= MDCLOG_DEBUG) {
293         boostLogger->locked_backend()->auto_flush(true);
294     }
295
296     return 0;
297 }
298
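// Register the E2T message counter family with the Prometheus registry and start
// the pull-mode exposer on the configured port.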
299 void startPrometheus(sctp_params_t &sctpParams) {
300     sctpParams.prometheusFamily = &BuildCounter()
301             .Name("E2T")
302             .Help("E2T message counter")
303             .Labels({{"POD_NAME", sctpParams.podName}})
304             .Register(*sctpParams.prometheusRegistry);
305
306     string prometheusPath = sctpParams.prometheusPort + "," + "[::]:" + sctpParams.prometheusPort;
307     if (mdclog_level_get() >= MDCLOG_DEBUG) {
308         mdclog_write(MDCLOG_DEBUG, "Start Prometheus Pull mode on %s", prometheusPath.c_str());
309     }
310     sctpParams.prometheusExposer = new Exposer(prometheusPath, 1);
311     sctpParams.prometheusExposer->RegisterCollectable(sctpParams.prometheusRegistry);
312 }
313
314 int main(const int argc, char **argv) {
315     sctp_params_t sctpParams;
316
317     {
318         std::random_device device{};
319         std::mt19937 generator(device());
320         std::uniform_int_distribution<long> distribution(1, (long) 1e12);
321         transactionCounter = distribution(generator);
322     }
323
324 //    uint64_t st = 0;
325 //    uint32_t aux1 = 0;
326 //   st = rdtscp(aux1);
327
328     unsigned num_cpus = std::thread::hardware_concurrency();
329     init_log();
330     mdclog_level_set(MDCLOG_INFO);
331
332     if (std::signal(SIGINT, catch_function) == SIG_ERR) {
333         mdclog_write(MDCLOG_ERR, "Error initializing SIGINT");
334         exit(1);
335     }
336     if (std::signal(SIGABRT, catch_function)== SIG_ERR) {
337         mdclog_write(MDCLOG_ERR, "Error initializing SIGABRT");
338         exit(1);
339     }
340     if (std::signal(SIGTERM, catch_function)== SIG_ERR) {
341         mdclog_write(MDCLOG_ERR, "Error initializing SIGTERM");
342         exit(1);
343     }
344
345     cpuClock = approx_CPU_MHz(100);
346
347     mdclog_write(MDCLOG_DEBUG, "CPU speed %11.11f", cpuClock);
348
349     auto result = parse(argc, argv, sctpParams);
350
351     if (buildConfiguration(sctpParams) != 0) {
352         exit(-1);
353     }
354
355     //auto registry = std::make_shared<Registry>();
356     sctpParams.prometheusRegistry = std::make_shared<Registry>();
357
358     //sctpParams.prometheusFamily = new Family<Counter>("E2T", "E2T message counter", {{"E", sctpParams.podName}});
359
360     startPrometheus(sctpParams);
361
362     // start epoll
363     sctpParams.epoll_fd = epoll_create1(0);
364     if (sctpParams.epoll_fd == -1) {
365         mdclog_write(MDCLOG_ERR, "failed to open epoll descriptor");
366         exit(-1);
367     }
368
369     getRmrContext(sctpParams);
370     if (sctpParams.rmrCtx == nullptr) {
371         close(sctpParams.epoll_fd);
372         exit(-1);
373     }
374
375     if (buildInotify(sctpParams) == -1) {
376         close(sctpParams.rmrListenFd);
377         rmr_close(sctpParams.rmrCtx);
378         close(sctpParams.epoll_fd);
379         exit(-1);
380     }
381
382     if (buildListeningPort(sctpParams) != 0) {
383         close(sctpParams.rmrListenFd);
384         rmr_close(sctpParams.rmrCtx);
385         close(sctpParams.epoll_fd);
386         exit(-1);
387     }
388
389     sctpParams.sctpMap = new mapWrapper();
390
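    // Three listener (worker) threads are started, each pinned to its own CPU core below.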
391     num_cpus = 3;
392     std::vector<std::thread> threads(num_cpus);
393 //    std::vector<std::thread> threads;
394
395     for (unsigned int i = 0; i < num_cpus; i++) {
396         threads[i] = std::thread(listener, &sctpParams);
397
398         cpu_set_t cpuset;
399         CPU_ZERO(&cpuset);
400         CPU_SET(i, &cpuset);
401         int rc = pthread_setaffinity_np(threads[i].native_handle(), sizeof(cpu_set_t), &cpuset);
402         if (rc != 0) {
403             mdclog_write(MDCLOG_ERR, "Error calling pthread_setaffinity_np: %d", rc);
404         }
405     }
406
407
408     //loop over term_init until first message from xApp
409     handleTermInit(sctpParams);
410
411     for (auto &t : threads) {
412         t.join();
413     }
414
415     return 0;
416 }
417
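// Keep sending E2_TERM_INIT to the E2 Manager over RMR until the first message
// arrives from an xApp; while waiting, the send is repeated about every 100 seconds.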
418 void handleTermInit(sctp_params_t &sctpParams) {
419     sendTermInit(sctpParams);
420     //send to e2 manager init of e2 term
421     //E2_TERM_INIT
422
423     int count = 0;
424     while (true) {
425         auto xappMessages = num_of_XAPP_messages.load(std::memory_order_acquire);
426         if (xappMessages > 0) {
427             if (mdclog_level_get() >=  MDCLOG_INFO) {
428                 mdclog_write(MDCLOG_INFO, "Got a message from some application, stop sending E2_TERM_INIT");
429             }
430             return;
431         }
432         usleep(100000);
433         count++;
434         if (count % 1000 == 0) {
435             mdclog_write(MDCLOG_ERR, "Got no messages from any xApp");
436             sendTermInit(sctpParams);
437         }
438     }
439 }
440
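// Allocate an RMR message carrying the keep-alive JSON payload and retry until
// RMR accepts the E2_TERM_INIT message (state == 0).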
441 void sendTermInit(sctp_params_t &sctpParams) {
442     rmr_mbuf_t *msg = rmr_alloc_msg(sctpParams.rmrCtx, sctpParams.ka_message_length);
443     auto count = 0;
444     while (true) {
445         msg->mtype = E2_TERM_INIT;
446         msg->state = 0;
447         rmr_bytes2payload(msg, (unsigned char *)sctpParams.ka_message, sctpParams.ka_message_length);
448         static unsigned char tx[32];
449         auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
450         rmr_bytes2xact(msg, tx, txLen);
451         msg = rmr_send_msg(sctpParams.rmrCtx, msg);
452         if (msg == nullptr) {
453             msg = rmr_alloc_msg(sctpParams.rmrCtx, sctpParams.ka_message_length);
454         } else if (msg->state == 0) {
455             rmr_free_msg(msg);
456             if (mdclog_level_get() >=  MDCLOG_INFO) {
457                 mdclog_write(MDCLOG_INFO, "E2_TERM_INIT successfully sent ");
458             }
459             return;
460         } else {
461             if (count % 100 == 0) {
462                 mdclog_write(MDCLOG_ERR, "Error sending E2_TERM_INIT cause : %s ", translateRmrErrorMessages(msg->state).c_str());
463             }
464             sleep(1);
465         }
466         count++;
467     }
468 }
469
470 /**
471  * Parse the command line options (configuration file path and name).
472  * @param argc
473  * @param argv
474  * @param sctpParams receives the configuration file path and name
475  * @return the cxxopts parse result
476  */
477 cxxopts::ParseResult parse(int argc, char *argv[], sctp_params_t &sctpParams) {
478     cxxopts::Options options(argv[0], "e2 term help");
479     options.positional_help("[optional args]").show_positional_help();
480     options.allow_unrecognised_options().add_options()
481             ("p,path", "config file path", cxxopts::value<std::string>(sctpParams.configFilePath)->default_value("config"))
482             ("f,file", "config file name", cxxopts::value<std::string>(sctpParams.configFileName)->default_value("config.conf"))
483             ("h,help", "Print help");
484
485     auto result = options.parse(argc, (const char **&)argv);
486
487     if (result.count("help")) {
488         std::cout << options.help({""}) << std::endl;
489         exit(0);
490     }
491     return result;
492 }
493
494 /**
495  * Create an inotify watch on the configuration directory and register it with epoll.
496  * @param sctpParams
497  * @return -1 failed 0 success
498  */
499 int buildInotify(sctp_params_t &sctpParams) {
500     sctpParams.inotifyFD = inotify_init1(IN_NONBLOCK);
501     if (sctpParams.inotifyFD == -1) {
502         mdclog_write(MDCLOG_ERR, "Failed to init inotify (inotify_init1) %s", strerror(errno));
503         return -1;
504     }
505
506     sctpParams.inotifyWD = inotify_add_watch(sctpParams.inotifyFD,
507                                              (const char *)sctpParams.configFilePath.c_str(),
508                                              (unsigned)IN_OPEN | (unsigned)IN_CLOSE_WRITE | (unsigned)IN_CLOSE_NOWRITE); //IN_CLOSE = (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE)
509     if (sctpParams.inotifyWD == -1) {
510         mdclog_write(MDCLOG_ERR, "Failed to add directory : %s to  inotify (inotify_add_watch) %s",
511                      sctpParams.configFilePath.c_str(),
512                      strerror(errno));
513         close(sctpParams.inotifyFD);
514         return -1;
515     }
516
517     struct epoll_event event{};
518     event.events = (EPOLLIN);
519     event.data.fd = sctpParams.inotifyFD;
520     // add listening RMR FD to epoll
521     if (epoll_ctl(sctpParams.epoll_fd, EPOLL_CTL_ADD, sctpParams.inotifyFD, &event)) {
522         mdclog_write(MDCLOG_ERR, "Failed to add inotify FD to epoll");
523         close(sctpParams.inotifyFD);
524         return -1;
525     }
526     return 0;
527 }
528
529 /**
530  * Worker thread main loop: waits on the shared epoll descriptor and dispatches
531  * new SCTP connections, SCTP data, RMR messages from xApps and inotify events.
532  * @param params pointer to the shared sctp_params_t
533  */
534 void listener(sctp_params_t *params) {
535     int num_of_SCTP_messages = 0;
536     auto totalTime = 0.0;
537     mdclog_mdc_clean();
538     mdclog_level_set(params->logLevel);
539
540     std::thread::id this_id = std::this_thread::get_id();
541     //save cout
542     streambuf *oldCout = cout.rdbuf();
543     ostringstream memCout;
544     // create new cout
545     cout.rdbuf(memCout.rdbuf());
546     cout << this_id;
547     //return to the normal cout
548     cout.rdbuf(oldCout);
549
550     char tid[32];
551     memcpy(tid, memCout.str().c_str(), memCout.str().length() < 32 ? memCout.str().length() : 31);
552     tid[memCout.str().length() < 32 ? memCout.str().length() : 31] = 0;
553     mdclog_mdc_add("thread id", tid);
554
555     if (mdclog_level_get() >= MDCLOG_DEBUG) {
556         mdclog_write(MDCLOG_DEBUG, "started thread number %s", tid);
557     }
558
559     RmrMessagesBuffer_t rmrMessageBuffer{};
560     //create and init RMR
561     rmrMessageBuffer.rmrCtx = params->rmrCtx;
562
563     auto *events = (struct epoll_event *) calloc(MAXEVENTS, sizeof(struct epoll_event));
564     struct timespec end{0, 0};
565     struct timespec start{0, 0};
566
567     rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
568     rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
569
570     memcpy(rmrMessageBuffer.ka_message, params->ka_message, params->ka_message_length);
571     rmrMessageBuffer.ka_message_len = params->ka_message_length;
572     rmrMessageBuffer.ka_message[rmrMessageBuffer.ka_message_len] = 0;
573
574     if (mdclog_level_get() >= MDCLOG_DEBUG) {
575         mdclog_write(MDCLOG_DEBUG, "keep alive message is : %s", rmrMessageBuffer.ka_message);
576     }
577
578     ReportingMessages_t message {};
579
580 //    for (int i = 0; i < MAX_RMR_BUFF_ARRAY; i++) {
581 //        rmrMessageBuffer.rcvBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
582 //        rmrMessageBuffer.sendBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
583 //    }
584
585     while (true) {
586         if (mdclog_level_get() >= MDCLOG_DEBUG) {
587             mdclog_write(MDCLOG_DEBUG, "Start EPOLL Wait. Timeout = %d", params->epollTimeOut);
588         }
589         auto numOfEvents = epoll_wait(params->epoll_fd, events, MAXEVENTS, params->epollTimeOut);
590         if (numOfEvents == 0) { // time out
591             if (mdclog_level_get() >= MDCLOG_DEBUG) {
592                 mdclog_write(MDCLOG_DEBUG, "got epoll timeout");
593             }
594             continue;
595         } else if (numOfEvents < 0) {
596             if (errno == EINTR) {
597                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
598                     mdclog_write(MDCLOG_DEBUG, "got EINTR : %s", strerror(errno));
599                 }
600                 continue;
601             }
602             mdclog_write(MDCLOG_ERR, "Epoll wait failed, errno = %s", strerror(errno));
603             return;
604         }
605         for (auto i = 0; i < numOfEvents; i++) {
606             if (mdclog_level_get() >= MDCLOG_DEBUG) {
607                 mdclog_write(MDCLOG_DEBUG, "handling epoll event %d out of %d", i + 1, numOfEvents);
608             }
609             clock_gettime(CLOCK_MONOTONIC, &message.message.time);
610             start.tv_sec = message.message.time.tv_sec;
611             start.tv_nsec = message.message.time.tv_nsec;
612
613
614             if ((events[i].events & EPOLLERR) || (events[i].events & EPOLLHUP)) {
615                 handlepoll_error(events[i], message, rmrMessageBuffer, params);
616             } else if (events[i].events & EPOLLOUT) {
617                 handleEinprogressMessages(events[i], message, rmrMessageBuffer, params);
618             } else if (params->listenFD == events[i].data.fd) {
619                 if (mdclog_level_get() >= MDCLOG_INFO) {
620                     mdclog_write(MDCLOG_INFO, "New connection request from sctp network\n");
621                 }
622                 // a new connection was requested from the RAN; start building the connection
623                 while (true) {
624                     struct sockaddr in_addr {};
625                     socklen_t in_len;
626                     char hostBuff[NI_MAXHOST] {};
627                     char portBuff[NI_MAXSERV] {};
628
629                     in_len = sizeof(in_addr);
630                     auto *peerInfo = (ConnectedCU_t *)calloc(1, sizeof(ConnectedCU_t));
631                     if(peerInfo == nullptr){
632                         mdclog_write(MDCLOG_ERR, "calloc failed");
633                         break;
634                     }
635                     peerInfo->sctpParams = params;
636                     peerInfo->fileDescriptor = accept(params->listenFD, &in_addr, &in_len);
637                     if (peerInfo->fileDescriptor == -1) {
638                         if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) {
639                             /* We have processed all incoming connections. */
640                             break;
641                         } else {
642                             mdclog_write(MDCLOG_ERR, "Accept error, errno = %s", strerror(errno));
643                             break;
644                         }
645                     }
646                     if (setSocketNoBlocking(peerInfo->fileDescriptor) == -1) {
647                         mdclog_write(MDCLOG_ERR, "setSocketNoBlocking failed to set new connection %s on port %s\n", hostBuff, portBuff);
648                         close(peerInfo->fileDescriptor);
649                         break;
650                     }
651                     auto  ans = getnameinfo(&in_addr, in_len,
652                                             peerInfo->hostName, NI_MAXHOST,
653                                             peerInfo->portNumber, NI_MAXSERV, (unsigned )((unsigned int)NI_NUMERICHOST | (unsigned int)NI_NUMERICSERV));
654                     if (ans < 0) {
655                         mdclog_write(MDCLOG_ERR, "Failed to get info on connection request. %s\n", strerror(errno));
656                         close(peerInfo->fileDescriptor);
657                         break;
658                     }
659                     if (mdclog_level_get() >= MDCLOG_DEBUG) {
660                         mdclog_write(MDCLOG_DEBUG, "Accepted connection on descriptor %d (host=%s, port=%s)\n", peerInfo->fileDescriptor, peerInfo->hostName, peerInfo->portNumber);
661                     }
662                     peerInfo->isConnected = false;
663                     peerInfo->gotSetup = false;
664                     if (addToEpoll(params->epoll_fd,
665                                    peerInfo,
666                                    (EPOLLIN | EPOLLET),
667                                    params->sctpMap, nullptr,
668                                    0) != 0) {
669                         break;
670                     }
671                     break;
672                 }
673             } else if (params->rmrListenFd == events[i].data.fd) {
674                 // got message from XAPP
675                 //num_of_XAPP_messages.fetch_add(1, std::memory_order_release);
676                 num_of_messages.fetch_add(1, std::memory_order_release);
677                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
678                     mdclog_write(MDCLOG_DEBUG, "new RMR message");
679                 }
680                 if (receiveXappMessages(params->sctpMap,
681                                         rmrMessageBuffer,
682                                         message.message.time) != 0) {
683                     mdclog_write(MDCLOG_ERR, "Error handling Xapp message");
684                 }
685             } else if (params->inotifyFD == events[i].data.fd) {
686                 mdclog_write(MDCLOG_INFO, "Got event from inotify (configuration update)");
687                 handleConfigChange(params);
688             } else {
689                 /* We have data on the fd waiting to be read. Read and display it.
690                  * We must read whatever data is available completely, as we are running
691                  *  in edge-triggered mode and won't get a notification again for the same data. */
692                 num_of_messages.fetch_add(1, std::memory_order_release);
693                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
694                     mdclog_write(MDCLOG_DEBUG, "new message from SCTP, epoll flags are : %0x", events[i].events);
695                 }
696                 receiveDataFromSctp(&events[i],
697                                     params->sctpMap,
698                                     num_of_SCTP_messages,
699                                     rmrMessageBuffer,
700                                     message.message.time);
701             }
702
703             clock_gettime(CLOCK_MONOTONIC, &end);
704             if (mdclog_level_get() >= MDCLOG_INFO) {
705                 totalTime += ((end.tv_sec + 1.0e-9 * end.tv_nsec) -
706                               ((double) start.tv_sec + 1.0e-9 * start.tv_nsec));
707             }
708             if (mdclog_level_get() >= MDCLOG_DEBUG) {
709                 mdclog_write(MDCLOG_DEBUG, "message handling is %ld seconds %ld nanoseconds",
710                              end.tv_sec - start.tv_sec,
711                              end.tv_nsec - start.tv_nsec);
712             }
713         }
714     }
715 }
716
717 /**
718  * Re-read the configuration file after an inotify change and apply the updated loglevel and trace settings.
719  * @param sctpParams
720  */
721 void handleConfigChange(sctp_params_t *sctpParams) {
722     char buf[4096] __attribute__ ((aligned(__alignof__(struct inotify_event))));
723     const struct inotify_event *event;
724     char *ptr;
725
726     path p = (sctpParams->configFilePath + "/" + sctpParams->configFileName).c_str();
727     auto endlessLoop = true;
728     while (endlessLoop) {
729         auto len = read(sctpParams->inotifyFD, buf, sizeof buf);
730         if (len == -1) {
731             if (errno != EAGAIN) {
732                 mdclog_write(MDCLOG_ERR, "read %s ", strerror(errno));
733                 endlessLoop = false;
734                 continue;
735             }
736             else {
737                 endlessLoop = false;
738                 continue;
739             }
740         }
741
742         for (ptr = buf; ptr < buf + len; ptr += sizeof(struct inotify_event) + event->len) {
743             event = (const struct inotify_event *)ptr;
744             if (event->mask & (uint32_t)IN_ISDIR) {
745                 continue;
746             }
747
748             // event->wd identifies which watch fired; only the configuration directory is watched here
749             if (sctpParams->inotifyWD == event->wd) {
750                 // the event belongs to the watched configuration directory; nothing extra to do
751             }
752             if (event->len) {
753                 auto  retVal = strcmp(sctpParams->configFileName.c_str(), event->name);
754                 if (retVal != 0) {
755                     continue;
756                 }
757             }
758             // only the file we want
759             if (event->mask & (uint32_t)IN_CLOSE_WRITE) {
760                 if (mdclog_level_get() >= MDCLOG_INFO) {
761                     mdclog_write(MDCLOG_INFO, "Configuration file changed");
762                 }
763                 if (exists(p)) {
764                     const int size = 2048;
765                     auto fileSize = file_size(p);
766                     if (fileSize > size) {
767                         mdclog_write(MDCLOG_ERR, "File %s larger than %d", p.string().c_str(), size);
768                         return;
769                     }
770                 } else {
771                     mdclog_write(MDCLOG_ERR, "Configuration file %s does not exist", p.string().c_str());
772                     return;
773                 }
774
775                 ReadConfigFile conf;
776                 if (conf.openConfigFile(p.string()) == -1) {
777                     mdclog_write(MDCLOG_ERR, "Failed to open config file %s, %s",
778                                  p.string().c_str(), strerror(errno));
779                     return;
780                 }
781
782                 auto tmpStr = conf.getStringValue("loglevel");
783                 if (tmpStr.length() == 0) {
784                     mdclog_write(MDCLOG_ERR, "illegal loglevel. Set loglevel to MDCLOG_INFO");
785                     tmpStr = "info";
786                 }
787                 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
788
789                 if ((tmpStr.compare("debug")) == 0) {
790                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_DEBUG");
791                     sctpParams->logLevel = MDCLOG_DEBUG;
792                 } else if ((tmpStr.compare("info")) == 0) {
793                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_INFO");
794                     sctpParams->logLevel = MDCLOG_INFO;
795                 } else if ((tmpStr.compare("warning")) == 0) {
796                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_WARN");
797                     sctpParams->logLevel = MDCLOG_WARN;
798                 } else if ((tmpStr.compare("error")) == 0) {
799                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_ERR");
800                     sctpParams->logLevel = MDCLOG_ERR;
801                 } else {
802                     mdclog_write(MDCLOG_ERR, "illegal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
803                     sctpParams->logLevel = MDCLOG_INFO;
804                 }
805                 mdclog_level_set(sctpParams->logLevel);
806
807
808                 tmpStr = conf.getStringValue("trace");
809                 if (tmpStr.length() == 0) {
810                     mdclog_write(MDCLOG_ERR, "illegal trace. Set trace to stop");
811                     tmpStr = "stop";
812                 }
813
814                 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
815                 if ((tmpStr.compare("start")) == 0) {
816                     mdclog_write(MDCLOG_INFO, "Trace set to: start");
817                     sctpParams->trace = true;
818                 } else if ((tmpStr.compare("stop")) == 0) {
819                     mdclog_write(MDCLOG_INFO, "Trace set to: stop");
820                     sctpParams->trace = false;
821                 } else {
822                     mdclog_write(MDCLOG_ERR, "Trace was set to wrong value %s, set to stop", tmpStr.c_str());
823                     sctpParams->trace = false;
824                 }
825                 jsonTrace = sctpParams->trace;
826
827
828                 endlessLoop = false;
829             }
830         }
831     }
832 }
833
834 /**
835  * Complete a non-blocking SCTP connect that returned EINPROGRESS: check SO_ERROR, report a failure to the xApp, or mark the peer connected and send the buffered SETUP / ENDC SETUP message over SCTP.
836  * @param event
837  * @param message
838  * @param rmrMessageBuffer
839  * @param params
840  */
841 void handleEinprogressMessages(struct epoll_event &event,
842                                ReportingMessages_t &message,
843                                RmrMessagesBuffer_t &rmrMessageBuffer,
844                                sctp_params_t *params) {
845     auto *peerInfo = (ConnectedCU_t *)event.data.ptr;
846     memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
847
848     mdclog_write(MDCLOG_INFO, "file descriptor %d got EPOLLOUT", peerInfo->fileDescriptor);
849     auto retVal = 0;
850     socklen_t retValLen = sizeof(retVal);
851     auto rc = getsockopt(peerInfo->fileDescriptor, SOL_SOCKET, SO_ERROR, &retVal, &retValLen);
852     if (rc != 0 || retVal != 0) {
853         if (rc != 0) {
854             rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
855                                                          "%s|Failed SCTP Connection, after EINPROGRESS the getsockopt%s",
856                                                          peerInfo->enodbName, strerror(errno));
857         } else if (retVal != 0) {
858             rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
859                                                          "%s|Failed SCTP Connection after EINPROGRESS, SO_ERROR",
860                                                          peerInfo->enodbName);
861         }
862
863         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
864         message.message.asnLength = rmrMessageBuffer.sendMessage->len;
865         mdclog_write(MDCLOG_ERR, "%s", rmrMessageBuffer.sendMessage->payload);
866         message.message.direction = 'N';
867         if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
868             mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
869         }
870         memset(peerInfo->asnData, 0, peerInfo->asnLength);
871         peerInfo->asnLength = 0;
872         peerInfo->mtype = 0;
873         return;
874     }
875
876     peerInfo->isConnected = true;
877
878     if (modifyToEpoll(params->epoll_fd, peerInfo, (EPOLLIN | EPOLLET), params->sctpMap, peerInfo->enodbName,
879                       peerInfo->mtype) != 0) {
880         mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_MOD");
881         return;
882     }
883
884     message.message.asndata = (unsigned char *)peerInfo->asnData;
885     message.message.asnLength = peerInfo->asnLength;
886     message.message.messageType = peerInfo->mtype;
887     memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
888     num_of_messages.fetch_add(1, std::memory_order_release);
889     if (mdclog_level_get() >= MDCLOG_DEBUG) {
890         mdclog_write(MDCLOG_DEBUG, "send the delayed SETUP/ENDC SETUP to sctp for %s",
891                      message.message.enodbName);
892     }
893     if (sendSctpMsg(peerInfo, message, params->sctpMap) != 0) {
894         if (mdclog_level_get() >= MDCLOG_DEBUG) {
895             mdclog_write(MDCLOG_DEBUG, "Error write to SCTP  %s %d", __func__, __LINE__);
896         }
897         return;
898     }
899
900     memset(peerInfo->asnData, 0, peerInfo->asnLength);
901     peerInfo->asnLength = 0;
902     peerInfo->mtype = 0;
903 }
904
905
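// Handle EPOLLERR/EPOLLHUP on a peer socket: report the SCTP connection failure to the
// xApp, close the socket and remove the peer from the connection map (errors on the RMR
// listen fd are only logged).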
906 void handlepoll_error(struct epoll_event &event,
907                       ReportingMessages_t &message,
908                       RmrMessagesBuffer_t &rmrMessageBuffer,
909                       sctp_params_t *params) {
910     if (event.data.fd != params->rmrListenFd) {
911         auto *peerInfo = (ConnectedCU_t *)event.data.ptr;
912         mdclog_write(MDCLOG_ERR, "epoll error, events %0x on fd %d, RAN NAME : %s",
913                      event.events, peerInfo->fileDescriptor, peerInfo->enodbName);
914
915         rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
916                                                      "%s|Failed SCTP Connection",
917                                                      peerInfo->enodbName);
918         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
919         message.message.asnLength = rmrMessageBuffer.sendMessage->len;
920
921         memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
922         message.message.direction = 'N';
923         if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
924             mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
925         }
926
927         close(peerInfo->fileDescriptor);
928         params->sctpMap->erase(peerInfo->enodbName);
929         cleanHashEntry((ConnectedCU_t *) event.data.ptr, params->sctpMap);
930     } else {
931         mdclog_write(MDCLOG_ERR, "epoll error, events %0x on RMR FD", event.events);
932     }
933 }
934 /**
935  * Set O_NONBLOCK on the given socket.
936  * @param socket
937  * @return 0 on success, -1 on failure
938  */
939 int setSocketNoBlocking(int socket) {
940     auto flags = fcntl(socket, F_GETFL, 0);
941
942     if (flags == -1) {
943         mdclog_mdc_add("func", "fcntl");
944         mdclog_write(MDCLOG_ERR, "%s, %s", __FUNCTION__, strerror(errno));
945         mdclog_mdc_clean();
946         return -1;
947     }
948
949     flags = (unsigned) flags | (unsigned) O_NONBLOCK;
950     if (fcntl(socket, F_SETFL, flags) == -1) {
951         mdclog_mdc_add("func", "fcntl");
952         mdclog_write(MDCLOG_ERR, "%s, %s", __FUNCTION__, strerror(errno));
953         mdclog_mdc_clean();
954         return -1;
955     }
956
957     return 0;
958 }
959
960 /**
961  * Remove the connection's host and eNB-name entries from the map and free the peer structure.
962  * @param val
963  * @param m
964  */
965 void cleanHashEntry(ConnectedCU_t *val, Sctp_Map_t *m) {
966     char *dummy;
967     auto port = (uint16_t) strtol(val->portNumber, &dummy, 10);
968     char searchBuff[2048]{};
969
970     snprintf(searchBuff, sizeof searchBuff, "host:%s:%d", val->hostName, port);
971     m->erase(searchBuff);
972
973     m->erase(val->enodbName);
974     free(val);
975 }
976
977 /**
978  *
979  * @param fd file descriptor
980  * @param data the asn data to send
981  * @param len  length of the data
982  * @param enodbName the enodbName as in the map for printing purpose
983  * @param m map host information
984  * @param mtype message number
985  * @return 0 success, a negative number on fail
986  */
987 int sendSctpMsg(ConnectedCU_t *peerInfo, ReportingMessages_t &message, Sctp_Map_t *m) {
988     auto loglevel = mdclog_level_get();
989     int fd = peerInfo->fileDescriptor;
990     if (loglevel >= MDCLOG_DEBUG) {
991         mdclog_write(MDCLOG_DEBUG, "Send SCTP message for CU %s, %s",
992                      message.message.enodbName, __FUNCTION__);
993     }
994
995     while (true) {
996         if (send(fd,message.message.asndata, message.message.asnLength,MSG_NOSIGNAL) < 0) {
997             if (errno == EINTR) {
998                 continue;
999             }
1000             mdclog_write(MDCLOG_ERR, "error writing to CU a message, %s ", strerror(errno));
1001             if (!peerInfo->isConnected) {
1002                 mdclog_write(MDCLOG_ERR, "connection to CU %s is still in progress.", message.message.enodbName);
1003                 return -1;
1004             }
1005             cleanHashEntry(peerInfo, m);
1006             close(fd);
1007             char key[MAX_ENODB_NAME_SIZE * 2];
1008             snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", message.message.enodbName,
1009                      message.message.messageType);
1010             if (loglevel >= MDCLOG_DEBUG) {
1011                 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
1012             }
1013             auto tmp = m->find(key);
1014             if (tmp) {
1015                 free(tmp);
1016             }
1017             m->erase(key);
1018             return -1;
1019         }
1020         message.message.direction = 'D';
1021         // send report.buffer of size
1022         buildJsonMessage(message);
1023
1024         if (loglevel >= MDCLOG_DEBUG) {
1025             mdclog_write(MDCLOG_DEBUG,
1026                          "SCTP message for CU %s sent from %s",
1027                          message.message.enodbName,
1028                          __FUNCTION__);
1029         }
1030         return 0;
1031     }
1032 }
1033
1034 /**
1035  * Point the reporting message at the received RMR payload and record its length.
1036  * @param message
1037  * @param rmrMessageBuffer
1038  */
1039 void getRequestMetaData(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1040     message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
1041     message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
1042
1043     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1044         mdclog_write(MDCLOG_DEBUG, "Message from Xapp RAN name = %s message length = %ld",
1045                      message.message.enodbName, (unsigned long) message.message.asnLength);
1046     }
1047 }
1048
1049
1050
1051 /**
1052  * Drain all pending data from an SCTP socket (edge-triggered), decode each E2AP PDU and dispatch it by type; on EOF report the disconnect to the xApp and clean up.
1053  * @param events
1054  * @param sctpMap
1055  * @param numOfMessages
1056  * @param rmrMessageBuffer
1057  * @param ts
1058  * @return
1059  */
1060 int receiveDataFromSctp(struct epoll_event *events,
1061                         Sctp_Map_t *sctpMap,
1062                         int &numOfMessages,
1063                         RmrMessagesBuffer_t &rmrMessageBuffer,
1064                         struct timespec &ts) {
1065     /* We have data on the fd waiting to be read. Read and display it.
1066      * We must read whatever data is available completely, as we are running
1067      * in edge-triggered mode and won't get a notification again for the same data. */
1068     ReportingMessages_t message {};
1069     auto done = 0;
1070     auto loglevel = mdclog_level_get();
1071
1072     // get the identity of the interface
1073     message.peerInfo = (ConnectedCU_t *)events->data.ptr;
1074
1075     struct timespec start{0, 0};
1076     struct timespec decodeStart{0, 0};
1077     struct timespec end{0, 0};
1078
1079     E2AP_PDU_t *pdu = nullptr;
1080
1081     while (true) {
1082         if (loglevel >= MDCLOG_DEBUG) {
1083             mdclog_write(MDCLOG_DEBUG, "Start Read from SCTP %d fd", message.peerInfo->fileDescriptor);
1084             clock_gettime(CLOCK_MONOTONIC, &start);
1085         }
1086         // read the buffer directly to rmr payload
1087         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1088         message.message.asnLength = rmrMessageBuffer.sendMessage->len =
1089                 read(message.peerInfo->fileDescriptor, rmrMessageBuffer.sendMessage->payload, RECEIVE_SCTP_BUFFER_SIZE);
1090
1091         if (loglevel >= MDCLOG_DEBUG) {
1092             mdclog_write(MDCLOG_DEBUG, "Finish Read from SCTP %d fd message length = %ld",
1093                          message.peerInfo->fileDescriptor, message.message.asnLength);
1094         }
1095
1096         memcpy(message.message.enodbName, message.peerInfo->enodbName, sizeof(message.peerInfo->enodbName));
1097         message.message.direction = 'U';
1098         message.message.time.tv_nsec = ts.tv_nsec;
1099         message.message.time.tv_sec = ts.tv_sec;
1100
1101         if (message.message.asnLength < 0) {
1102             if (errno == EINTR) {
1103                 continue;
1104             }
1105             /* If errno == EAGAIN, that means we have read all
1106                data. So go back to the main loop. */
1107             if (errno != EAGAIN) {
1108                 mdclog_write(MDCLOG_ERR, "Read error, %s ", strerror(errno));
1109                 done = 1;
1110             } else if (loglevel >= MDCLOG_DEBUG) {
1111                 mdclog_write(MDCLOG_DEBUG, "EAGAIN - descriptor = %d", message.peerInfo->fileDescriptor);
1112             }
1113             break;
1114         } else if (message.message.asnLength == 0) {
1115             /* End of file. The remote has closed the connection. */
1116             if (loglevel >= MDCLOG_INFO) {
1117                 mdclog_write(MDCLOG_INFO, "END of File Closed connection - descriptor = %d",
1118                              message.peerInfo->fileDescriptor);
1119             }
1120             done = 1;
1121             break;
1122         }
1123
1124         if (loglevel >= MDCLOG_DEBUG) {
1125             char printBuffer[RECEIVE_SCTP_BUFFER_SIZE * 2 + 1]{};
1126             char *tmp = printBuffer;
1127             for (size_t i = 0; i < (size_t)message.message.asnLength; ++i) {
1128                 snprintf(tmp, 3, "%02x", message.message.asndata[i]);
1129                 tmp += 2;
1130             }
1131             printBuffer[message.message.asnLength * 2] = 0;
1132             clock_gettime(CLOCK_MONOTONIC, &end);
1133             mdclog_write(MDCLOG_DEBUG, "Before decoding E2AP PDU for : %s, Read time is : %ld seconds, %ld nanoseconds",
1134                          message.peerInfo->enodbName, end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
1135             mdclog_write(MDCLOG_DEBUG, "PDU buffer length = %ld, data =  : %s", message.message.asnLength,
1136                          printBuffer);
1137             clock_gettime(CLOCK_MONOTONIC, &decodeStart);
1138         }
1139
1140         auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1141                           message.message.asndata, message.message.asnLength);
1142         if (rval.code != RC_OK) {
1143             mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2AP PDU from RAN : %s", rval.code,
1144                          message.peerInfo->enodbName);
1145             break;
1146         }
1147
1148         if (loglevel >= MDCLOG_DEBUG) {
1149             clock_gettime(CLOCK_MONOTONIC, &end);
1150             mdclog_write(MDCLOG_DEBUG, "After decoding E2AP PDU for : %s, Read time is : %ld seconds, %ld nanoseconds",
1151                          message.peerInfo->enodbName, end.tv_sec - decodeStart.tv_sec, end.tv_nsec - decodeStart.tv_nsec);
1152             char *printBuffer;
1153             size_t size;
1154             FILE *stream = open_memstream(&printBuffer, &size);
1155             asn_fprint(stream, &asn_DEF_E2AP_PDU, pdu);
1156             mdclog_write(MDCLOG_DEBUG, "Decoded E2AP PDU : %s", printBuffer);
1157             clock_gettime(CLOCK_MONOTONIC, &decodeStart);
1158
1159             fclose(stream);
1160             free(printBuffer);
1161         }
1162
1163         switch (pdu->present) {
1164             case E2AP_PDU_PR_initiatingMessage: {//initiating message
1165                 asnInitiatingRequest(pdu, sctpMap,message, rmrMessageBuffer);
1166                 break;
1167             }
1168             case E2AP_PDU_PR_successfulOutcome: { //successful outcome
1169                 asnSuccessfulMsg(pdu, sctpMap, message, rmrMessageBuffer);
1170                 break;
1171             }
1172             case E2AP_PDU_PR_unsuccessfulOutcome: { //Unsuccessful Outcome
1173                 asnUnSuccsesfulMsg(pdu, sctpMap, message, rmrMessageBuffer);
1174                 break;
1175             }
1176             default:
1177                 mdclog_write(MDCLOG_ERR, "Unknown index %d in E2AP PDU", pdu->present);
1178                 break;
1179         }
1180         if (loglevel >= MDCLOG_DEBUG) {
1181             clock_gettime(CLOCK_MONOTONIC, &end);
1182             mdclog_write(MDCLOG_DEBUG,
1183                          "After processing message and sent to rmr for : %s, Read time is : %ld seconds, %ld nanoseconds",
1184                          message.peerInfo->enodbName, end.tv_sec - decodeStart.tv_sec, end.tv_nsec - decodeStart.tv_nsec);
1185         }
1186         numOfMessages++;
1187         if (pdu != nullptr) {
1188             ASN_STRUCT_RESET(asn_DEF_E2AP_PDU, pdu);
1189             //ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);
1190             //pdu = nullptr;
1191         }
1192     }
1193
1194     if (done) {
1195         if (loglevel >= MDCLOG_INFO) {
1196             mdclog_write(MDCLOG_INFO, "Closed connection - descriptor = %d", message.peerInfo->fileDescriptor);
1197         }
1198         message.message.asnLength = rmrMessageBuffer.sendMessage->len =
1199                 snprintf((char *)rmrMessageBuffer.sendMessage->payload,
1200                          256,
1201                          "%s|CU disconnected unexpectedly",
1202                          message.peerInfo->enodbName);
1203         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1204
1205         if (sendRequestToXapp(message,
1206                               RIC_SCTP_CONNECTION_FAILURE,
1207                               rmrMessageBuffer) != 0) {
1208             mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
1209         }
1210
1211         /* Closing the descriptor makes epoll remove it from the set of monitored descriptors. */
1212         close(message.peerInfo->fileDescriptor);
1213         cleanHashEntry((ConnectedCU_t *) events->data.ptr, sctpMap);
1214     }
1215     if (loglevel >= MDCLOG_DEBUG) {
1216         clock_gettime(CLOCK_MONOTONIC, &end);
1217         mdclog_write(MDCLOG_DEBUG, "from receive SCTP to send RMR time is %ld seconds and %ld nanoseconds",
1218                      end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
1219
1220     }
1221     return 0;
1222 }
1223
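// Encode the received Setup / ENDC Setup PDU to XER, strip whitespace, prefix it with
// "<local-ip>:<rmr-port>|" and forward the result over RMR (routed by message type).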
1224 static void buildAndSendSetupRequest(ReportingMessages_t &message,
1225                                      RmrMessagesBuffer_t &rmrMessageBuffer,
1226                                      E2AP_PDU_t *pdu/*,
1227                                      string const &messageName,
1228                                      string const &ieName,
1229                                      vector<string> &functionsToAdd_v,
1230                                      vector<string> &functionsToModified_v*/) {
1231     auto logLevel = mdclog_level_get();
1232     // now we can send the data to e2Mgr
1233
1234     asn_enc_rval_t er;
1235     auto buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
1236     unsigned char buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
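    // Note: buffer is a fixed stack array; the retry branch below only enlarges
    // buffer_size, it does not reallocate the buffer itself.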
1237     while (true) {
1238         er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu, buffer, buffer_size);
1239         if (er.encoded == -1) {
1240             mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1241             return;
1242         } else if (er.encoded > (ssize_t) buffer_size) {
1243             buffer_size = er.encoded + 128;
1244             mdclog_write(MDCLOG_WARN, "Buffer of size %d is too small for %s. Reallocate buffer of size %d",
1245                          (int) buffer_size,
1246                          asn_DEF_E2AP_PDU.name, buffer_size);
1247             buffer_size = er.encoded + 128;
1248 //            free(buffer);
1249             continue;
1250         }
1251         buffer[er.encoded] = '\0';
1252         break;
1253     }
1254     // encode to xml
1255
1256     string res((char *)buffer);
1257     res.erase(std::remove(res.begin(), res.end(), '\n'), res.end());
1258     res.erase(std::remove(res.begin(), res.end(), '\t'), res.end());
1259     res.erase(std::remove(res.begin(), res.end(), ' '), res.end());
1260
1261 //    string res {};
1262 //    if (!functionsToAdd_v.empty() || !functionsToModified_v.empty()) {
1263 //        res = buildXmlData(messageName, ieName, functionsToAdd_v, functionsToModified_v, buffer, (size_t) er.encoded);
1264 //    }
1265     rmr_mbuf_t *rmrMsg;
1266 //    if (res.length() == 0) {
1267 //        rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, buffer_size + 256);
1268 //        rmrMsg->len = snprintf((char *) rmrMsg->payload, RECEIVE_SCTP_BUFFER_SIZE * 2, "%s:%d|%s",
1269 //                               message.peerInfo->sctpParams->myIP.c_str(),
1270 //                               message.peerInfo->sctpParams->rmrPort,
1271 //                               buffer);
1272 //    } else {
1273         rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, (int)res.length() + 256);
1274         rmrMsg->len = snprintf((char *) rmrMsg->payload, res.length() + 256, "%s:%d|%s",
1275                                message.peerInfo->sctpParams->myIP.c_str(),
1276                                message.peerInfo->sctpParams->rmrPort,
1277                                res.c_str());
1278 //    }
1279
1280     if (logLevel >= MDCLOG_DEBUG) {
1281         mdclog_write(MDCLOG_DEBUG, "Setup request of size %d :\n %s\n", rmrMsg->len, rmrMsg->payload);
1282     }
1283     // send to RMR
1284     rmrMsg->mtype = message.message.messageType;
1285     rmrMsg->state = 0;
1286     rmr_bytes2meid(rmrMsg, (unsigned char *) message.message.enodbName, strlen(message.message.enodbName));
1287
1288     static unsigned char tx[32];
1289     snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1290     rmr_bytes2xact(rmrMsg, tx, strlen((const char *) tx));
1291
1292     rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
1293     if (rmrMsg == nullptr) {
1294         mdclog_write(MDCLOG_ERR, "RMR failed to send, returned nullptr");
1295     } else if (rmrMsg->state != 0) {
1296         char meid[RMR_MAX_MEID]{};
1297         if (rmrMsg->state == RMR_ERR_RETRY) {
1298             usleep(5);
1299             rmrMsg->state = 0;
1300             mdclog_write(MDCLOG_INFO, "RETRY sending Message %d to Xapp from %s",
1301                          rmrMsg->mtype, rmr_get_meid(rmrMsg, (unsigned char *) meid));
1302             rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
1303             if (rmrMsg == nullptr) {
1304                 mdclog_write(MDCLOG_ERR, "RMR retry failed to send, returned nullptr");
1305             } else if (rmrMsg->state != 0) {
1306                 mdclog_write(MDCLOG_ERR,
1307                              "RMR Retry failed %s sending request %d to Xapp from %s",
1308                              translateRmrErrorMessages(rmrMsg->state).c_str(),
1309                              rmrMsg->mtype,
1310                              rmr_get_meid(rmrMsg, (unsigned char *) meid));
1311             }
1312         } else {
1313             mdclog_write(MDCLOG_ERR, "RMR failed: %s. sending request %d to Xapp from %s",
1314                          translateRmrErrorMessages(rmrMsg->state).c_str(),
1315                          rmrMsg->mtype,
1316                          rmr_get_meid(rmrMsg, (unsigned char *) meid));
1317         }
1318     }
1319     message.peerInfo->gotSetup = true;
1320     buildJsonMessage(message);
1321     if (rmrMsg != nullptr) {
1322         rmr_free_msg(rmrMsg);
1323     }
1324 }
1325
1326 #if 0
1327 int RAN_Function_list_To_Vector(RANfunctions_List_t& list, vector <string> &runFunXML_v) {
1328     auto index = 0;
1329     runFunXML_v.clear();
1330     for (auto j = 0; j < list.list.count; j++) {
1331         auto *raNfunctionItemIEs = (RANfunction_ItemIEs_t *)list.list.array[j];
1332         if (raNfunctionItemIEs->id == ProtocolIE_ID_id_RANfunction_Item &&
1333             (raNfunctionItemIEs->value.present == RANfunction_ItemIEs__value_PR_RANfunction_Item)) {
1334             // encode to xml
1335             E2SM_gNB_NRT_RANfunction_Definition_t *ranFunDef = nullptr;
1336             auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER,
1337                                    &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
1338                                    (void **)&ranFunDef,
1339                                    raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.buf,
1340                                    raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.size);
1341             if (rval.code != RC_OK) {
1342                 mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2SM message from : %s",
1343                              rval.code,
1344                              asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name);
1345                 return -1;
1346             }
1347
1348             auto xml_buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
1349             unsigned char xml_buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
1350             memset(xml_buffer, 0, RECEIVE_SCTP_BUFFER_SIZE * 2);
1351             // encode to xml
1352             auto er = asn_encode_to_buffer(nullptr,
1353                                            ATS_BASIC_XER,
1354                                            &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
1355                                            ranFunDef,
1356                                            xml_buffer,
1357                                            xml_buffer_size);
1358             if (er.encoded == -1) {
1359                 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s",
1360                              asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
1361                              strerror(errno));
1362             } else if (er.encoded > (ssize_t)xml_buffer_size) {
1363                 mdclog_write(MDCLOG_ERR, "Buffer of size %d is to small for %s, at %s line %d",
1364                              (int) xml_buffer_size,
1365                              asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name, __func__, __LINE__);
1366             } else {
1367                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1368                     mdclog_write(MDCLOG_DEBUG, "Encoding E2SM %s PDU number %d : %s",
1369                                  asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
1370                                  index++,
1371                                  xml_buffer);
1372                 }
1373
1374                 string runFuncs = (char *)(xml_buffer);
1375                 runFunXML_v.emplace_back(runFuncs);
1376             }
1377         }
1378     }
1379     return 0;
1380 }
1381
1382 int collectServiceUpdate_RequestData(E2AP_PDU_t *pdu,
1383                                      Sctp_Map_t *sctpMap,
1384                                      ReportingMessages_t &message,
1385                                      vector <string> &RANfunctionsAdded_v,
1386                                      vector <string> &RANfunctionsModified_v) {
1387     memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
1388     for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.count; i++) {
1389         auto *ie = pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.array[i];
1390         if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
1391             if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctionsID_List) {
1392                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1393                     mdclog_write(MDCLOG_DEBUG, "Run function list have %d entries",
1394                                  ie->value.choice.RANfunctions_List.list.count);
1395                 }
1396                 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
1397                     return -1;
1398                 }
1399             }
1400         } else if (ie->id == ProtocolIE_ID_id_RANfunctionsModified) {
1401             if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctions_List) {
1402                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1403                     mdclog_write(MDCLOG_DEBUG, "Run function list have %d entries",
1404                                  ie->value.choice.RANfunctions_List.list.count);
1405                 }
1406                 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsModified_v) != 0 ) {
1407                     return -1;
1408                 }
1409             }
1410         }
1411     }
1412     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1413         mdclog_write(MDCLOG_DEBUG, "Run function vector have %ld entries",
1414                      RANfunctionsAdded_v.size());
1415     }
1416     return 0;
1417 }
1418
1419 #endif
1420
1421
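/**
 * Register the per-E2-node Prometheus counters: one message counter and one byte counter
 * for each tracked E2AP procedure, grouped by direction (IN from / OUT to the E2 node)
 * and by outcome class (initiating, successful, unsuccessful).
 */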
1422 void buildPrometheusList(ConnectedCU_t *peerInfo, Family<Counter> *prometheusFamily) {
1423     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Messages"}});
1424     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Bytes"}});
1425
1426     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Messages"}});
1427     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Bytes"}});
1428
1429     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICindication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Messages"}});
1430     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICindication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Bytes"}});
1431
1432     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Messages"}});
1433     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Bytes"}});
1434
1435     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Messages"}});
1436     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Bytes"}});
1437     // ---------------------------------------------
1438     peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Messages"}});
1439     peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Bytes"}});
1440
1441     peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Messages"}});
1442     peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Bytes"}});
1443
1444     peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Messages"}});
1445     peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Bytes"}});
1446
1447     peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Messages"}});
1448     peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Bytes"}});
1449     //-------------------------------------------------------------
1450
1451     peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Messages"}});
1452     peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Bytes"}});
1453
1454     peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Messages"}});
1455     peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Bytes"}});
1456
1457     peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Messages"}});
1458     peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Bytes"}});
1459
1460     //====================================================================================
1461     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Messages"}});
1462     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Bytes"}});
1463
1464     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Messages"}});
1465     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Bytes"}});
1466
1467     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Messages"}});
1468     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Bytes"}});
1469
1470     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceQuery)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Messages"}});
1471     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceQuery)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Bytes"}});
1472
1473     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Messages"}});
1474     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Bytes"}});
1475
1476     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Messages"}});
1477     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Bytes"}});
1478     //---------------------------------------------------------------------------------------------------------
1479     peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Messages"}});
1480     peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Bytes"}});
1481
1482     peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Messages"}});
1483     peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Bytes"}});
1484
1485     peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Messages"}});
1486     peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Bytes"}});
1487     //----------------------------------------------------------------------------------------------------------------
1488     peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Messages"}});
1489     peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Bytes"}});
1490
1491     peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Messages"}});
1492     peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Bytes"}});
1493 }
1494 /** Extract the E2-node (RAN) name from the GlobalE2node_ID IE of an E2setupRequest
1495  * and register the peer in the SCTP map under that name.
1496  * @param pdu decoded E2setupRequest PDU
1497  * @param sctpMap map of connected E2 nodes, keyed by RAN name
1498  * @param message reporting record whose peerInfo->enodbName and
1499  *                message.enodbName fields are filled in
1500  * @return 0 on success, -1 if the RAN name cannot be built (no message is sent)
1501  */
1502 int collectSetupRequestData(E2AP_PDU_t *pdu,
1503                                      Sctp_Map_t *sctpMap,
1504                                      ReportingMessages_t &message /*, vector <string> &RANfunctionsAdded_v*/) {
1505     memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
1506     for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.count; i++) {
1507         auto *ie = pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.array[i];
1508         if (ie->id == ProtocolIE_ID_id_GlobalE2node_ID) {
1509             // get the ran name for meid
1510             if (ie->value.present == E2setupRequestIEs__value_PR_GlobalE2node_ID) {
1511                 if (buildRanName(message.peerInfo->enodbName, ie) < 0) {
1512                     mdclog_write(MDCLOG_ERR, "Bad param in E2setupRequestIEs GlobalE2node_ID.\n");
1513                     // no message will be sent
1514                     return -1;
1515                 }
1516
1517                 memcpy(message.message.enodbName, message.peerInfo->enodbName, strlen(message.peerInfo->enodbName));
1518                 sctpMap->setkey(message.message.enodbName, message.peerInfo);
1519             }
1520         } /*else if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
1521             if (ie->value.present == E2setupRequestIEs__value_PR_RANfunctions_List) {
1522                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1523                     mdclog_write(MDCLOG_DEBUG, "Run function list have %d entries",
1524                                  ie->value.choice.RANfunctions_List.list.count);
1525                 }
1526                 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
1527                     return -1;
1528                 }
1529             }
1530         } */
1531     }
1532 //    if (mdclog_level_get() >= MDCLOG_DEBUG) {
1533 //        mdclog_write(MDCLOG_DEBUG, "Run function vector have %ld entries",
1534 //                     RANfunctionsAdded_v.size());
1535 //    }
1536     return 0;
1537 }
1538
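/**
 * Convert the PER-encoded E2AP PDU held in rmrMessageBuffer.sendMessage into XER (XML),
 * in place, so it can be forwarded to the xApp as text.
 * @return 0 on success, -1 on decode/encode failure
 */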
1539 int XML_From_PER(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1540     E2AP_PDU_t *pdu = nullptr;
1541
1542     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1543         mdclog_write(MDCLOG_DEBUG, "got PER message of size %d is:%s",
1544                      rmrMessageBuffer.sendMessage->len, rmrMessageBuffer.sendMessage->payload);
1545     }
1546     auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1547                            rmrMessageBuffer.sendMessage->payload, rmrMessageBuffer.sendMessage->len);
1548     if (rval.code != RC_OK) {
1549         mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) setup response  from E2MGR : %s",
1550                      rval.code,
1551                      message.message.enodbName);
1552         return -1;
1553     }
1554
1555     int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
1556     auto er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu,
1557                                    rmrMessageBuffer.sendMessage->payload, buff_size);
1558     if (er.encoded == -1) {
1559         mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1560         return -1;
1561     } else if (er.encoded > (ssize_t)buff_size) {
1562         mdclog_write(MDCLOG_ERR, "Buffer of size %d is to small for %s, at %s line %d",
1563                      (int)rmrMessageBuffer.sendMessage->len,
1564                      asn_DEF_E2AP_PDU.name,
1565                      __func__,
1566                      __LINE__);
1567         return -1;
1568     }
1569     rmrMessageBuffer.sendMessage->len = er.encoded;
1570     return 0;
1571
1572 }
1573
1574 /** Handle an E2AP initiating message received from the E2 node and forward it to the xApp / E2MGR.
1575  * @param pdu decoded E2AP PDU (initiating message)
1576  * @param sctpMap map of connected E2 nodes
1577  * @param message reporting record for the received message
1578  * @param rmrMessageBuffer RMR context and buffers used for sending
1579  */
1580 void asnInitiatingRequest(E2AP_PDU_t *pdu,
1581                           Sctp_Map_t *sctpMap,
1582                           ReportingMessages_t &message,
1583                           RmrMessagesBuffer_t &rmrMessageBuffer) {
1584     auto logLevel = mdclog_level_get();
1585     auto procedureCode = ((InitiatingMessage_t *) pdu->choice.initiatingMessage)->procedureCode;
1586     if (logLevel >= MDCLOG_DEBUG) {
1587         mdclog_write(MDCLOG_DEBUG, "Initiating message %ld\n", procedureCode);
1588     }
1589     switch (procedureCode) {
1590         case ProcedureCode_id_E2setup: {
1591             if (logLevel >= MDCLOG_DEBUG) {
1592                 mdclog_write(MDCLOG_DEBUG, "Got E2setup");
1593             }
1594
1595 //            vector <string> RANfunctionsAdded_v;
1596 //            vector <string> RANfunctionsModified_v;
1597 //            RANfunctionsAdded_v.clear();
1598 //            RANfunctionsModified_v.clear();
1599             if (collectSetupRequestData(pdu, sctpMap, message) != 0) {
1600                 break;
1601             }
1602
1603             buildPrometheusList(message.peerInfo, message.peerInfo->sctpParams->prometheusFamily);
1604
1605             string messageName("E2setupRequest");
1606             string ieName("E2setupRequestIEs");
1607             message.message.messageType = RIC_E2_SETUP_REQ;
1608             message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
1609             message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment((double)message.message.asnLength);
1610             buildAndSendSetupRequest(message, rmrMessageBuffer, pdu);
1611             break;
1612         }
1613         case ProcedureCode_id_RICserviceUpdate: {
1614             if (logLevel >= MDCLOG_DEBUG) {
1615                 mdclog_write(MDCLOG_DEBUG, "Got RICserviceUpdate %s", message.message.enodbName);
1616             }
1617 //            vector <string> RANfunctionsAdded_v;
1618 //            vector <string> RANfunctionsModified_v;
1619 //            RANfunctionsAdded_v.clear();
1620 //            RANfunctionsModified_v.clear();
1621 //            if (collectServiceUpdate_RequestData(pdu, sctpMap, message,
1622 //                                                 RANfunctionsAdded_v, RANfunctionsModified_v) != 0) {
1623 //                break;
1624 //            }
1625
1626             string messageName("RICserviceUpdate");
1627             string ieName("RICserviceUpdateIEs");
1628             message.message.messageType = RIC_SERVICE_UPDATE;
1629             message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
1630             message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment((double)message.message.asnLength);
1631
1632             buildAndSendSetupRequest(message, rmrMessageBuffer, pdu);
1633             break;
1634         }
1635         case ProcedureCode_id_ErrorIndication: {
1636             if (logLevel >= MDCLOG_DEBUG) {
1637                 mdclog_write(MDCLOG_DEBUG, "Got ErrorIndication %s", message.message.enodbName);
1638             }
1639             message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication]->Increment();
1640             message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication]->Increment((double)message.message.asnLength);
1641             if (sendRequestToXapp(message, RIC_ERROR_INDICATION, rmrMessageBuffer) != 0) {
1642                 mdclog_write(MDCLOG_ERR, "RIC_ERROR_INDICATION failed to send to xAPP");
1643             }
1644             break;
1645         }
1646         case ProcedureCode_id_Reset: {
1647             if (logLevel >= MDCLOG_DEBUG) {
1648                 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
1649             }
1650
1651             message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
1652             message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment((double)message.message.asnLength);
1653             if (XML_From_PER(message, rmrMessageBuffer) < 0) {
1654                 break;
1655             }
1656
1657             if (sendRequestToXapp(message, RIC_E2_RESET_REQ, rmrMessageBuffer) != 0) {
1658                 mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_REQ message failed to send to xAPP");
1659             }
1660             break;
1661         }
1662         case ProcedureCode_id_RICindication: {
1663             if (logLevel >= MDCLOG_DEBUG) {
1664                 mdclog_write(MDCLOG_DEBUG, "Got RICindication %s", message.message.enodbName);
1665             }
1666             for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.count; i++) {
1667                 auto messageSent = false;
1668                 RICindication_IEs_t *ie = pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.array[i];
1669                 if (logLevel >= MDCLOG_DEBUG) {
1670                     mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1671                 }
1672                 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1673                     if (logLevel >= MDCLOG_DEBUG) {
1674                         mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1675                     }
1676                     if (ie->value.present == RICindication_IEs__value_PR_RICrequestID) {
1677                         static unsigned char tx[32];
1678                         message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_INDICATION;
1679                         snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1680                         rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1681                         rmr_bytes2meid(rmrMessageBuffer.sendMessage,
1682                                        (unsigned char *)message.message.enodbName,
1683                                        strlen(message.message.enodbName));
1684                         rmrMessageBuffer.sendMessage->state = 0;
1685                         rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1686
1687                         //ie->value.choice.RICrequestID.ricInstanceID;
1688                         if (mdclog_level_get() >= MDCLOG_DEBUG) {
1689                             mdclog_write(MDCLOG_DEBUG, "sub id = %d, mtype = %d, ric instance id %ld, requestor id = %ld",
1690                                          rmrMessageBuffer.sendMessage->sub_id,
1691                                          rmrMessageBuffer.sendMessage->mtype,
1692                                          ie->value.choice.RICrequestID.ricInstanceID,
1693                                          ie->value.choice.RICrequestID.ricRequestorID);
1694                         }
1695                         message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICindication]->Increment();
1696                         message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICindication]->Increment((double)message.message.asnLength);
1697                         sendRmrMessage(rmrMessageBuffer, message);
1698                         messageSent = true;
1699                     } else {
1700                         mdclog_write(MDCLOG_ERR, "RIC request id missing illegal request");
1701                     }
1702                 }
1703                 if (messageSent) {
1704                     break;
1705                 }
1706             }
1707             break;
1708         }
1709         default: {
1710             mdclog_write(MDCLOG_ERR, "Undefined or not supported message = %ld", procedureCode);
1711             message.message.messageType = 0; // no RMR message type yet
1712
1713             buildJsonMessage(message);
1714
1715             break;
1716         }
1717     }
1718 }
1719
1720 /** Handle an E2AP successful-outcome message received from the E2 node and forward it to the xApp.
1721  * @param pdu decoded E2AP PDU (successful outcome)
1722  * @param sctpMap map of connected E2 nodes
1723  * @param message reporting record for the received message
1724  * @param rmrMessageBuffer RMR context and buffers used for sending
1725  */
1726 void asnSuccessfulMsg(E2AP_PDU_t *pdu,
1727                       Sctp_Map_t *sctpMap,
1728                       ReportingMessages_t &message,
1729                       RmrMessagesBuffer_t &rmrMessageBuffer) {
1730     auto procedureCode = pdu->choice.successfulOutcome->procedureCode;
1731     auto logLevel = mdclog_level_get();
1732     if (logLevel >= MDCLOG_INFO) {
1733         mdclog_write(MDCLOG_INFO, "Successful Outcome %ld", procedureCode);
1734     }
1735     switch (procedureCode) {
1736         case ProcedureCode_id_Reset: {
1737             if (logLevel >= MDCLOG_DEBUG) {
1738                 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
1739             }
1740             message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
1741             message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment((double)message.message.asnLength);
1742             if (XML_From_PER(message, rmrMessageBuffer) < 0) {
1743                 break;
1744             }
1745             if (sendRequestToXapp(message, RIC_E2_RESET_RESP, rmrMessageBuffer) != 0) {
1746                 mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_RESP message failed to send to xAPP");
1747             }
1748             break;
1749         }
1750         case ProcedureCode_id_RICcontrol: {
1751             if (logLevel >= MDCLOG_DEBUG) {
1752                 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
1753             }
1754             for (auto i = 0;
1755                  i < pdu->choice.successfulOutcome->value.choice.RICcontrolAcknowledge.protocolIEs.list.count; i++) {
1756                 auto messageSent = false;
1757                 RICcontrolAcknowledge_IEs_t *ie = pdu->choice.successfulOutcome->value.choice.RICcontrolAcknowledge.protocolIEs.list.array[i];
1758                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1759                     mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1760                 }
1761                 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1762                     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1763                         mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1764                     }
1765                     if (ie->value.present == RICcontrolAcknowledge_IEs__value_PR_RICrequestID) {
1766                         message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_ACK;
1767                         rmrMessageBuffer.sendMessage->state = 0;
1768 //                        rmrMessageBuffer.sendMessage->sub_id = (int) ie->value.choice.RICrequestID.ricRequestorID;
1769                         rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1770
1771                         static unsigned char tx[32];
1772                         snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1773                         rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1774                         rmr_bytes2meid(rmrMessageBuffer.sendMessage,
1775                                        (unsigned char *)message.message.enodbName,
1776                                        strlen(message.message.enodbName));
1777
1778                         message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
1779                         message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment((double)message.message.asnLength);
1780                         sendRmrMessage(rmrMessageBuffer, message);
1781                         messageSent = true;
1782                     } else {
1783                         mdclog_write(MDCLOG_ERR, "RIC request id missing illegal request");
1784                     }
1785                 }
1786                 if (messageSent) {
1787                     break;
1788                 }
1789             }
1790
1791             break;
1792         }
1793         case ProcedureCode_id_RICsubscription: {
1794             if (logLevel >= MDCLOG_DEBUG) {
1795                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
1796             }
1797             message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
1798             message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment((double)message.message.asnLength);
1799             if (sendRequestToXapp(message, RIC_SUB_RESP, rmrMessageBuffer) != 0) {
1800                 mdclog_write(MDCLOG_ERR, "Subscription successful message failed to send to xAPP");
1801             }
1802             break;
1803         }
1804         case ProcedureCode_id_RICsubscriptionDelete: {
1805             if (logLevel >= MDCLOG_DEBUG) {
1806                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
1807             }
1808             message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
1809             message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment((double)message.message.asnLength);
1810             if (sendRequestToXapp(message, RIC_SUB_DEL_RESP, rmrMessageBuffer) != 0) {
1811                 mdclog_write(MDCLOG_ERR, "Subscription delete successful message failed to send to xAPP");
1812             }
1813             break;
1814         }
1815         default: {
1816             mdclog_write(MDCLOG_WARN, "Undefined or not supported message = %ld", procedureCode);
1817             message.message.messageType = 0; // no RMR message type yet
1818             buildJsonMessage(message);
1819
1820             break;
1821         }
1822     }
1823 }
1824
1825 /** Handle an E2AP unsuccessful-outcome message received from the E2 node and forward it to the xApp.
1826  * @param pdu decoded E2AP PDU (unsuccessful outcome)
1827  * @param sctpMap map of connected E2 nodes
1828  * @param message reporting record for the received message
1829  * @param rmrMessageBuffer RMR context and buffers used for sending
1830  */
1831 void asnUnSuccsesfulMsg(E2AP_PDU_t *pdu,
1832                         Sctp_Map_t *sctpMap,
1833                         ReportingMessages_t &message,
1834                         RmrMessagesBuffer_t &rmrMessageBuffer) {
1835     auto procedureCode = pdu->choice.unsuccessfulOutcome->procedureCode;
1836     auto logLevel = mdclog_level_get();
1837     if (logLevel >= MDCLOG_INFO) {
1838         mdclog_write(MDCLOG_INFO, "Unsuccessful Outcome %ld", procedureCode);
1839     }
1840     switch (procedureCode) {
1841         case ProcedureCode_id_RICcontrol: {
1842             if (logLevel >= MDCLOG_DEBUG) {
1843                 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
1844             }
1845             for (int i = 0;
1846                  i < pdu->choice.unsuccessfulOutcome->value.choice.RICcontrolFailure.protocolIEs.list.count; i++) {
1847                 auto messageSent = false;
1848                 RICcontrolFailure_IEs_t *ie = pdu->choice.unsuccessfulOutcome->value.choice.RICcontrolFailure.protocolIEs.list.array[i];
1849                 if (logLevel >= MDCLOG_DEBUG) {
1850                     mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1851                 }
1852                 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1853                     if (logLevel >= MDCLOG_DEBUG) {
1854                         mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1855                     }
1856                     if (ie->value.present == RICcontrolFailure_IEs__value_PR_RICrequestID) {
1857                         message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_FAILURE;
1858                         rmrMessageBuffer.sendMessage->state = 0;
1859 //                        rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricRequestorID;
1860                         rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1861                         static unsigned char tx[32];
1862                         snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1863                         rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1864                         rmr_bytes2meid(rmrMessageBuffer.sendMessage, (unsigned char *) message.message.enodbName,
1865                                        strlen(message.message.enodbName));
1866                         message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
1867                         message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment((double)message.message.asnLength);
1868                         sendRmrMessage(rmrMessageBuffer, message);
1869                         messageSent = true;
1870                     } else {
1871                         mdclog_write(MDCLOG_ERR, "RIC request id missing illegal request");
1872                     }
1873                 }
1874                 if (messageSent) {
1875                     break;
1876                 }
1877             }
1878             break;
1879         }
1880         case ProcedureCode_id_RICsubscription: {
1881             if (logLevel >= MDCLOG_DEBUG) {
1882                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
1883             }
1884             message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
1885             message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment((double)message.message.asnLength);
1886             if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
1887                 mdclog_write(MDCLOG_ERR, "Subscription unsuccessful message failed to send to xAPP");
1888             }
1889             break;
1890         }
1891         case ProcedureCode_id_RICsubscriptionDelete: {
1892             if (logLevel >= MDCLOG_DEBUG) {
1893                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
1894             }
1895             message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
1896             message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment((double)message.message.asnLength);
1897             if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
1898                 mdclog_write(MDCLOG_ERR, "Subscription Delete unsuccessful message failed to send to xAPP");
1899             }
1900             break;
1901         }
1902         default: {
1903             mdclog_write(MDCLOG_WARN, "Undefined or not supported message = %ld", procedureCode);
1904             message.message.messageType = 0; // no RMR message type yet
1905
1906             buildJsonMessage(message);
1907
1908             break;
1909         }
1910     }
1911 }
1912
1913 /** Forward a message received from the E2 node to the xApp over RMR,
1914  * stamping the MEID, the message type and a fresh transaction id.
1915  * @param message reporting record; its enodbName is used as the RMR MEID
1916  * @param requestId RMR message type to send
1917  * @param rmrMmessageBuffer RMR context and buffers used for sending
1918  * @return result of sendRmrMessage: 0 on success, non-zero on failure
1919  */
1920 int sendRequestToXapp(ReportingMessages_t &message,
1921                       int requestId,
1922                       RmrMessagesBuffer_t &rmrMmessageBuffer) {
1923     rmr_bytes2meid(rmrMmessageBuffer.sendMessage,
1924                    (unsigned char *)message.message.enodbName,
1925                    strlen(message.message.enodbName));
1926     message.message.messageType = rmrMmessageBuffer.sendMessage->mtype = requestId;
1927     rmrMmessageBuffer.sendMessage->state = 0;
1928     static unsigned char tx[32];
1929     snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1930     rmr_bytes2xact(rmrMmessageBuffer.sendMessage, tx, strlen((const char *) tx));
1931
1932     auto rc = sendRmrMessage(rmrMmessageBuffer, message);
1933     return rc;
1934 }
1935
1936 /** Initialize RMR, wait for the routing table to be ready, and register the RMR
1937  * receive descriptor with epoll; on failure pSctpParams.rmrCtx remains nullptr.
1938  * @param pSctpParams SCTP/RMR parameters; rmrCtx and rmrListenFd are filled in
1939  */
1940 void getRmrContext(sctp_params_t &pSctpParams) {
1941     pSctpParams.rmrCtx = nullptr;
1942     pSctpParams.rmrCtx = rmr_init(pSctpParams.rmrAddress, RECEIVE_XAPP_BUFFER_SIZE, RMRFL_NONE);
1943     if (pSctpParams.rmrCtx == nullptr) {
1944         mdclog_write(MDCLOG_ERR, "Failed to initialize RMR");
1945         return;
1946     }
1947
1948     rmr_set_stimeout(pSctpParams.rmrCtx, 0);    // disable retries for any send operation
1949     // wait until the RMR routing table exists so that sends can succeed
1950     if (mdclog_level_get() >= MDCLOG_INFO) {
1951         mdclog_write(MDCLOG_INFO, "We are after RMR INIT wait for RMR_Ready");
1952     }
1953     int rmrReady = 0;
1954     int count = 0;
1955     while (!rmrReady) {
1956         if ((rmrReady = rmr_ready(pSctpParams.rmrCtx)) == 0) {
1957             sleep(1);
1958         }
1959         count++;
1960         if (count % 60 == 0) {
1961             mdclog_write(MDCLOG_INFO, "waiting to RMR ready state for %d seconds", count);
1962         }
1963     }
1964     if (mdclog_level_get() >= MDCLOG_INFO) {
1965         mdclog_write(MDCLOG_INFO, "RMR running");
1966     }
1967     rmr_init_trace(pSctpParams.rmrCtx, 200);
1968     // get the RMR fd for the epoll
1969     pSctpParams.rmrListenFd = rmr_get_rcvfd(pSctpParams.rmrCtx);
1970     struct epoll_event event{};
1971     // add RMR fd to epoll
1972     event.events = (EPOLLIN);
1973     event.data.fd = pSctpParams.rmrListenFd;
1974     // add listening RMR FD to epoll
1975     if (epoll_ctl(pSctpParams.epoll_fd, EPOLL_CTL_ADD, pSctpParams.rmrListenFd, &event)) {
1976         mdclog_write(MDCLOG_ERR, "Failed to add RMR descriptor to epoll");
1977         close(pSctpParams.rmrListenFd);
1978         rmr_close(pSctpParams.rmrCtx);
1979         pSctpParams.rmrCtx = nullptr;
1980     }
1981 }
1982
1983 /** Re-encode an XML (XER) E2AP message received from the xApp / E2MGR into aligned PER,
1984  * in place in rmrMessageBuffer.rcvMessage, so it can be sent to the E2 node over SCTP.
1985  * @param message reporting record (used for logging the E2-node name)
1986  * @param rmrMessageBuffer holds the received RMR message to convert
1987  * @return 0 on success, -1 on decode/encode failure
1988  */
1989 int PER_FromXML(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1990     E2AP_PDU_t *pdu = nullptr;
1991
1992     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1993         mdclog_write(MDCLOG_DEBUG, "got xml Format  data from xApp of size %d is:%s",
1994                 rmrMessageBuffer.rcvMessage->len, rmrMessageBuffer.rcvMessage->payload);
1995     }
1996     auto rval = asn_decode(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1997                            rmrMessageBuffer.rcvMessage->payload, rmrMessageBuffer.rcvMessage->len);
1998     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1999         mdclog_write(MDCLOG_DEBUG, "%s After  decoding the XML to PDU", __func__ );
2000     }
2001     if (rval.code != RC_OK) {
2002         mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) setup response  from E2MGR : %s",
2003                      rval.code,
2004                      message.message.enodbName);
2005         return -1;
2006     }
2007
2008     int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
2009     auto er = asn_encode_to_buffer(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, pdu,
2010                                    rmrMessageBuffer.rcvMessage->payload, buff_size);
2011     if (mdclog_level_get() >= MDCLOG_DEBUG) {
2012         mdclog_write(MDCLOG_DEBUG, "%s After encoding PDU to PER", __func__ );
2013     }
2014     if (er.encoded == -1) {
2015         mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
2016         return -1;
2017     } else if (er.encoded > (ssize_t)buff_size) {
2018         mdclog_write(MDCLOG_ERR, "Buffer of size %d is to small for %s, at %s line %d",
2019                      (int)rmrMessageBuffer.rcvMessage->len,
2020                      asn_DEF_E2AP_PDU.name,
2021                      __func__,
2022                      __LINE__);
2023         return -1;
2024     }
2025     rmrMessageBuffer.rcvMessage->len = er.encoded;
2026     return 0;
2027 }
2028
2029 /** Receive one RMR message from an xApp (or E2MGR) and forward it to the proper E2 node,
2030  * or handle it locally (keep-alive, health check, SCTP clear-all).
2031  * @param sctpMap map of connected E2 nodes, keyed by RAN name
2032  * @param rmrMessageBuffer RMR context and receive/send buffers
2033  * @param ts receive timestamp used in the reporting record
2034  * @return 0 on success, negative value on error
2035  */
2036 int receiveXappMessages(Sctp_Map_t *sctpMap,
2037                         RmrMessagesBuffer_t &rmrMessageBuffer,
2038                         struct timespec &ts) {
2039     int loglevel = mdclog_level_get();
2040     if (rmrMessageBuffer.rcvMessage == nullptr) {
2041         // the RMR receive buffer was never allocated
2042         mdclog_write(MDCLOG_ERR, "RMR message allocation failed, %s", strerror(errno));
2043         return -1;
2044     }
2045
2046 //    if (loglevel >= MDCLOG_DEBUG) {
2047 //        mdclog_write(MDCLOG_DEBUG, "Call to rmr_rcv_msg");
2048 //    }
2049     rmrMessageBuffer.rcvMessage = rmr_rcv_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.rcvMessage);
2050     if (rmrMessageBuffer.rcvMessage == nullptr) {
2051         mdclog_write(MDCLOG_ERR, "RMR Receiving message with null pointer, Reallocated rmr message buffer");
2052         rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2053         return -2;
2054     }
2055     ReportingMessages_t message;
2056     message.message.direction = 'D';
2057     message.message.time.tv_nsec = ts.tv_nsec;
2058     message.message.time.tv_sec = ts.tv_sec;
2059
2060     // get message payload
2061     //auto msgData = msg->payload;
2062     if (rmrMessageBuffer.rcvMessage->state != 0) {
2063         mdclog_write(MDCLOG_ERR, "RMR Receiving message with stat = %d", rmrMessageBuffer.rcvMessage->state);
2064         return -1;
2065     }
2066     rmr_get_meid(rmrMessageBuffer.rcvMessage, (unsigned char *)message.message.enodbName);
2067     message.peerInfo = (ConnectedCU_t *) sctpMap->find(message.message.enodbName);
2068     if (message.peerInfo == nullptr) {
2069         auto type = rmrMessageBuffer.rcvMessage->mtype;
2070         switch (type) {
2071             case RIC_SCTP_CLEAR_ALL:
2072             case E2_TERM_KEEP_ALIVE_REQ:
2073             case RIC_HEALTH_CHECK_REQ:
2074                 break;
2075             default:
2076                 mdclog_write(MDCLOG_ERR, "Failed to send message no CU entry %s", message.message.enodbName);
2077                 return -1;
2078         }
2079     }
2080
2081     if (rmrMessageBuffer.rcvMessage->mtype != RIC_HEALTH_CHECK_REQ) {
2082         num_of_XAPP_messages.fetch_add(1, std::memory_order_release);
2083
2084     }
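    // dispatch on the RMR message type: messages that arrive XML (XER) encoded are converted
    // back to PER via PER_FromXML before being forwarded to the E2 node over SCTP; the others
    // are forwarded as received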
2085     switch (rmrMessageBuffer.rcvMessage->mtype) {
2086         case RIC_E2_SETUP_RESP : {
2087             if (loglevel >= MDCLOG_DEBUG) {
2088                 mdclog_write(MDCLOG_DEBUG, "RIC_E2_SETUP_RESP");
2089             }
2090             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2091                 break;
2092             }
2093             message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
2094             message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment(rmrMessageBuffer.rcvMessage->len);
2095             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2096                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_RESP");
2097                 return -6;
2098             }
2099             break;
2100         }
2101         case RIC_E2_SETUP_FAILURE : {
2102             if (loglevel >= MDCLOG_DEBUG) {
2103                 mdclog_write(MDCLOG_DEBUG, "RIC_E2_SETUP_FAILURE");
2104             }
2105             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2106                 break;
2107             }
2108             message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
2109             message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment(rmrMessageBuffer.rcvMessage->len);
2110             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2111                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_FAILURE");
2112                 return -6;
2113             }
2114             break;
2115         }
2116         case RIC_ERROR_INDICATION: {
2117             if (loglevel >= MDCLOG_DEBUG) {
2118                 mdclog_write(MDCLOG_DEBUG, "RIC_ERROR_INDICATION");
2119             }
2120             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication]->Increment();
2121             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication]->Increment(rmrMessageBuffer.rcvMessage->len);
2122             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2123                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_ERROR_INDICATION");
2124                 return -6;
2125             }
2126             break;
2127         }
2128         case RIC_SUB_REQ: {
2129             if (loglevel >= MDCLOG_DEBUG) {
2130                 mdclog_write(MDCLOG_DEBUG, "RIC_SUB_REQ");
2131             }
2132             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
2133             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment(rmrMessageBuffer.rcvMessage->len);
2134             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2135                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_REQ");
2136                 return -6;
2137             }
2138             break;
2139         }
2140         case RIC_SUB_DEL_REQ: {
2141             if (loglevel >= MDCLOG_DEBUG) {
2142                 mdclog_write(MDCLOG_DEBUG, "RIC_SUB_DEL_REQ");
2143             }
2144             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
2145             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment(rmrMessageBuffer.rcvMessage->len);
2146             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2147                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_DEL_REQ");
2148                 return -6;
2149             }
2150             break;
2151         }
2152         case RIC_CONTROL_REQ: {
2153             if (loglevel >= MDCLOG_DEBUG) {
2154                 mdclog_write(MDCLOG_DEBUG, "RIC_CONTROL_REQ");
2155             }
2156             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
2157             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment(rmrMessageBuffer.rcvMessage->len);
2158             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2159                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_CONTROL_REQ");
2160                 return -6;
2161             }
2162             break;
2163         }
2164         case RIC_SERVICE_QUERY: {
2165             if (loglevel >= MDCLOG_DEBUG) {
2166                 mdclog_write(MDCLOG_DEBUG, "RIC_SERVICE_QUERY");
2167             }
2168             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2169                 break;
2170             }
2171             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment();
2172             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment(rmrMessageBuffer.rcvMessage->len);
2173             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2174                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_QUERY");
2175                 return -6;
2176             }
2177             break;
2178         }
2179         case RIC_SERVICE_UPDATE_ACK: {
2180             if (loglevel >= MDCLOG_DEBUG) {
2181                 mdclog_write(MDCLOG_DEBUG, "RIC_SERVICE_UPDATE_ACK");
2182             }
2183             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2184                 mdclog_write(MDCLOG_ERR, "error in PER_FromXML");
2185                 break;
2186             }
2187             message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
2188             message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment(rmrMessageBuffer.rcvMessage->len);
2189             if (loglevel >= MDCLOG_DEBUG) {
2190                 mdclog_write(MDCLOG_DEBUG, "Before sending to CU");
2191             }
2192             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2193                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_ACK");
2194                 return -6;
2195             }
2196             break;
2197         }
2198         case RIC_SERVICE_UPDATE_FAILURE: {
2199             if (loglevel >= MDCLOG_DEBUG) {
2200                 mdclog_write(MDCLOG_DEBUG, "RIC_SERVICE_UPDATE_FAILURE");
2201             }
2202             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2203                 break;
2204             }
2205             message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
2206             message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment(rmrMessageBuffer.rcvMessage->len);
2207             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2208                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_FAILURE");
2209                 return -6;
2210             }
2211             break;
2212         }
2213         case RIC_E2_RESET_REQ: {
2214             if (loglevel >= MDCLOG_DEBUG) {
2215                 mdclog_write(MDCLOG_DEBUG, "RIC_E2_RESET_REQ");
2216             }
2217             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2218                 break;
2219             }
2220             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
2221             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment(rmrMessageBuffer.rcvMessage->len);
2222             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2223                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET");
2224                 return -6;
2225             }
2226             break;
2227         }
2228         case RIC_E2_RESET_RESP: {
2229             if (loglevel >= MDCLOG_DEBUG) {
2230                 mdclog_write(MDCLOG_DEBUG, "RIC_E2_RESET_RESP");
2231             }
2232             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2233                 break;
2234             }
2235             message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
2236             message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment(rmrMessageBuffer.rcvMessage->len);
2237             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2238                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET_RESP");
2239                 return -6;
2240             }
2241             break;
2242         }
2243         case RIC_SCTP_CLEAR_ALL: {
2244             mdclog_write(MDCLOG_INFO, "RIC_SCTP_CLEAR_ALL");
2245             // loop on all keys and close socket and then erase all map.
2246             vector<char *> v;
2247             sctpMap->getKeys(v);
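            // walk every map key; entries prefixed "host:" or "msg:" are auxiliary and are skipped,
            // the remaining keys are E2-node names whose peers are closed and reported to the xApp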
2248             for (auto const &iter : v) { //}; iter != sctpMap.end(); iter++) {
2249                 if (!boost::starts_with((string) (iter), "host:") && !boost::starts_with((string) (iter), "msg:")) {
2250                     auto *peerInfo = (ConnectedCU_t *) sctpMap->find(iter);
2251                     if (peerInfo == nullptr) {
2252                         continue;
2253                     }
2254                     close(peerInfo->fileDescriptor);
2255                     memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
2256                     message.message.direction = 'D';
2257                     message.message.time.tv_nsec = ts.tv_nsec;
2258                     message.message.time.tv_sec = ts.tv_sec;
2259
2260                     message.message.asnLength = rmrMessageBuffer.sendMessage->len =
2261                             snprintf((char *)rmrMessageBuffer.sendMessage->payload,
2262                                      256,
2263                                      "%s|RIC_SCTP_CLEAR_ALL",
2264                                      peerInfo->enodbName);
2265                     message.message.asndata = rmrMessageBuffer.sendMessage->payload;
2266                     mdclog_write(MDCLOG_INFO, "%s", message.message.asndata);
2267                     if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
2268                         mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
2269                     }
2270                     free(peerInfo);
2271                 }
2272             }
2273
2274             sleep(1);
2275             sctpMap->clear();
2276             break;
2277         }
2278         case E2_TERM_KEEP_ALIVE_REQ: {
2279             // echo the pre-built keep-alive payload back with mtype E2_TERM_KEEP_ALIVE_RESP
2280             rmr_bytes2payload(rmrMessageBuffer.sendMessage,
2281                               (unsigned char *)rmrMessageBuffer.ka_message,
2282                               rmrMessageBuffer.ka_message_len);
2283             rmrMessageBuffer.sendMessage->mtype = E2_TERM_KEEP_ALIVE_RESP;
2284             rmrMessageBuffer.sendMessage->state = 0;
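            // Stamp a fixed-width transaction id from the shared counter before sending.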
2285             static unsigned char tx[32];
2286             auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2287             rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, txLen);
2288             rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2289             if (rmrMessageBuffer.sendMessage == nullptr) {
2290                 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2291                 mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP RMR message returned NULL");
2292             } else if (rmrMessageBuffer.sendMessage->state != 0)  {
2293                 mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP, on RMR state = %d ( %s)",
2294                              rmrMessageBuffer.sendMessage->state, translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str());
2295             } else if (loglevel >= MDCLOG_DEBUG) {
2296                 mdclog_write(MDCLOG_DEBUG, "Got Keep Alive Request send : %s", rmrMessageBuffer.ka_message);
2297             }
2298
2299             break;
2300         }
2301         case RIC_HEALTH_CHECK_REQ: {
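            // 'counter' throttles the debug log below to every 100th health check request.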
2302             static int counter = 0;
2303             // send message back
2304             rmr_bytes2payload(rmrMessageBuffer.rcvMessage,
2305                               (unsigned char *)"OK",
2306                               2);
2307             rmrMessageBuffer.rcvMessage->mtype = RIC_HEALTH_CHECK_RESP;
2308             rmrMessageBuffer.rcvMessage->state = 0;
2309             static unsigned char tx[32];
2310             auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2311             rmr_bytes2xact(rmrMessageBuffer.rcvMessage, tx, txLen);
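            // Reply on the received buffer via RMR return-to-sender rather than routing a new message.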
2312             rmrMessageBuffer.rcvMessage = rmr_rts_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.rcvMessage);
2313             //rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2314             if (rmrMessageBuffer.rcvMessage == nullptr) {
2315                 rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2316                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP RMR message returned NULL");
2317             } else if (rmrMessageBuffer.rcvMessage->state != 0)  {
2318                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP, on RMR state = %d ( %s)",
2319                              rmrMessageBuffer.rcvMessage->state, translateRmrErrorMessages(rmrMessageBuffer.rcvMessage->state).c_str());
2320             } else if (loglevel >= MDCLOG_DEBUG && ++counter % 100 == 0) {
2321                 mdclog_write(MDCLOG_DEBUG, "Got %d RIC_HEALTH_CHECK_REQ Request send : OK", counter);
2322             }
2323
2324             break;
2325         }
2326
2327         default:
2328             mdclog_write(MDCLOG_WARN, "Message Type : %d is not supported", rmrMessageBuffer.rcvMessage->mtype);
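            // Still record the unsupported message in the trace (when tracing is enabled) before rejecting it.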
2329             message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
2330             message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
2331             message.message.time.tv_nsec = ts.tv_nsec;
2332             message.message.time.tv_sec = ts.tv_sec;
2333             message.message.messageType = rmrMessageBuffer.rcvMessage->mtype;
2334
2335             buildJsonMessage(message);
2336
2337
2338             return -7;
2339     }
2340     if (mdclog_level_get() >= MDCLOG_DEBUG) {
2341         mdclog_write(MDCLOG_DEBUG, "EXIT OK from %s", __FUNCTION__);
2342     }
2343     return 0;
2344 }
2345
2346 /**
2347  * Send a message to the CU without waiting for a successful or unsuccessful outcome
2348  * @param messageBuffer RMR buffers holding the message to forward
2349  * @param message       reporting metadata for the message being sent
2350  * @param failedMsgId   failure message type, passed through to sendMessagetoCu
2351  * @param sctpMap       map of connected CUs
2352  * @return 0 on success, non-zero on failure
2353  */
2354 int sendDirectionalSctpMsg(RmrMessagesBuffer_t &messageBuffer,
2355                            ReportingMessages_t &message,
2356                            int failedMsgId,
2357                            Sctp_Map_t *sctpMap) {
2358     if (mdclog_level_get() >= MDCLOG_DEBUG) {
2359         mdclog_write(MDCLOG_DEBUG, "send message: %d to %s address", message.message.messageType, message.message.enodbName);
2360     }
2361
2362     getRequestMetaData(message, messageBuffer);
2363     if (mdclog_level_get() >= MDCLOG_INFO) {
2364         mdclog_write(MDCLOG_INFO, "send message to %s address", message.message.enodbName);
2365     }
2366
2367     auto rc = sendMessagetoCu(sctpMap, messageBuffer, message, failedMsgId);
2368     return rc;
2369 }
2370
2371 /**
2372  * Forward the received RMR payload to the CU over its existing SCTP association
2373  * @param sctpMap       map of connected CUs
2374  * @param messageBuffer RMR buffers holding the received message
2375  * @param message       reporting metadata; peerInfo identifies the target CU
2376  * @param failedMesgId  failure message type (not used on this path)
2377  * @return 0 on success, non-zero on failure
2378  */
2379 int sendMessagetoCu(Sctp_Map_t *sctpMap,
2380                     RmrMessagesBuffer_t &messageBuffer,
2381                     ReportingMessages_t &message,
2382                     int failedMesgId) {
2383     // set the message type and forward over the peer's SCTP association
2384     message.message.messageType = messageBuffer.rcvMessage->mtype;
2385     auto rc = sendSctpMsg(message.peerInfo, message, sctpMap);
2386     return rc;
2387 }
2388
2389
2390 /**
2391  * Register the peer's SCTP socket with the epoll instance (cleans up on failure)
2392  * @param epoll_fd  epoll instance file descriptor
2393  * @param peerInfo  connected CU whose socket is being registered
2394  * @param events    epoll event mask
2395  * @param sctpMap   map of connected CUs
2396  * @param enodbName RAN name used to build the "msg:" map key (may be nullptr)
2397  * @param msgType   message type used to build the "msg:" map key
2398  * @return 0 on success, -1 on failure
2399  */
2400 int addToEpoll(int epoll_fd,
2401                ConnectedCU_t *peerInfo,
2402                uint32_t events,
2403                Sctp_Map_t *sctpMap,
2404                char *enodbName,
2405                int msgType) {
2406     // Add the socket to the epoll instance
2407     struct epoll_event event{};
2408     event.data.ptr = peerInfo;
2409     event.events = events;
2410     if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, peerInfo->fileDescriptor, &event) < 0) {
2411         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2412             mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_ADD (may check not to quit here), %s, %s %d",
2413                          strerror(errno), __func__, __LINE__);
2414         }
2415         close(peerInfo->fileDescriptor);
2416         if (enodbName != nullptr) {
2417             cleanHashEntry(peerInfo, sctpMap);
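            // Also drop the map entry keyed "msg:<enodbName>|<msgType>" that references this peer.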
2418             char key[MAX_ENODB_NAME_SIZE * 2];
2419             snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", enodbName, msgType);
2420             if (mdclog_level_get() >= MDCLOG_DEBUG) {
2421                 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
2422             }
2423             auto tmp = sctpMap->find(key);
2424             if (tmp) {
2425                 free(tmp);
2426                 sctpMap->erase(key);
2427             }
2428         } else {
2429             peerInfo->enodbName[0] = 0;
2430         }
2431         mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_ADD (may check not to quit here)");
2432         return -1;
2433     }
2434     return 0;
2435 }
2436
2437 /**
2438  * Modify the epoll registration of the peer's SCTP socket (cleans up on failure)
2439  * @param epoll_fd  epoll instance file descriptor
2440  * @param peerInfo  connected CU whose registration is being modified
2441  * @param events    new epoll event mask
2442  * @param sctpMap   map of connected CUs
2443  * @param enodbName RAN name used to build the "msg:" map key
2444  * @param msgType   message type used to build the "msg:" map key
2445  * @return 0 on success, -1 on failure
2446  */
2447 int modifyToEpoll(int epoll_fd,
2448                   ConnectedCU_t *peerInfo,
2449                   uint32_t events,
2450                   Sctp_Map_t *sctpMap,
2451                   char *enodbName,
2452                   int msgType) {
2453     // Modify the epoll registration for this socket
2454     struct epoll_event event{};
2455     event.data.ptr = peerInfo;
2456     event.events = events;
2457     if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, peerInfo->fileDescriptor, &event) < 0) {
2458         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2459             mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_MOD (may check not to quit here), %s, %s %d",
2460                          strerror(errno), __func__, __LINE__);
2461         }
2462         close(peerInfo->fileDescriptor);
2463         cleanHashEntry(peerInfo, sctpMap);
2464         char key[MAX_ENODB_NAME_SIZE * 2];
2465         snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", enodbName, msgType);
2466         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2467             mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
2468         }
2469         auto tmp = sctpMap->find(key);
2470         if (tmp) {
2471             free(tmp);
2472         }
2473         sctpMap->erase(key);
2474         mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_MOD (may check not to quit here)");
2475         return -1;
2476     }
2477     return 0;
2478 }
2479
2480
2481 int sendRmrMessage(RmrMessagesBuffer_t &rmrMessageBuffer, ReportingMessages_t &message) {
2482     buildJsonMessage(message);
2483
2484     rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2485
2486     if (rmrMessageBuffer.sendMessage == nullptr) {
2487         rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2488         mdclog_write(MDCLOG_ERR, "RMR failed send message returned with NULL pointer");
2489         return -1;
2490     }
2491
2492     if (rmrMessageBuffer.sendMessage->state != 0) {
2493         char meid[RMR_MAX_MEID]{};
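        // RMR_ERR_RETRY is transient: pause briefly and retry the send once before giving up.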
2494         if (rmrMessageBuffer.sendMessage->state == RMR_ERR_RETRY) {
2495             usleep(5);
2496             rmrMessageBuffer.sendMessage->state = 0;
2497             mdclog_write(MDCLOG_INFO, "RETRY sending Message type %d to Xapp from %s",
2498                          rmrMessageBuffer.sendMessage->mtype,
2499                          rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2500             rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2501             if (rmrMessageBuffer.sendMessage == nullptr) {
2502                 mdclog_write(MDCLOG_ERR, "RMR failed send message returned with NULL pointer");
2503                 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2504                 return -1;
2505             } else if (rmrMessageBuffer.sendMessage->state != 0) {
2506                 mdclog_write(MDCLOG_ERR,
2507                              "Message state %s while sending request %d to Xapp from %s after a 5 microsecond retry delay",
2508                              translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str(),
2509                              rmrMessageBuffer.sendMessage->mtype,
2510                              rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2511                 auto rc = rmrMessageBuffer.sendMessage->state;
2512                 return rc;
2513             }
2514         } else {
2515             mdclog_write(MDCLOG_ERR, "Message state %s while sending request %d to Xapp from %s",
2516                          translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str(),
2517                          rmrMessageBuffer.sendMessage->mtype,
2518                          rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2519             return rmrMessageBuffer.sendMessage->state;
2520         }
2521     }
2522     return 0;
2523 }
2524
2525 void buildJsonMessage(ReportingMessages_t &message) {
2526     if (jsonTrace) {
2527         message.outLen = sizeof(message.base64Data);
2528         base64::encode((const unsigned char *) message.message.asndata,
2529                        (const int) message.message.asnLength,
2530                        message.base64Data,
2531                        message.outLen);
2532         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2533             mdclog_write(MDCLOG_DEBUG, "Tracing: ASN length = %d, base64 message length = %d ",
2534                          (int) message.message.asnLength,
2535                          (int) message.outLen);
2536         }
2537
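        // Illustrative (not captured) example of the record produced below, using a
        // hypothetical RAN name and message type:
        //   {"header": {"ts": "1618300000.000000001","ranName": "gnb_123_456_00000001",
        //    "messageType": 12345,"direction": "D"},"base64Length": 8,"asnBase64": "AAECAw=="}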
2538         snprintf(message.buffer, sizeof(message.buffer),
2539                  "{\"header\": {\"ts\": \"%ld.%09ld\","
2540                  "\"ranName\": \"%s\","
2541                  "\"messageType\": %d,"
2542                  "\"direction\": \"%c\"},"
2543                  "\"base64Length\": %d,"
2544                  "\"asnBase64\": \"%s\"}",
2545                  message.message.time.tv_sec,
2546                  message.message.time.tv_nsec,
2547                  message.message.enodbName,
2548                  message.message.messageType,
2549                  message.message.direction,
2550                  (int) message.outLen,
2551                  message.base64Data);
2552         static src::logger_mt &lg = my_logger::get();
2553
2554         BOOST_LOG(lg) << message.buffer;
2555     }
2556 }
2557
2558
2559 /**
2560  * Translate an RMR error/state code into a human-readable string
2561  * @param state RMR message state code
2562  * @return descriptive string for the given state
2563  */
2564 string translateRmrErrorMessages(int state) {
2565     string str = {};
2566     switch (state) {
2567         case RMR_OK:
2568             str = "RMR_OK - state is good";
2569             break;
2570         case RMR_ERR_BADARG:
2571             str = "RMR_ERR_BADARG - argument passed to function was unusable";
2572             break;
2573         case RMR_ERR_NOENDPT:
2574             str = "RMR_ERR_NOENDPT - send/call could not find an endpoint based on msg type";
2575             break;
2576         case RMR_ERR_EMPTY:
2577             str = "RMR_ERR_EMPTY - msg received had no payload; attempt to send an empty message";
2578             break;
2579         case RMR_ERR_NOHDR:
2580             str = "RMR_ERR_NOHDR - message didn't contain a valid header";
2581             break;
2582         case RMR_ERR_SENDFAILED:
2583             str = "RMR_ERR_SENDFAILED - send failed; errno has nano reason";
2584             break;
2585         case RMR_ERR_CALLFAILED:
2586             str = "RMR_ERR_CALLFAILED - unable to send call() message";
2587             break;
2588         case RMR_ERR_NOWHOPEN:
2589             str = "RMR_ERR_NOWHOPEN - no wormholes are open";
2590             break;
2591         case RMR_ERR_WHID:
2592             str = "RMR_ERR_WHID - wormhole id was invalid";
2593             break;
2594         case RMR_ERR_OVERFLOW:
2595             str = "RMR_ERR_OVERFLOW - operation would have busted through a buffer/field size";
2596             break;
2597         case RMR_ERR_RETRY:
2598             str = "RMR_ERR_RETRY - request (send/call/rts) failed, but caller should retry (EAGAIN for wrappers)";
2599             break;
2600         case RMR_ERR_RCVFAILED:
2601             str = "RMR_ERR_RCVFAILED - receive failed (hard error)";
2602             break;
2603         case RMR_ERR_TIMEOUT:
2604             str = "RMR_ERR_TIMEOUT - message processing call timed out";
2605             break;
2606         case RMR_ERR_UNSET:
2607             str = "RMR_ERR_UNSET - the message hasn't been populated with a transport buffer";
2608             break;
2609         case RMR_ERR_TRUNC:
2610             str = "RMR_ERR_TRUNC - received message likely truncated";
2611             break;
2612         case RMR_ERR_INITFAILED:
2613             str = "RMR_ERR_INITFAILED - initialisation of something (probably message) failed";
2614             break;
2615         case RMR_ERR_NOTSUPP:
2616             str = "RMR_ERR_NOTSUPP - the request is not supported, or RMr was not initialised for the request";
2617             break;
2618         default:
2619             char buf[128]{};
2620             snprintf(buf, sizeof buf, "UNDOCUMENTED RMR_ERR : %d", state);
2621             str = buf;
2622             break;
2623     }
2624     return str;
2625 }
2626
2627