5.0.4 Fix Prometheus bug in number of bytes.
ric-plt/e2.git: RIC-E2-TERMINATION/sctpThread.cpp
1 // Copyright 2019 AT&T Intellectual Property
2 // Copyright 2019 Nokia
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //      http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15
16 //  This source code is part of the near-RT RIC (RAN Intelligent Controller)
17 //  platform project (RICP).
18
19 // TODO: High-level file comment.
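// Overview (summary of the code in this file):
//   * buildConfiguration() reads the E2 Terminator configuration file and sets up
//     mdclog plus a rotating Boost.Log file sink.
//   * startPrometheus() registers the E2T counter family and starts either a pull
//     mode Exposer or a push mode Gateway.
//   * main() creates the epoll descriptor, the RMR context, an inotify watch on the
//     configuration directory and the SCTP listening socket, then starts listener().
//   * listener() runs the epoll loop: it accepts SCTP connections from RAN nodes,
//     receives RMR messages from xApps, reacts to configuration changes and forwards
//     E2AP PDUs between the SCTP and RMR sides.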
20
21
22
23 #include <3rdparty/oranE2/RANfunctions-List.h>
24 #include "sctpThread.h"
25 #include "BuildRunName.h"
26
27 //#include "3rdparty/oranE2SM/E2SM-gNB-NRT-RANfunction-Definition.h"
28 //#include "BuildXml.h"
29 //#include "pugixml/src/pugixml.hpp"
30
31 using namespace std;
32 //using namespace std::placeholders;
33 using namespace boost::filesystem;
34 using namespace prometheus;
35
36
37 //#ifdef __cplusplus
38 //extern "C"
39 //{
40 //#endif
41
42 // need to expose __gcov_flush() without including the gcov header
43 extern "C" void __gcov_flush(void);
44
45 static void catch_function(int signal) {
46     __gcov_flush();
47     exit(signal);
48 }
49
50
51 BOOST_LOG_INLINE_GLOBAL_LOGGER_DEFAULT(my_logger, src::logger_mt)
52
53 boost::shared_ptr<sinks::synchronous_sink<sinks::text_file_backend>> boostLogger;
54 double cpuClock = 0.0;
55 bool jsonTrace = true;
56
57 void init_log() {
58     mdclog_attr_t *attr;
59     mdclog_attr_init(&attr);
60     mdclog_attr_set_ident(attr, "E2Terminator");
61     mdclog_init(attr);
62     mdclog_attr_destroy(attr);
63 }
64 auto start_time = std::chrono::high_resolution_clock::now();
65 typedef std::chrono::duration<double, std::ratio<1,1>> seconds_t;
66
67 double age() {
68     return seconds_t(std::chrono::high_resolution_clock::now() - start_time).count();
69 }
70
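// approx_CPU_MHz(): samples rdtscp() across a sleep of `sleeptime` milliseconds and
// returns elapsed TSC cycles divided by elapsed wall-clock seconds (i.e. cycles per
// second). main() stores the result in cpuClock and logs it as "CPU speed".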
71 double approx_CPU_MHz(unsigned sleeptime) {
72     using namespace std::chrono_literals;
73     uint32_t aux = 0;
74     uint64_t cycles_start = rdtscp(aux);
75     double time_start = age();
76     std::this_thread::sleep_for(sleeptime * 1ms);
77     uint64_t elapsed_cycles = rdtscp(aux) - cycles_start;
78     double elapsed_time = age() - time_start;
79     return elapsed_cycles / elapsed_time;
80 }
81
82 //std::atomic<int64_t> rmrCounter{0};
83 std::atomic<int64_t> num_of_messages{0};
84 std::atomic<int64_t> num_of_XAPP_messages{0};
85 static long transactionCounter = 0;
86
87 int buildListeningPort(sctp_params_t &sctpParams) {
88     sctpParams.listenFD = socket(AF_INET6, SOCK_STREAM, IPPROTO_SCTP);
89     if (sctpParams.listenFD <= 0) {
90         mdclog_write(MDCLOG_ERR, "Error Opening socket, %s", strerror(errno));
91         return -1;
92     }
93
94     struct sockaddr_in6 servaddr {};
95     servaddr.sin6_family = AF_INET6;
96     servaddr.sin6_addr   = in6addr_any;
97     servaddr.sin6_port = htons(sctpParams.sctpPort);
98     if (bind(sctpParams.listenFD, (SA *)&servaddr, sizeof(servaddr)) < 0 ) {
99         mdclog_write(MDCLOG_ERR, "Error binding port %d. %s", sctpParams.sctpPort, strerror(errno));
100         return -1;
101     }
102     if (setSocketNoBlocking(sctpParams.listenFD) == -1) {
103         //mdclog_write(MDCLOG_ERR, "Error binding. %s", strerror(errno));
104         return -1;
105     }
106     if (mdclog_level_get() >= MDCLOG_DEBUG) {
107         struct sockaddr_in6 cliaddr {};
108         socklen_t len = sizeof(cliaddr);
109         getsockname(sctpParams.listenFD, (SA *)&cliaddr, &len);
110         char buff[1024] {};
111         inet_ntop(AF_INET6, &cliaddr.sin6_addr, buff, sizeof(buff));
112         mdclog_write(MDCLOG_DEBUG, "My address: %s, port %d\n", buff, ntohs(cliaddr.sin6_port));
113     }
114
115     if (listen(sctpParams.listenFD, SOMAXCONN) < 0) {
116         mdclog_write(MDCLOG_ERR, "Error listening. %s\n", strerror(errno));
117         return -1;
118     }
119     struct epoll_event event {};
120     event.events = EPOLLIN | EPOLLET;
121     event.data.fd = sctpParams.listenFD;
122
123     // add listening port to epoll
124     if (epoll_ctl(sctpParams.epoll_fd, EPOLL_CTL_ADD, sctpParams.listenFD, &event)) {
125         printf("Failed to add descriptor to epoll\n");
126         mdclog_write(MDCLOG_ERR, "Failed to add descriptor to epoll. %s\n", strerror(errno));
127         return -1;
128     }
129
130     return 0;
131 }
132
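// buildConfiguration() reads a small key/value configuration file via ReadConfigFile.
// An illustrative example of the keys it looks up (the key = value layout and the
// values shown here are placeholders, not defaults from this file):
//
//   nano                  = 38000             # RMR listen port
//   loglevel              = info              # debug | info | warning | error
//   volume                = log               # directory collecting rotated log files
//   local-ip              = 10.0.0.1
//   sctp-port             = 36422
//   external-fqdn         = e2t.example.com
//   pod_name              = E2TERM_POD_NAME   # name of the env var holding the pod name
//   trace                 = stop              # start | stop
//   prometheusMode        = pull              # pull | push
//   prometheusPort        = 8088              # pull mode
//   prometheusPushAddr    = host:9091         # push mode, gateway host:port
//   prometheusPushTimeOut = 10                # push interval in seconds [5..300]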
133 int buildConfiguration(sctp_params_t &sctpParams) {
134     path p = (sctpParams.configFilePath + "/" + sctpParams.configFileName).c_str();
135     if (exists(p)) {
136         const int size = 2048;
137         auto fileSize = file_size(p);
138         if (fileSize > size) {
139             mdclog_write(MDCLOG_ERR, "File %s larger than %d", p.string().c_str(), size);
140             return -1;
141         }
142     } else {
143         mdclog_write(MDCLOG_ERR, "Configuration file %s does not exist", p.string().c_str());
144         return -1;
145     }
146
147     ReadConfigFile conf;
148     if (conf.openConfigFile(p.string()) == -1) {
149         mdclog_write(MDCLOG_ERR, "Failed to open config file %s, %s",
150                      p.string().c_str(), strerror(errno));
151         return -1;
152     }
153     int rmrPort = conf.getIntValue("nano");
154     if (rmrPort == -1) {
155         mdclog_write(MDCLOG_ERR, "illegal RMR port");
156         return -1;
157     }
158     sctpParams.rmrPort = (uint16_t)rmrPort;
159     snprintf(sctpParams.rmrAddress, sizeof(sctpParams.rmrAddress), "%d", (int) (sctpParams.rmrPort));
160
161     auto tmpStr = conf.getStringValue("loglevel");
162     if (tmpStr.length() == 0) {
163         mdclog_write(MDCLOG_ERR, "illegal loglevel. Set loglevel to MDCLOG_INFO");
164         tmpStr = "info";
165     }
166     transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
167
168     if ((tmpStr.compare("debug")) == 0) {
169         sctpParams.logLevel = MDCLOG_DEBUG;
170     } else if ((tmpStr.compare("info")) == 0) {
171         sctpParams.logLevel = MDCLOG_INFO;
172     } else if ((tmpStr.compare("warning")) == 0) {
173         sctpParams.logLevel = MDCLOG_WARN;
174     } else if ((tmpStr.compare("error")) == 0) {
175         sctpParams.logLevel = MDCLOG_ERR;
176     } else {
177         mdclog_write(MDCLOG_ERR, "illegal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
178         sctpParams.logLevel = MDCLOG_INFO;
179     }
180     mdclog_level_set(sctpParams.logLevel);
181
182     tmpStr = conf.getStringValue("volume");
183     if (tmpStr.length() == 0) {
184         mdclog_write(MDCLOG_ERR, "illegal volume.");
185         return -1;
186     }
187
188     char tmpLogFilespec[VOLUME_URL_SIZE];
189     tmpLogFilespec[0] = 0;
190     sctpParams.volume[0] = 0;
191     snprintf(sctpParams.volume, VOLUME_URL_SIZE, "%s", tmpStr.c_str());
192     // copy the name to temp file as well
193     snprintf(tmpLogFilespec, VOLUME_URL_SIZE, "%s", tmpStr.c_str());
194
195
196     // define the file name in the tmp directory under the volume
197     strcat(tmpLogFilespec,"/tmp/E2Term_%Y-%m-%d_%H-%M-%S.%N.tmpStr");
198
199     sctpParams.myIP = conf.getStringValue("local-ip");
200     if (sctpParams.myIP.length() == 0) {
201         mdclog_write(MDCLOG_ERR, "illegal local-ip.");
202         return -1;
203     }
204
205     int sctpPort = conf.getIntValue("sctp-port");
206     if (sctpPort == -1) {
207         mdclog_write(MDCLOG_ERR, "illegal SCTP port");
208         return -1;
209     }
210     sctpParams.sctpPort = (uint16_t)sctpPort;
211
212     sctpParams.fqdn = conf.getStringValue("external-fqdn");
213     if (sctpParams.fqdn.length() == 0) {
214         mdclog_write(MDCLOG_ERR, "illegal external-fqdn");
215         return -1;
216     }
217
218     std::string pod = conf.getStringValue("pod_name");
219     if (pod.length() == 0) {
220         mdclog_write(MDCLOG_ERR, "illegal pod_name in config file");
221         return -1;
222     }
223     auto *podName = getenv(pod.c_str());
224     if (podName == nullptr) {
225         mdclog_write(MDCLOG_ERR, "illegal pod_name or environment variable does not exist : %s", pod.c_str());
226         return -1;
227
228     } else {
229         sctpParams.podName.assign(podName);
230         if (sctpParams.podName.length() == 0) {
231             mdclog_write(MDCLOG_ERR, "illegal pod_name");
232             return -1;
233         }
234     }
235
236     tmpStr = conf.getStringValue("trace");
237     transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
238     if ((tmpStr.compare("start")) == 0) {
239         mdclog_write(MDCLOG_INFO, "Trace set to: start");
240         sctpParams.trace = true;
241     } else if ((tmpStr.compare("stop")) == 0) {
242         mdclog_write(MDCLOG_INFO, "Trace set to: stop");
243         sctpParams.trace = false;
244     }
245     jsonTrace = sctpParams.trace;
246
247     sctpParams.epollTimeOut = -1;
248     tmpStr = conf.getStringValue("prometheusMode");
249     transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
250     sctpParams.prometheusMode = tmpStr;
251     if (tmpStr.length() != 0) {
252         if (tmpStr.compare("push") == 0) {
253             sctpParams.prometheusPushAddress = tmpStr;
254             auto timeout = conf.getIntValue("prometheusPushTimeOut");
255             if (timeout >= 5 && timeout <= 300) {
256                 sctpParams.epollTimeOut = timeout * 1000;
257             } else {
258                 sctpParams.epollTimeOut = 10 * 1000;
259             }
260         }
261     }
262
263     tmpStr = conf.getStringValue("prometheusPushAddr");
264     if (tmpStr.length() != 0) {
265         sctpParams.prometheusPushAddress = tmpStr;
266     }
267
268     tmpStr = conf.getStringValue("prometheusPort");
269     if (tmpStr.length() != 0) {
270         sctpParams.prometheusPort = tmpStr;
271     }
272
273     sctpParams.ka_message_length = snprintf(sctpParams.ka_message, KA_MESSAGE_SIZE, "{\"address\": \"%s:%d\","
274                                                                                     "\"fqdn\": \"%s\","
275                                                                                     "\"pod_name\": \"%s\"}",
276                                             (const char *)sctpParams.myIP.c_str(),
277                                             sctpParams.rmrPort,
278                                             sctpParams.fqdn.c_str(),
279                                             sctpParams.podName.c_str());
280
281     if (mdclog_level_get() >= MDCLOG_INFO) {
282         mdclog_mdc_add("RMR Port", to_string(sctpParams.rmrPort).c_str());
283         mdclog_mdc_add("LogLevel", to_string(sctpParams.logLevel).c_str());
284         mdclog_mdc_add("volume", sctpParams.volume);
285         mdclog_mdc_add("tmpLogFilespec", tmpLogFilespec);
286         mdclog_mdc_add("my ip", sctpParams.myIP.c_str());
287         mdclog_mdc_add("pod name", sctpParams.podName.c_str());
288
289         mdclog_write(MDCLOG_INFO, "running parameters for instance : %s", sctpParams.ka_message);
290     }
291     mdclog_mdc_clean();
292
293     // Files written to the current working directory
294     boostLogger = logging::add_file_log(
295             keywords::file_name = tmpLogFilespec, // to temp directory
296             keywords::rotation_size = 10 * 1024 * 1024,
297             keywords::time_based_rotation = sinks::file::rotation_at_time_interval(posix_time::hours(1)),
298             keywords::format = "%Message%"
299             //keywords::format = "[%TimeStamp%]: %Message%" // use each tmpStr with time stamp
300     );
301
302     // Set up a destination folder for collecting rotated (closed) files -- the same volume is used so rename() works
303     boostLogger->locked_backend()->set_file_collector(sinks::file::make_collector(
304             keywords::target = sctpParams.volume
305     ));
306
307     // Upon restart, scan the directory for files matching the file_name pattern
308     boostLogger->locked_backend()->scan_for_files();
309
310     // Enable auto-flushing after each log record written
311     if (mdclog_level_get() >= MDCLOG_DEBUG) {
312         boostLogger->locked_backend()->auto_flush(true);
313     }
314
315     return 0;
316 }
317
318 static std::string GetHostName() {
319     char hostname[1024];
320
321     if (::gethostname(hostname, sizeof(hostname))) {
322         return {};
323     }
324     return hostname;
325 }
326
327 void startPrometheus(sctp_params_t &sctpParams) {
328     sctpParams.prometheusFamily = &BuildCounter()
329             .Name("E2T")
330             .Help("E2T message counter")
331             .Labels({{"POD_NAME", sctpParams.podName}})
332             .Register(*sctpParams.prometheusRegistry);
333
334     if (strcmp(sctpParams.prometheusMode.c_str(),"pull") == 0) {
335         if (mdclog_level_get() >= MDCLOG_DEBUG) {
336             mdclog_write(MDCLOG_DEBUG, "Start Prometheus Pull mode on %s:%s", sctpParams.myIP.c_str(), sctpParams.prometheusPort.c_str());
337         }
338         sctpParams.prometheusExposer = new Exposer(sctpParams.myIP + ":" + sctpParams.prometheusPort, 1);
339         sctpParams.prometheusExposer->RegisterCollectable(sctpParams.prometheusRegistry);
340     } else if (strcmp(sctpParams.prometheusMode.c_str(),"push") == 0) {
341         if (mdclog_level_get() >= MDCLOG_DEBUG) {
342             mdclog_write(MDCLOG_DEBUG, "Start Prometheus Push mode");
343         }
344         const auto labels = Gateway::GetInstanceLabel(GetHostName());
345         string address {};
346         string port {};
347         char ch = ':';
348         auto found = sctpParams.prometheusPushAddress.find_last_of(ch);
349         // split the push address "host:port" at the last ':' into address and port;
350         // if no ':' is present the gateway cannot be built
351         if (found != string::npos) {
352             address = sctpParams.prometheusPushAddress.substr(0,found);
353             port = sctpParams.prometheusPushAddress.substr(found + 1);
354             sctpParams.prometheusGateway = new Gateway(address, port, "E2T", labels);
355             sctpParams.prometheusGateway->RegisterCollectable(sctpParams.prometheusRegistry);
356         } else {
357             mdclog_write(MDCLOG_ERR, "failed to build Prometheus gateway, no stats will be sent");
358         }
359     }
360 }
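// A minimal sketch of how counters from prometheusFamily are used elsewhere in the
// code base (not in this file); the label name and value below are illustrative only:
//
//   Counter &setupCounter = sctpParams.prometheusFamily->Add({{"counter", "SetupRequest"}});
//   setupCounter.Increment();   // prometheus-cpp Family<Counter>::Add() / Counter::Increment()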
361
362 int main(const int argc, char **argv) {
363     sctp_params_t sctpParams;
364
365     {
366         std::random_device device{};
367         std::mt19937 generator(device());
368         std::uniform_int_distribution<long> distribution(1, (long) 1e12);
369         transactionCounter = distribution(generator);
370     }
371
372 //    uint64_t st = 0;
373 //    uint32_t aux1 = 0;
374 //   st = rdtscp(aux1);
375
376     unsigned num_cpus = std::thread::hardware_concurrency();
377     init_log();
378     mdclog_level_set(MDCLOG_INFO);
379
380     if (std::signal(SIGINT, catch_function) == SIG_ERR) {
381         mdclog_write(MDCLOG_ERR, "Error initializing SIGINT");
382         exit(1);
383     }
384     if (std::signal(SIGABRT, catch_function)== SIG_ERR) {
385         mdclog_write(MDCLOG_ERR, "Error initializing SIGABRT");
386         exit(1);
387     }
388     if (std::signal(SIGTERM, catch_function)== SIG_ERR) {
389         mdclog_write(MDCLOG_ERR, "Error initializing SIGTERM");
390         exit(1);
391     }
392
393     cpuClock = approx_CPU_MHz(100);
394
395     mdclog_write(MDCLOG_DEBUG, "CPU speed %11.11f", cpuClock);
396
397     auto result = parse(argc, argv, sctpParams);
398
399     if (buildConfiguration(sctpParams) != 0) {
400         exit(-1);
401     }
402
403     //auto registry = std::make_shared<Registry>();
404     sctpParams.prometheusRegistry = std::make_shared<Registry>();
405
406     //sctpParams.promtheusFamily = new Family<Counter>("E2T", "E2T message counter", {{"E", sctpParams.podName}});
407
408     startPrometheus(sctpParams);
409
410     // start epoll
411     sctpParams.epoll_fd = epoll_create1(0);
412     if (sctpParams.epoll_fd == -1) {
413         mdclog_write(MDCLOG_ERR, "failed to open epoll descriptor");
414         exit(-1);
415     }
416
417     getRmrContext(sctpParams);
418     if (sctpParams.rmrCtx == nullptr) {
419         close(sctpParams.epoll_fd);
420         exit(-1);
421     }
422
423     if (buildInotify(sctpParams) == -1) {
424         close(sctpParams.rmrListenFd);
425         rmr_close(sctpParams.rmrCtx);
426         close(sctpParams.epoll_fd);
427         exit(-1);
428     }
429
430     if (buildListeningPort(sctpParams) != 0) {
431         close(sctpParams.rmrListenFd);
432         rmr_close(sctpParams.rmrCtx);
433         close(sctpParams.epoll_fd);
434         exit(-1);
435     }
436
437     sctpParams.sctpMap = new mapWrapper();
438
439     std::vector<std::thread> threads(num_cpus);
440 //    std::vector<std::thread> threads;
441
442     num_cpus = 1;
443     for (unsigned int i = 0; i < num_cpus; i++) {
444         threads[i] = std::thread(listener, &sctpParams);
445
446         cpu_set_t cpuset;
447         CPU_ZERO(&cpuset);
448         CPU_SET(i, &cpuset);
449         int rc = pthread_setaffinity_np(threads[i].native_handle(), sizeof(cpu_set_t), &cpuset);
450         if (rc != 0) {
451             mdclog_write(MDCLOG_ERR, "Error calling pthread_setaffinity_np: %d", rc);
452         }
453     }
454
455
456     // loop sending E2_TERM_INIT until the first message from an xApp arrives
457     handleTermInit(sctpParams);
458
459     for (auto &t : threads) {
460         t.join();
461     }
462
463     return 0;
464 }
465
466 void handleTermInit(sctp_params_t &sctpParams) {
467     sendTermInit(sctpParams);
468     // send E2_TERM_INIT to the E2 Manager to announce this E2 Terminator instance
469     // (RMR message type E2_TERM_INIT)
470
471     int count = 0;
472     while (true) {
473         auto xappMessages = num_of_XAPP_messages.load(std::memory_order_acquire);
474         if (xappMessages > 0) {
475             if (mdclog_level_get() >=  MDCLOG_INFO) {
476                 mdclog_write(MDCLOG_INFO, "Got a message from some application, stop sending E2_TERM_INIT");
477             }
478             return;
479         }
480         usleep(100000);
481         count++;
482         if (count % 1000 == 0) {
483             mdclog_write(MDCLOG_ERR, "Got no messages from any xApp");
484             sendTermInit(sctpParams);
485         }
486     }
487 }
488
489 void sendTermInit(sctp_params_t &sctpParams) {
490     rmr_mbuf_t *msg = rmr_alloc_msg(sctpParams.rmrCtx, sctpParams.ka_message_length);
491     auto count = 0;
492     while (true) {
493         msg->mtype = E2_TERM_INIT;
494         msg->state = 0;
495         rmr_bytes2payload(msg, (unsigned char *)sctpParams.ka_message, sctpParams.ka_message_length);
496         static unsigned char tx[32];
497         auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
498         rmr_bytes2xact(msg, tx, txLen);
499         msg = rmr_send_msg(sctpParams.rmrCtx, msg);
500         if (msg == nullptr) {
501             msg = rmr_alloc_msg(sctpParams.rmrCtx, sctpParams.ka_message_length);
502         } else if (msg->state == 0) {
503             rmr_free_msg(msg);
504             if (mdclog_level_get() >=  MDCLOG_INFO) {
505                 mdclog_write(MDCLOG_INFO, "E2_TERM_INIT successfully sent");
506             }
507             return;
508         } else {
509             if (count % 100 == 0) {
510                 mdclog_write(MDCLOG_ERR, "Error sending E2_TERM_INIT cause : %s ", translateRmrErrorMessages(msg->state).c_str());
511             }
512             sleep(1);
513         }
514         count++;
515     }
516 }
517
518 /**
519  * Parse the command line options (configuration file path and name).
520  * @param argc number of command line arguments
521  * @param argv command line argument vector
522  * @param sctpParams structure receiving the configuration file path and file name
523  * @return the parsed command line options
524  */
525 cxxopts::ParseResult parse(int argc, char *argv[], sctp_params_t &sctpParams) {
526     cxxopts::Options options(argv[0], "e2 term help");
527     options.positional_help("[optional args]").show_positional_help();
528     options.allow_unrecognised_options().add_options()
529             ("p,path", "config file path", cxxopts::value<std::string>(sctpParams.configFilePath)->default_value("config"))
530             ("f,file", "config file name", cxxopts::value<std::string>(sctpParams.configFileName)->default_value("config.conf"))
531             ("h,help", "Print help");
532
533     auto result = options.parse(argc, argv);
534
535     if (result.count("help")) {
536         std::cout << options.help({""}) << std::endl;
537         exit(0);
538     }
539     return result;
540 }
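// Illustrative invocation (the binary name is a placeholder):
//   ./e2term -p /opt/e2/config -f config.conf
// With no arguments the defaults "config" and "config.conf" are used.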
541
542 /**
543  * Create the inotify descriptor, add a watch on the configuration directory and register it with epoll.
544  * @param sctpParams global SCTP parameters (configuration path, epoll FD, RMR context)
545  * @return 0 on success, -1 on failure
546  */
547 int buildInotify(sctp_params_t &sctpParams) {
548     sctpParams.inotifyFD = inotify_init1(IN_NONBLOCK);
549     if (sctpParams.inotifyFD == -1) {
550         mdclog_write(MDCLOG_ERR, "Failed to init inotify (inotify_init1) %s", strerror(errno));
551         close(sctpParams.rmrListenFd);
552         rmr_close(sctpParams.rmrCtx);
553         close(sctpParams.epoll_fd);
554         return -1;
555     }
556
557     sctpParams.inotifyWD = inotify_add_watch(sctpParams.inotifyFD,
558                                              (const char *)sctpParams.configFilePath.c_str(),
559                                              (unsigned)IN_OPEN | (unsigned)IN_CLOSE_WRITE | (unsigned)IN_CLOSE_NOWRITE); //IN_CLOSE = (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE)
560     if (sctpParams.inotifyWD == -1) {
561         mdclog_write(MDCLOG_ERR, "Failed to add directory : %s to inotify (inotify_add_watch) %s",
562                      sctpParams.configFilePath.c_str(),
563                      strerror(errno));
564         close(sctpParams.inotifyFD);
565         return -1;
566     }
567
568     struct epoll_event event{};
569     event.events = (EPOLLIN);
570     event.data.fd = sctpParams.inotifyFD;
571     // add the inotify FD to epoll
572     if (epoll_ctl(sctpParams.epoll_fd, EPOLL_CTL_ADD, sctpParams.inotifyFD, &event)) {
573         mdclog_write(MDCLOG_ERR, "Failed to add inotify FD to epoll");
574         close(sctpParams.inotifyFD);
575         return -1;
576     }
577     return 0;
578 }
579
580 /**
581  * Worker thread main loop: waits on epoll and dispatches SCTP, RMR and inotify events.
582  * @param params global SCTP parameters
583  * @return none; runs until the process terminates or epoll_wait fails
584  */
585 void listener(sctp_params_t *params) {
586     int num_of_SCTP_messages = 0;
587     auto totalTime = 0.0;
588     mdclog_mdc_clean();
589     mdclog_level_set(params->logLevel);
590
591     std::thread::id this_id = std::this_thread::get_id();
592     //save cout
593     streambuf *oldCout = cout.rdbuf();
594     ostringstream memCout;
595     // create new cout
596     cout.rdbuf(memCout.rdbuf());
597     cout << this_id;
598     //return to the normal cout
599     cout.rdbuf(oldCout);
600
601     char tid[32];
602     memcpy(tid, memCout.str().c_str(), memCout.str().length() < 32 ? memCout.str().length() : 31);
603     tid[memCout.str().length() < 32 ? memCout.str().length() : 31] = 0;
604     mdclog_mdc_add("thread id", tid);
605
606     if (mdclog_level_get() >= MDCLOG_DEBUG) {
607         mdclog_write(MDCLOG_DEBUG, "started thread number %s", tid);
608     }
609
610     RmrMessagesBuffer_t rmrMessageBuffer{};
611     //create and init RMR
612     rmrMessageBuffer.rmrCtx = params->rmrCtx;
613
614     auto *events = (struct epoll_event *) calloc(MAXEVENTS, sizeof(struct epoll_event));
615     struct timespec end{0, 0};
616     struct timespec start{0, 0};
617
618     rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
619     rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
620
621     memcpy(rmrMessageBuffer.ka_message, params->ka_message, params->ka_message_length);
622     rmrMessageBuffer.ka_message_len = params->ka_message_length;
623     rmrMessageBuffer.ka_message[rmrMessageBuffer.ka_message_len] = 0;
624
625     if (mdclog_level_get() >= MDCLOG_DEBUG) {
626         mdclog_write(MDCLOG_DEBUG, "keep alive message is : %s", rmrMessageBuffer.ka_message);
627     }
628
629     ReportingMessages_t message {};
630
631 //    for (int i = 0; i < MAX_RMR_BUFF_ARRY; i++) {
632 //        rmrMessageBuffer.rcvBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
633 //        rmrMessageBuffer.sendBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
634 //    }
635
636     bool gatewayflag = false;
637     future<int> gateWay; // declared before the loop so a pending AsyncPush() result survives into later iterations
638     while (true) {
639
640         if (mdclog_level_get() >= MDCLOG_DEBUG) {
641             mdclog_write(MDCLOG_DEBUG, "Start EPOLL Wait. Timeout = %d", params->epollTimeOut);
642         }
643         auto numOfEvents = epoll_wait(params->epoll_fd, events, MAXEVENTS, params->epollTimeOut);
644         if (numOfEvents == 0) {
645             if (params->prometheusGateway != nullptr) {
646                 gateWay = params->prometheusGateway->AsyncPush();
647                 gatewayflag = true;
648             }
649             continue;
650         } else if (numOfEvents < 0) {
651             if (errno == EINTR) {
652                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
653                     mdclog_write(MDCLOG_DEBUG, "got EINTR : %s", strerror(errno));
654                 }
655                 continue;
656             }
657             mdclog_write(MDCLOG_ERR, "Epoll wait failed, errno = %s", strerror(errno));
658             return;
659         }
660         if (gatewayflag) {
661             gatewayflag = false;
662             auto rc = gateWay.get();
663             if (rc != 200) {
664                 mdclog_write(MDCLOG_ERR, "Async send to Prometheus failed with return code %d", rc);
665             } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
666                 mdclog_write(MDCLOG_DEBUG, "Stats sent to Prometheus");
667             }
668         }
669         for (auto i = 0; i < numOfEvents; i++) {
670             if (mdclog_level_get() >= MDCLOG_DEBUG) {
671                 mdclog_write(MDCLOG_DEBUG, "handling epoll event %d out of %d", i + 1, numOfEvents);
672             }
673             clock_gettime(CLOCK_MONOTONIC, &message.message.time);
674             start.tv_sec = message.message.time.tv_sec;
675             start.tv_nsec = message.message.time.tv_nsec;
676
677
678             if ((events[i].events & EPOLLERR) || (events[i].events & EPOLLHUP)) {
679                 handlepoll_error(events[i], message, rmrMessageBuffer, params);
680             } else if (events[i].events & EPOLLOUT) {
681                 handleEinprogressMessages(events[i], message, rmrMessageBuffer, params);
682             } else if (params->listenFD == events[i].data.fd) {
683                 if (mdclog_level_get() >= MDCLOG_INFO) {
684                     mdclog_write(MDCLOG_INFO, "New connection request from the SCTP network\n");
685                 }
686                 // a new connection is requested from the RAN; start building the connection
687                 while (true) {
688                     struct sockaddr in_addr {};
689                     socklen_t in_len;
690                     char hostBuff[NI_MAXHOST];
691                     char portBuff[NI_MAXSERV];
692
693                     in_len = sizeof(in_addr);
694                     auto *peerInfo = (ConnectedCU_t *)calloc(1, sizeof(ConnectedCU_t));
695                     peerInfo->sctpParams = params;
696                     peerInfo->fileDescriptor = accept(params->listenFD, &in_addr, &in_len);
697                     if (peerInfo->fileDescriptor == -1) {
698                         if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) {
699                             /* We have processed all incoming connections. */
700                             break;
701                         } else {
702                             mdclog_write(MDCLOG_ERR, "Accept error, errno = %s", strerror(errno));
703                             break;
704                         }
705                     }
706                     if (setSocketNoBlocking(peerInfo->fileDescriptor) == -1) {
707                         mdclog_write(MDCLOG_ERR, "setSocketNoBlocking failed for new connection on descriptor %d\n", peerInfo->fileDescriptor);
708                         close(peerInfo->fileDescriptor);
709                         break;
710                     }
711                     auto  ans = getnameinfo(&in_addr, in_len,
712                                             peerInfo->hostName, NI_MAXHOST,
713                                             peerInfo->portNumber, NI_MAXSERV, (unsigned )((unsigned int)NI_NUMERICHOST | (unsigned int)NI_NUMERICSERV));
714                     if (ans < 0) {
715                         mdclog_write(MDCLOG_ERR, "Failed to get info on connection request. %s\n", strerror(errno));
716                         close(peerInfo->fileDescriptor);
717                         break;
718                     }
719                     if (mdclog_level_get() >= MDCLOG_DEBUG) {
720                         mdclog_write(MDCLOG_DEBUG, "Accepted connection on descriptor %d (host=%s, port=%s)\n", peerInfo->fileDescriptor, peerInfo->hostName, peerInfo->portNumber);
721                     }
722                     peerInfo->isConnected = false;
723                     peerInfo->gotSetup = false;
724                     if (addToEpoll(params->epoll_fd,
725                                    peerInfo,
726                                    (EPOLLIN | EPOLLET),
727                                    params->sctpMap, nullptr,
728                                    0) != 0) {
729                         break;
730                     }
731                     break;
732                 }
733             } else if (params->rmrListenFd == events[i].data.fd) {
734                 // got message from XAPP
735                 num_of_XAPP_messages.fetch_add(1, std::memory_order_release);
736                 num_of_messages.fetch_add(1, std::memory_order_release);
737                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
738                     mdclog_write(MDCLOG_DEBUG, "new message from RMR");
739                 }
740                 if (receiveXappMessages(params->sctpMap,
741                                         rmrMessageBuffer,
742                                         message.message.time) != 0) {
743                     mdclog_write(MDCLOG_ERR, "Error handling Xapp message");
744                 }
745             } else if (params->inotifyFD == events[i].data.fd) {
746                 mdclog_write(MDCLOG_INFO, "Got event from inotify (configuration update)");
747                 handleConfigChange(params);
748             } else {
749                 /* We have data on the fd waiting to be read. Read and display it.
750                  * We must read whatever data is available completely, as we are running
751                  *  in edge-triggered mode and won't get a notification again for the same data. */
752                 num_of_messages.fetch_add(1, std::memory_order_release);
753                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
754                     mdclog_write(MDCLOG_DEBUG, "new message from SCTP, epoll flags are : %0x", events[i].events);
755                 }
756                 receiveDataFromSctp(&events[i],
757                                     params->sctpMap,
758                                     num_of_SCTP_messages,
759                                     rmrMessageBuffer,
760                                     message.message.time);
761             }
762
763             clock_gettime(CLOCK_MONOTONIC, &end);
764             if (mdclog_level_get() >= MDCLOG_INFO) {
765                 totalTime += ((end.tv_sec + 1.0e-9 * end.tv_nsec) -
766                               ((double) start.tv_sec + 1.0e-9 * start.tv_nsec));
767             }
768             if (mdclog_level_get() >= MDCLOG_DEBUG) {
769                 mdclog_write(MDCLOG_DEBUG, "message handling is %ld seconds %ld nanoseconds",
770                              end.tv_sec - start.tv_sec,
771                              end.tv_nsec - start.tv_nsec);
772             }
773         }
774     }
775 }
776
777 /**
778  * Re-read the configuration file after an inotify event and apply log level, trace and Prometheus push timeout changes.
779  * @param sctpParams global SCTP parameters to update
780  */
781 void handleConfigChange(sctp_params_t *sctpParams) {
782     char buf[4096] __attribute__ ((aligned(__alignof__(struct inotify_event))));
783     const struct inotify_event *event;
784     char *ptr;
785
786     path p = (sctpParams->configFilePath + "/" + sctpParams->configFileName).c_str();
787     auto endlessLoop = true;
788     while (endlessLoop) {
789         auto len = read(sctpParams->inotifyFD, buf, sizeof buf);
790         if (len == -1) {
791             if (errno != EAGAIN) {
792                 mdclog_write(MDCLOG_ERR, "read %s ", strerror(errno));
793                 endlessLoop = false;
794                 continue;
795             }
796             else {
797                 endlessLoop = false;
798                 continue;
799             }
800         }
801
802         for (ptr = buf; ptr < buf + len; ptr += sizeof(struct inotify_event) + event->len) {
803             event = (const struct inotify_event *)ptr;
804             if (event->mask & (uint32_t)IN_ISDIR) {
805                 continue;
806             }
807
808             // verify the event comes from the watch on the configuration directory
809             if (sctpParams->inotifyWD == event->wd) {
810                 // event belongs to the watched directory; the file name is checked below
811             }
812             if (event->len) {
813                 auto  retVal = strcmp(sctpParams->configFileName.c_str(), event->name);
814                 if (retVal != 0) {
815                     continue;
816                 }
817             }
818             // only the file we want
819             if (event->mask & (uint32_t)IN_CLOSE_WRITE) {
820                 if (mdclog_level_get() >= MDCLOG_INFO) {
821                     mdclog_write(MDCLOG_INFO, "Configuration file changed");
822                 }
823                 if (exists(p)) {
824                     const int size = 2048;
825                     auto fileSize = file_size(p);
826                     if (fileSize > size) {
827                         mdclog_write(MDCLOG_ERR, "File %s larger than %d", p.string().c_str(), size);
828                         return;
829                     }
830                 } else {
831                     mdclog_write(MDCLOG_ERR, "Configuration file %s does not exist", p.string().c_str());
832                     return;
833                 }
834
835                 ReadConfigFile conf;
836                 if (conf.openConfigFile(p.string()) == -1) {
837                     mdclog_write(MDCLOG_ERR, "Failed to open config file %s, %s",
838                                  p.string().c_str(), strerror(errno));
839                     return;
840                 }
841
842                 auto tmpStr = conf.getStringValue("loglevel");
843                 if (tmpStr.length() == 0) {
844                     mdclog_write(MDCLOG_ERR, "illegal loglevel. Set loglevel to MDCLOG_INFO");
845                     tmpStr = "info";
846                 }
847                 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
848
849                 if ((tmpStr.compare("debug")) == 0) {
850                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_DEBUG");
851                     sctpParams->logLevel = MDCLOG_DEBUG;
852                 } else if ((tmpStr.compare("info")) == 0) {
853                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_INFO");
854                     sctpParams->logLevel = MDCLOG_INFO;
855                 } else if ((tmpStr.compare("warning")) == 0) {
856                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_WARN");
857                     sctpParams->logLevel = MDCLOG_WARN;
858                 } else if ((tmpStr.compare("error")) == 0) {
859                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_ERR");
860                     sctpParams->logLevel = MDCLOG_ERR;
861                 } else {
862                     mdclog_write(MDCLOG_ERR, "illegal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
863                     sctpParams->logLevel = MDCLOG_INFO;
864                 }
865                 mdclog_level_set(sctpParams->logLevel);
866
867
868                 tmpStr = conf.getStringValue("trace");
869                 if (tmpStr.length() == 0) {
870                     mdclog_write(MDCLOG_ERR, "illegal trace. Set trace to stop");
871                     tmpStr = "stop";
872                 }
873
874                 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
875                 if ((tmpStr.compare("start")) == 0) {
876                     mdclog_write(MDCLOG_INFO, "Trace set to: start");
877                     sctpParams->trace = true;
878                 } else if ((tmpStr.compare("stop")) == 0) {
879                     mdclog_write(MDCLOG_INFO, "Trace set to: stop");
880                     sctpParams->trace = false;
881                 } else {
882                     mdclog_write(MDCLOG_ERR, "Trace was set to an invalid value %s, set to stop", tmpStr.c_str());
883                     sctpParams->trace = false;
884                 }
885                 jsonTrace = sctpParams->trace;
886
887                 if (strcmp(sctpParams->prometheusMode.c_str(), "push") == 0) {
888                     auto timeout = conf.getIntValue("prometheusPushTimeOut");
889                     if (timeout >= 5 && timeout <= 300) {
890                         sctpParams->epollTimeOut = timeout * 1000;
891                     } else {
892                         mdclog_write(MDCLOG_ERR, "prometheusPushTimeOut set to an invalid value %d, valid values are [5..300]",
893                                      timeout);
894                     }
895                 }
896
897                 endlessLoop = false;
898             }
899         }
900     }
901 }
902
903 /**
904  * Handle EPOLLOUT after a non-blocking connect (EINPROGRESS): check SO_ERROR, then send the delayed SETUP or report the failure to the xApp.
905  * @param event the epoll event carrying the peer information
906  * @param message reporting message used for notifications and tracing
907  * @param rmrMessageBuffer RMR buffers used to notify the xApp
908  * @param params global SCTP parameters
909  */
910 void handleEinprogressMessages(struct epoll_event &event,
911                                ReportingMessages_t &message,
912                                RmrMessagesBuffer_t &rmrMessageBuffer,
913                                sctp_params_t *params) {
914     auto *peerInfo = (ConnectedCU_t *)event.data.ptr;
915     memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
916
917     mdclog_write(MDCLOG_INFO, "file descriptor %d got EPOLLOUT", peerInfo->fileDescriptor);
918     auto retVal = 0;
919     socklen_t retValLen = sizeof(retVal);
920     auto rc = getsockopt(peerInfo->fileDescriptor, SOL_SOCKET, SO_ERROR, &retVal, &retValLen);
921     if (rc != 0 || retVal != 0) {
922         if (rc != 0) {
923             rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
924                                                          "%s|Failed SCTP Connection, getsockopt after EINPROGRESS failed: %s",
925                                                          peerInfo->enodbName, strerror(errno));
926         } else if (retVal != 0) {
927             rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
928                                                          "%s|Failed SCTP Connection after EINPROGRESS, SO_ERROR",
929                                                          peerInfo->enodbName);
930         }
931
932         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
933         message.message.asnLength = rmrMessageBuffer.sendMessage->len;
934         mdclog_write(MDCLOG_ERR, "%s", rmrMessageBuffer.sendMessage->payload);
935         message.message.direction = 'N';
936         if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
937             mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
938         }
939         memset(peerInfo->asnData, 0, peerInfo->asnLength);
940         peerInfo->asnLength = 0;
941         peerInfo->mtype = 0;
942         return;
943     }
944
945     peerInfo->isConnected = true;
946
947     if (modifyToEpoll(params->epoll_fd, peerInfo, (EPOLLIN | EPOLLET), params->sctpMap, peerInfo->enodbName,
948                       peerInfo->mtype) != 0) {
949         mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_MOD");
950         return;
951     }
952
953     message.message.asndata = (unsigned char *)peerInfo->asnData;
954     message.message.asnLength = peerInfo->asnLength;
955     message.message.messageType = peerInfo->mtype;
956     memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
957     num_of_messages.fetch_add(1, std::memory_order_release);
958     if (mdclog_level_get() >= MDCLOG_DEBUG) {
959         mdclog_write(MDCLOG_DEBUG, "send the delayed SETUP/ENDC SETUP to sctp for %s",
960                      message.message.enodbName);
961     }
962     if (sendSctpMsg(peerInfo, message, params->sctpMap) != 0) {
963         if (mdclog_level_get() >= MDCLOG_DEBUG) {
964             mdclog_write(MDCLOG_DEBUG, "Error writing to SCTP %s %d", __func__, __LINE__);
965         }
966         return;
967     }
968
969     memset(peerInfo->asnData, 0, peerInfo->asnLength);
970     peerInfo->asnLength = 0;
971     peerInfo->mtype = 0;
972 }
973
974
975 void handlepoll_error(struct epoll_event &event,
976                       ReportingMessages_t &message,
977                       RmrMessagesBuffer_t &rmrMessageBuffer,
978                       sctp_params_t *params) {
979     if (event.data.fd != params->rmrListenFd) {
980         auto *peerInfo = (ConnectedCU_t *)event.data.ptr;
981         mdclog_write(MDCLOG_ERR, "epoll error, events %0x on fd %d, RAN NAME : %s",
982                      event.events, peerInfo->fileDescriptor, peerInfo->enodbName);
983
984         rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
985                                                      "%s|Failed SCTP Connection",
986                                                      peerInfo->enodbName);
987         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
988         message.message.asnLength = rmrMessageBuffer.sendMessage->len;
989
990         memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
991         message.message.direction = 'N';
992         if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
993             mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
994         }
995
996         close(peerInfo->fileDescriptor);
997         params->sctpMap->erase(peerInfo->enodbName);
998         cleanHashEntry((ConnectedCU_t *) event.data.ptr, params->sctpMap);
999     } else {
1000         mdclog_write(MDCLOG_ERR, "epoll error, events %0x on RMR FD", event.events);
1001     }
1002 }
1003 /**
1004  * Set O_NONBLOCK on the given socket.
1005  * @param socket the file descriptor to modify
1006  * @return 0 on success, -1 on failure
1007  */
1008 int setSocketNoBlocking(int socket) {
1009     auto flags = fcntl(socket, F_GETFL, 0);
1010
1011     if (flags == -1) {
1012         mdclog_mdc_add("func", "fcntl");
1013         mdclog_write(MDCLOG_ERR, "%s, %s", __FUNCTION__, strerror(errno));
1014         mdclog_mdc_clean();
1015         return -1;
1016     }
1017
1018     flags = (unsigned) flags | (unsigned) O_NONBLOCK;
1019     if (fcntl(socket, F_SETFL, flags) == -1) {
1020         mdclog_mdc_add("func", "fcntl");
1021         mdclog_write(MDCLOG_ERR, "%s, %s", __FUNCTION__, strerror(errno));
1022         mdclog_mdc_clean();
1023         return -1;
1024     }
1025
1026     return 0;
1027 }
1028
1029 /**
1030  * Remove the peer's map entries ("host:<host>:<port>" and the RAN name) and free the structure.
1031  * @param val the connected peer entry to remove
1032  * @param m the map holding the peer entries
1033  */
1034 void cleanHashEntry(ConnectedCU_t *val, Sctp_Map_t *m) {
1035     char *dummy;
1036     auto port = (uint16_t) strtol(val->portNumber, &dummy, 10);
1037     char searchBuff[2048]{};
1038
1039     snprintf(searchBuff, sizeof searchBuff, "host:%s:%d", val->hostName, port);
1040     m->erase(searchBuff);
1041
1042     m->erase(val->enodbName);
1043     free(val);
1044 }
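// Keys used in the SCTP map, as seen in this file: the peer address as
// "host:<host name>:<port>", the RAN (eNodeB) name itself, and buffered messages as
// "msg:<RAN name>|<message type>".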
1045
1046 /**
1047  * Send an ASN.1 encoded message to the peer over its SCTP socket.
1048  * On a write error the connection is closed, the buffered-message key
1049  * "msg:<RAN name>|<message type>" is erased and the peer entries are
1050  * removed from the map.
1051  * @param peerInfo the connected peer (file descriptor, host name, RAN name)
1052  * @param message the reporting message carrying the ASN.1 data to send
1053  * @param m map of host information, cleaned on failure
1054  * @return 0 on success, a negative number on failure
1055  */
1056 int sendSctpMsg(ConnectedCU_t *peerInfo, ReportingMessages_t &message, Sctp_Map_t *m) {
1057     auto loglevel = mdclog_level_get();
1058     int fd = peerInfo->fileDescriptor;
1059     if (loglevel >= MDCLOG_DEBUG) {
1060         mdclog_write(MDCLOG_DEBUG, "Send SCTP message for CU %s, %s",
1061                      message.message.enodbName, __FUNCTION__);
1062     }
1063
1064     while (true) {
1065         if (send(fd,message.message.asndata, message.message.asnLength,MSG_NOSIGNAL) < 0) {
1066             if (errno == EINTR) {
1067                 continue;
1068             }
1069             mdclog_write(MDCLOG_ERR, "error writing a message to CU, %s ", strerror(errno));
1070             if (!peerInfo->isConnected) {
1071                 mdclog_write(MDCLOG_ERR, "connection to CU %s is still in progress.", message.message.enodbName);
1072                 return -1;
1073             }
1074             cleanHashEntry(peerInfo, m);
1075             close(fd);
1076             char key[MAX_ENODB_NAME_SIZE * 2];
1077             snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", message.message.enodbName,
1078                      message.message.messageType);
1079             if (loglevel >= MDCLOG_DEBUG) {
1080                 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
1081             }
1082             auto tmp = m->find(key);
1083             if (tmp) {
1084                 free(tmp);
1085             }
1086             m->erase(key);
1087             return -1;
1088         }
1089         message.message.direction = 'D';
1090         // report the sent message as a JSON trace record
1091         buildJsonMessage(message);
1092
1093         if (loglevel >= MDCLOG_DEBUG) {
1094             mdclog_write(MDCLOG_DEBUG,
1095                          "SCTP message for CU %s sent from %s",
1096                          message.message.enodbName,
1097                          __FUNCTION__);
1098         }
1099         return 0;
1100     }
1101 }
1102
1103 /**
1104  * Point the reporting message at the payload and length of the received RMR message.
1105  * @param message reporting message to fill
1106  * @param rmrMessageBuffer buffer holding the received RMR message
1107  */
1108 void getRequestMetaData(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1109     message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
1110     message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
1111
1112     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1113         mdclog_write(MDCLOG_DEBUG, "Message from Xapp RAN name = %s message length = %ld",
1114                      message.message.enodbName, (unsigned long) message.message.asnLength);
1115     }
1116 }
1117
1118
1119
1120 /**
1121  * Read all pending data from an SCTP peer (edge-triggered), decode each E2AP PDU and dispatch it.
1122  * @param events the epoll event that fired for this peer
1123  * @param sctpMap map of connected peers
1124  * @param numOfMessages per-thread counter of handled SCTP messages
1125  * @param rmrMessageBuffer RMR buffers used to forward the decoded messages
1126  * @param ts receive timestamp recorded on the reporting message
1127  * @return 0
1128  */
1129 int receiveDataFromSctp(struct epoll_event *events,
1130                         Sctp_Map_t *sctpMap,
1131                         int &numOfMessages,
1132                         RmrMessagesBuffer_t &rmrMessageBuffer,
1133                         struct timespec &ts) {
1134     /* We have data on the fd waiting to be read. Read and display it.
1135      * We must read whatever data is available completely, as we are running
1136      * in edge-triggered mode and won't get a notification again for the same data. */
1137     ReportingMessages_t message {};
1138     auto done = 0;
1139     auto loglevel = mdclog_level_get();
1140
1141     // get the identity of the interface
1142     message.peerInfo = (ConnectedCU_t *)events->data.ptr;
1143
1144     struct timespec start{0, 0};
1145     struct timespec decodestart{0, 0};
1146     struct timespec end{0, 0};
1147
1148     E2AP_PDU_t *pdu = nullptr;
1149
1150     while (true) {
1151         if (loglevel >= MDCLOG_DEBUG) {
1152             mdclog_write(MDCLOG_DEBUG, "Start Read from SCTP %d fd", message.peerInfo->fileDescriptor);
1153             clock_gettime(CLOCK_MONOTONIC, &start);
1154         }
1155         // read the buffer directly to rmr payload
1156         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1157         message.message.asnLength = rmrMessageBuffer.sendMessage->len =
1158                 read(message.peerInfo->fileDescriptor, rmrMessageBuffer.sendMessage->payload, RECEIVE_SCTP_BUFFER_SIZE);
1159
1160         if (loglevel >= MDCLOG_DEBUG) {
1161             mdclog_write(MDCLOG_DEBUG, "Finish Read from SCTP %d fd message length = %ld",
1162                          message.peerInfo->fileDescriptor, message.message.asnLength);
1163         }
1164
1165         memcpy(message.message.enodbName, message.peerInfo->enodbName, sizeof(message.peerInfo->enodbName));
1166         message.message.direction = 'U';
1167         message.message.time.tv_nsec = ts.tv_nsec;
1168         message.message.time.tv_sec = ts.tv_sec;
1169
1170         if (message.message.asnLength < 0) {
1171             if (errno == EINTR) {
1172                 continue;
1173             }
1174             /* If errno == EAGAIN, that means we have read all
1175                data. So go back to the main loop. */
1176             if (errno != EAGAIN) {
1177                 mdclog_write(MDCLOG_ERR, "Read error, %s ", strerror(errno));
1178                 done = 1;
1179             } else if (loglevel >= MDCLOG_DEBUG) {
1180                 mdclog_write(MDCLOG_DEBUG, "EAGAIN - descriptor = %d", message.peerInfo->fileDescriptor);
1181             }
1182             break;
1183         } else if (message.message.asnLength == 0) {
1184             /* End of file. The remote has closed the connection. */
1185             if (loglevel >= MDCLOG_INFO) {
1186                 mdclog_write(MDCLOG_INFO, "End of file, closed connection - descriptor = %d",
1187                              message.peerInfo->fileDescriptor);
1188             }
1189             done = 1;
1190             break;
1191         }
1192
1193         if (loglevel >= MDCLOG_DEBUG) {
1194             char printBuffer[4096]{};
1195             char *tmp = printBuffer;
1196             for (size_t i = 0; i < (size_t)message.message.asnLength; ++i) {
1197                 snprintf(tmp, 3, "%02x", message.message.asndata[i]);
1198                 tmp += 2;
1199             }
1200             printBuffer[message.message.asnLength] = 0;
1201             clock_gettime(CLOCK_MONOTONIC, &end);
1202             mdclog_write(MDCLOG_DEBUG, "Before Decoding E2AP PDU for : %s, Read time is : %ld seconds, %ld nanoseconds",
1203                          message.peerInfo->enodbName, end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
1204             mdclog_write(MDCLOG_DEBUG, "PDU buffer length = %ld, data =  : %s", message.message.asnLength,
1205                          printBuffer);
1206             clock_gettime(CLOCK_MONOTONIC, &decodestart);
1207         }
1208
1209         auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1210                           message.message.asndata, message.message.asnLength);
1211         if (rval.code != RC_OK) {
1212             mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2AP PDU from RAN : %s", rval.code,
1213                          message.peerInfo->enodbName);
1214             break;
1215         }
1216
1217         if (loglevel >= MDCLOG_DEBUG) {
1218             clock_gettime(CLOCK_MONOTONIC, &end);
1219             mdclog_write(MDCLOG_DEBUG, "After Decoding E2AP PDU for : %s, Decode time is : %ld seconds, %ld nanoseconds",
1220                          message.peerInfo->enodbName, end.tv_sec - decodestart.tv_sec, end.tv_nsec - decodestart.tv_nsec);
1221             char *printBuffer;
1222             size_t size;
1223             FILE *stream = open_memstream(&printBuffer, &size);
1224             asn_fprint(stream, &asn_DEF_E2AP_PDU, pdu);
1225             mdclog_write(MDCLOG_DEBUG, "Decoded E2AP PDU : %s", printBuffer);
1226             clock_gettime(CLOCK_MONOTONIC, &decodestart);
1227         }
1228
1229         switch (pdu->present) {
1230             case E2AP_PDU_PR_initiatingMessage: {//initiating message
1231                 asnInitiatingRequest(pdu, sctpMap,message, rmrMessageBuffer);
1232                 break;
1233             }
1234             case E2AP_PDU_PR_successfulOutcome: { //successful outcome
1235                 asnSuccsesfulMsg(pdu, sctpMap, message,  rmrMessageBuffer);
1236                 break;
1237             }
1238             case E2AP_PDU_PR_unsuccessfulOutcome: { //Unsuccessful Outcome
1239                 asnUnSuccsesfulMsg(pdu, sctpMap, message, rmrMessageBuffer);
1240                 break;
1241             }
1242             default:
1243                 mdclog_write(MDCLOG_ERR, "Unknown index %d in E2AP PDU", pdu->present);
1244                 break;
1245         }
1246         if (loglevel >= MDCLOG_DEBUG) {
1247             clock_gettime(CLOCK_MONOTONIC, &end);
1248             mdclog_write(MDCLOG_DEBUG,
1249                          "After processing message and sent to rmr for : %s, Read time is : %ld seconds, %ld nanoseconds",
1250                          message.peerInfo->enodbName, end.tv_sec - decodestart.tv_sec, end.tv_nsec - decodestart.tv_nsec);
1251         }
1252         numOfMessages++;
1253         if (pdu != nullptr) {
1254             ASN_STRUCT_RESET(asn_DEF_E2AP_PDU, pdu);
1255             //ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);
1256             //pdu = nullptr;
1257         }
1258     }
1259
1260     if (done) {
1261         if (loglevel >= MDCLOG_INFO) {
1262             mdclog_write(MDCLOG_INFO, "Closed connection - descriptor = %d", message.peerInfo->fileDescriptor);
1263         }
1264         message.message.asnLength = rmrMessageBuffer.sendMessage->len =
1265                 snprintf((char *)rmrMessageBuffer.sendMessage->payload,
1266                          256,
1267                          "%s|CU disconnected unexpectedly",
1268                          message.peerInfo->enodbName);
1269         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1270
1271         if (sendRequestToXapp(message,
1272                               RIC_SCTP_CONNECTION_FAILURE,
1273                               rmrMessageBuffer) != 0) {
1274             mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
1275         }
1276
1277         /* Closing descriptor make epoll remove it from the set of descriptors which are monitored. */
1278         close(message.peerInfo->fileDescriptor);
1279         cleanHashEntry((ConnectedCU_t *) events->data.ptr, sctpMap);
1280     }
1281     if (loglevel >= MDCLOG_DEBUG) {
1282         clock_gettime(CLOCK_MONOTONIC, &end);
1283         mdclog_write(MDCLOG_DEBUG, "from receive SCTP to send RMR time is %ld seconds and %ld nanoseconds",
1284                      end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
1285
1286     }
1287     return 0;
1288 }
1289
1290 static void buildAndsendSetupRequest(ReportingMessages_t &message,
1291                                      RmrMessagesBuffer_t &rmrMessageBuffer,
1292                                      E2AP_PDU_t *pdu/*,
1293                                      string const &messageName,
1294                                      string const &ieName,
1295                                      vector<string> &functionsToAdd_v,
1296                                      vector<string> &functionsToModified_v*/) {
1297     auto logLevel = mdclog_level_get();
1298     // now we can send the data to e2Mgr
1299
1300     asn_enc_rval_t er;
1301     auto buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
1302     unsigned char buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
1303     while (true) {
1304         er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu, buffer, buffer_size);
1305         if (er.encoded == -1) {
1306             mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1307             return;
1308         } else if (er.encoded > (ssize_t) buffer_size) {
1309             buffer_size = er.encoded + 128;
1310             mdclog_write(MDCLOG_WARN, "Buffer of size %d is too small for %s. Reallocate buffer of size %d",
1311                          (int) buffer_size,
1312                          asn_DEF_E2AP_PDU.name, buffer_size);
1313             buffer_size = er.encoded + 128;
1314 //            free(buffer);
1315             continue;
1316         }
1317         buffer[er.encoded] = '\0';
1318         break;
1319     }
1320     // the PDU is now XER-encoded in 'buffer'; strip newlines, tabs and spaces so the receiver gets compact single-line XML
1321
1322     string res((char *)buffer);
1323     res.erase(std::remove(res.begin(), res.end(), '\n'), res.end());
1324     res.erase(std::remove(res.begin(), res.end(), '\t'), res.end());
1325     res.erase(std::remove(res.begin(), res.end(), ' '), res.end());
1326
1327 //    string res {};
1328 //    if (!functionsToAdd_v.empty() || !functionsToModified_v.empty()) {
1329 //        res = buildXmlData(messageName, ieName, functionsToAdd_v, functionsToModified_v, buffer, (size_t) er.encoded);
1330 //    }
1331     rmr_mbuf_t *rmrMsg;
1332 //    if (res.length() == 0) {
1333 //        rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, buffer_size + 256);
1334 //        rmrMsg->len = snprintf((char *) rmrMsg->payload, RECEIVE_SCTP_BUFFER_SIZE * 2, "%s:%d|%s",
1335 //                               message.peerInfo->sctpParams->myIP.c_str(),
1336 //                               message.peerInfo->sctpParams->rmrPort,
1337 //                               buffer);
1338 //    } else {
1339         rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, (int)res.length() + 256);
1340         rmrMsg->len = snprintf((char *) rmrMsg->payload, res.length() + 256, "%s:%d|%s",
1341                                message.peerInfo->sctpParams->myIP.c_str(),
1342                                message.peerInfo->sctpParams->rmrPort,
1343                                res.c_str());
1344 //    }
1345
1346     if (logLevel >= MDCLOG_DEBUG) {
1347         mdclog_write(MDCLOG_DEBUG, "Setup request of size %d :\n %s\n", rmrMsg->len, rmrMsg->payload);
1348     }
1349     // send to RMR
1350     rmrMsg->mtype = message.message.messageType;
1351     rmrMsg->state = 0;
1352     rmr_bytes2meid(rmrMsg, (unsigned char *) message.message.enodbName, strlen(message.message.enodbName));
1353
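    // stamp the outgoing message with a monotonically increasing transaction id,
    // formatted as a 15-character, space-padded decimal string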
1354     static unsigned char tx[32];
1355     snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1356     rmr_bytes2xact(rmrMsg, tx, strlen((const char *) tx));
1357
1358     rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
1359     if (rmrMsg == nullptr) {
1360         mdclog_write(MDCLOG_ERR, "RMR send failed, returned nullptr");
1361     } else if (rmrMsg->state != 0) {
1362         char meid[RMR_MAX_MEID]{};
1363         if (rmrMsg->state == RMR_ERR_RETRY) {
1364             usleep(5);
1365             rmrMsg->state = 0;
1366             mdclog_write(MDCLOG_INFO, "RETRY sending Message %d to Xapp from %s",
1367                          rmrMsg->mtype, rmr_get_meid(rmrMsg, (unsigned char *) meid));
1368             rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
1369             if (rmrMsg == nullptr) {
1370                 mdclog_write(MDCLOG_ERR, "RMR retry send failed, returned nullptr");
1371             } else if (rmrMsg->state != 0) {
1372                 mdclog_write(MDCLOG_ERR,
1373                              "RMR Retry failed %s sending request %d to Xapp from %s",
1374                              translateRmrErrorMessages(rmrMsg->state).c_str(),
1375                              rmrMsg->mtype,
1376                              rmr_get_meid(rmrMsg, (unsigned char *) meid));
1377             }
1378         } else {
1379             mdclog_write(MDCLOG_ERR, "RMR failed: %s. sending request %d to Xapp from %s",
1380                          translateRmrErrorMessages(rmrMsg->state).c_str(),
1381                          rmrMsg->mtype,
1382                          rmr_get_meid(rmrMsg, (unsigned char *) meid));
1383         }
1384     }
1385     message.peerInfo->gotSetup = true;
1386     buildJsonMessage(message);
1387     if (rmrMsg != nullptr) {
1388         rmr_free_msg(rmrMsg);
1389     }
1390 }
1391
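// The RAN-function extraction helpers below are compiled out (#if 0) and kept for reference only.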
1392 #if 0
1393 int RAN_Function_list_To_Vector(RANfunctions_List_t& list, vector <string> &runFunXML_v) {
1394     auto index = 0;
1395     runFunXML_v.clear();
1396     for (auto j = 0; j < list.list.count; j++) {
1397         auto *raNfunctionItemIEs = (RANfunction_ItemIEs_t *)list.list.array[j];
1398         if (raNfunctionItemIEs->id == ProtocolIE_ID_id_RANfunction_Item &&
1399             (raNfunctionItemIEs->value.present == RANfunction_ItemIEs__value_PR_RANfunction_Item)) {
1400             // encode to xml
1401             E2SM_gNB_NRT_RANfunction_Definition_t *ranFunDef = nullptr;
1402             auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER,
1403                                    &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
1404                                    (void **)&ranFunDef,
1405                                    raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.buf,
1406                                    raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.size);
1407             if (rval.code != RC_OK) {
1408                 mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2SM message from : %s",
1409                              rval.code,
1410                              asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name);
1411                 return -1;
1412             }
1413
1414             auto xml_buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
1415             unsigned char xml_buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
1416             memset(xml_buffer, 0, RECEIVE_SCTP_BUFFER_SIZE * 2);
1417             // encode to xml
1418             auto er = asn_encode_to_buffer(nullptr,
1419                                            ATS_BASIC_XER,
1420                                            &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
1421                                            ranFunDef,
1422                                            xml_buffer,
1423                                            xml_buffer_size);
1424             if (er.encoded == -1) {
1425                 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s",
1426                              asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
1427                              strerror(errno));
1428             } else if (er.encoded > (ssize_t)xml_buffer_size) {
1429                 mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, at %s line %d",
1430                              (int) xml_buffer_size,
1431                              asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name, __func__, __LINE__);
1432             } else {
1433                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1434                     mdclog_write(MDCLOG_DEBUG, "Encoding E2SM %s PDU number %d : %s",
1435                                  asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
1436                                  index++,
1437                                  xml_buffer);
1438                 }
1439
1440                 string runFuncs = (char *)(xml_buffer);
1441                 runFunXML_v.emplace_back(runFuncs);
1442             }
1443         }
1444     }
1445     return 0;
1446 }
1447
1448 int collectServiceUpdate_RequestData(E2AP_PDU_t *pdu,
1449                                      Sctp_Map_t *sctpMap,
1450                                      ReportingMessages_t &message,
1451                                      vector <string> &RANfunctionsAdded_v,
1452                                      vector <string> &RANfunctionsModified_v) {
1453     memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
1454     for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.count; i++) {
1455         auto *ie = pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.array[i];
1456         if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
1457             if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctionsID_List) {
1458                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1459                     mdclog_write(MDCLOG_DEBUG, "RAN function list has %d entries",
1460                                  ie->value.choice.RANfunctions_List.list.count);
1461                 }
1462                 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
1463                     return -1;
1464                 }
1465             }
1466         } else if (ie->id == ProtocolIE_ID_id_RANfunctionsModified) {
1467             if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctions_List) {
1468                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1469                     mdclog_write(MDCLOG_DEBUG, "RAN function list has %d entries",
1470                                  ie->value.choice.RANfunctions_List.list.count);
1471                 }
1472                 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsModified_v) != 0 ) {
1473                     return -1;
1474                 }
1475             }
1476         }
1477     }
1478     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1479         mdclog_write(MDCLOG_DEBUG, "RAN function vector has %ld entries",
1480                      RANfunctionsAdded_v.size());
1481     }
1482     return 0;
1483 }
1484
1485 #endif
1486
1487
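/**
 * Register the per-E2-node Prometheus counters for this peer. The counters array is indexed by
 * direction (IN_* for messages from the E2 node, OUT_* for messages towards it, split into
 * initiating / successful / unsuccessful outcomes), by counter kind (MSG_COUNTER / BYTES_COUNTER)
 * and by E2AP procedure code. Each counter carries two labels: {enodbName: "IN"/"OUT"} and
 * {message name: "Messages"/"Bytes"}.
 */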
1488 void buildPrometheuslist(ConnectedCU_t *peerInfo, Family<Counter> *prometheusFamily) {
1489     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Messages"}});
1490     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Bytes"}});
1491
1492     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Messages"}});
1493     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Bytes"}});
1494
1495     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICindication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Messages"}});
1496     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICindication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Bytes"}});
1497
1498     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Messages"}});
1499     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Bytes"}});
1500
1501     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Messages"}});
1502     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Bytes"}});
1503     // ---------------------------------------------
1504     peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Messages"}});
1505     peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Bytes"}});
1506
1507     peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Messages"}});
1508     peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Bytes"}});
1509
1510     peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Messages"}});
1511     peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Bytes"}});
1512
1513     peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Messages"}});
1514     peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Bytes"}});
1515     //-------------------------------------------------------------
1516
1517     peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Messages"}});
1518     peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Bytes"}});
1519
1520     peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Messages"}});
1521     peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Bytes"}});
1522
1523     peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Messages"}});
1524     peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Bytes"}});
1525
1526     //====================================================================================
1527     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Messages"}});
1528     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Bytes"}});
1529
1530     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Messages"}});
1531     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Bytes"}});
1532
1533     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Messages"}});
1534     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Bytes"}});
1535
1536     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceQuery)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Messages"}});
1537     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceQuery)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Bytes"}});
1538
1539     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Messages"}});
1540     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Bytes"}});
1541
1542     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Messages"}});
1543     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Bytes"}});
1544     //---------------------------------------------------------------------------------------------------------
1545     peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Messages"}});
1546     peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Bytes"}});
1547
1548     peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Messages"}});
1549     peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Bytes"}});
1550
1551     peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Messages"}});
1552     peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Bytes"}});
1553     //----------------------------------------------------------------------------------------------------------------
1554     peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Messages"}});
1555     peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Bytes"}});
1556
1557     peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Messages"}});
1558     peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Bytes"}});
1559 }
1560 /**
1561  * Extract the E2 node name (RAN name) from the GlobalE2node_ID IE of an E2setupRequest,
1562  * store it in the reporting message, and register the peer in the SCTP map under that name.
1563  * @param pdu      decoded E2AP PDU holding the E2setupRequest
1564  * @param sctpMap  map of connected E2 nodes, keyed by RAN name
1565  * @param message  reporting context; enodbName is filled in here
1566  * @return 0 on success, -1 when the GlobalE2node_ID cannot be parsed
1567  */
1568 int collectSetupRequestData(E2AP_PDU_t *pdu,
1569                                      Sctp_Map_t *sctpMap,
1570                                      ReportingMessages_t &message /*, vector <string> &RANfunctionsAdded_v*/) {
1571     memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
1572     for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.count; i++) {
1573         auto *ie = pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.array[i];
1574         if (ie->id == ProtocolIE_ID_id_GlobalE2node_ID) {
1575             // get the ran name for meid
1576             if (ie->value.present == E2setupRequestIEs__value_PR_GlobalE2node_ID) {
1577                 if (buildRanName(message.peerInfo->enodbName, ie) < 0) {
1578                     mdclog_write(MDCLOG_ERR, "Bad parameter in E2setupRequestIEs GlobalE2node_ID.\n");
1579                     // no message will be sent
1580                     return -1;
1581                 }
1582
1583                 memcpy(message.message.enodbName, message.peerInfo->enodbName, strlen(message.peerInfo->enodbName));
1584                 sctpMap->setkey(message.message.enodbName, message.peerInfo);
1585             }
1586         } /*else if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
1587             if (ie->value.present == E2setupRequestIEs__value_PR_RANfunctions_List) {
1588                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1589                     mdclog_write(MDCLOG_DEBUG, "Run function list have %d entries",
1590                                  ie->value.choice.RANfunctions_List.list.count);
1591                 }
1592                 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
1593                     return -1;
1594                 }
1595             }
1596         } */
1597     }
1598 //    if (mdclog_level_get() >= MDCLOG_DEBUG) {
1599 //        mdclog_write(MDCLOG_DEBUG, "Run function vector have %ld entries",
1600 //                     RANfunctionsAdded_v.size());
1601 //    }
1602     return 0;
1603 }
1604
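/**
 * Decode the PER-encoded E2AP PDU held in rmrMessageBuffer.sendMessage and re-encode it
 * in place as XER (XML), updating the message length accordingly.
 * @return 0 on success, -1 on decode/encode failure
 */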
1605 int XML_From_PER(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1606     E2AP_PDU_t *pdu = nullptr;
1607
1608     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1609         mdclog_write(MDCLOG_DEBUG, "got PER message of size %d: %s",
1610                      rmrMessageBuffer.sendMessage->len, rmrMessageBuffer.sendMessage->payload);
1611     }
1612     auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1613                            rmrMessageBuffer.sendMessage->payload, rmrMessageBuffer.sendMessage->len);
1614     if (rval.code != RC_OK) {
1615         mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2AP PDU from E2 node : %s",
1616                      rval.code,
1617                      message.message.enodbName);
1618         return -1;
1619     }
1620
1621     int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
1622     auto er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu,
1623                                    rmrMessageBuffer.sendMessage->payload, buff_size);
1624     if (er.encoded == -1) {
1625         mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1626         return -1;
1627     } else if (er.encoded > (ssize_t)buff_size) {
1628         mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, at %s line %d",
1629                      (int)buff_size,
1630                      asn_DEF_E2AP_PDU.name,
1631                      __func__,
1632                      __LINE__);
1633         return -1;
1634     }
1635     rmrMessageBuffer.sendMessage->len = er.encoded;
1636     return 0;
1637
1638 }
1639
1640 /**
1641  * Handle an E2AP initiating message from an E2 node: bump the matching counters and dispatch by procedure code over RMR.
1642  * @param pdu  decoded E2AP PDU
1643  * @param message  reporting context for the current message
1644  * @param rmrMessageBuffer  RMR context and buffers used for sending
1645  */
1646 void asnInitiatingRequest(E2AP_PDU_t *pdu,
1647                           Sctp_Map_t *sctpMap,
1648                           ReportingMessages_t &message,
1649                           RmrMessagesBuffer_t &rmrMessageBuffer) {
1650     auto logLevel = mdclog_level_get();
1651     auto procedureCode = ((InitiatingMessage_t *) pdu->choice.initiatingMessage)->procedureCode;
1652     if (logLevel >= MDCLOG_DEBUG) {
1653         mdclog_write(MDCLOG_DEBUG, "Initiating message %ld\n", procedureCode);
1654     }
1655     switch (procedureCode) {
1656         case ProcedureCode_id_E2setup: {
1657             if (logLevel >= MDCLOG_DEBUG) {
1658                 mdclog_write(MDCLOG_DEBUG, "Got E2setup");
1659             }
1660
1661 //            vector <string> RANfunctionsAdded_v;
1662 //            vector <string> RANfunctionsModified_v;
1663 //            RANfunctionsAdded_v.clear();
1664 //            RANfunctionsModified_v.clear();
1665             if (collectSetupRequestData(pdu, sctpMap, message) != 0) {
1666                 break;
1667             }
1668
1669             buildPrometheuslist(message.peerInfo, message.peerInfo->sctpParams->prometheusFamily);
1670
1671             string messageName("E2setupRequest");
1672             string ieName("E2setupRequestIEs");
1673             message.message.messageType = RIC_E2_SETUP_REQ;
1674             message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
1675             message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment((double)message.message.asnLength);
1676             buildAndsendSetupRequest(message, rmrMessageBuffer, pdu);
1677             break;
1678         }
1679         case ProcedureCode_id_RICserviceUpdate: {
1680             if (logLevel >= MDCLOG_DEBUG) {
1681                 mdclog_write(MDCLOG_DEBUG, "Got RICserviceUpdate %s", message.message.enodbName);
1682             }
1683 //            vector <string> RANfunctionsAdded_v;
1684 //            vector <string> RANfunctionsModified_v;
1685 //            RANfunctionsAdded_v.clear();
1686 //            RANfunctionsModified_v.clear();
1687 //            if (collectServiceUpdate_RequestData(pdu, sctpMap, message,
1688 //                                                 RANfunctionsAdded_v, RANfunctionsModified_v) != 0) {
1689 //                break;
1690 //            }
1691
1692             string messageName("RICserviceUpdate");
1693             string ieName("RICserviceUpdateIEs");
1694             message.message.messageType = RIC_SERVICE_UPDATE;
1695             message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
1696             message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment((double)message.message.asnLength);
1697
1698             buildAndsendSetupRequest(message, rmrMessageBuffer, pdu);
1699             break;
1700         }
1701         case ProcedureCode_id_ErrorIndication: {
1702             if (logLevel >= MDCLOG_DEBUG) {
1703                 mdclog_write(MDCLOG_DEBUG, "Got ErrorIndication %s", message.message.enodbName);
1704             }
1705             message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication]->Increment();
1706             message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication]->Increment((double)message.message.asnLength);
1707             if (sendRequestToXapp(message, RIC_ERROR_INDICATION, rmrMessageBuffer) != 0) {
1708                 mdclog_write(MDCLOG_ERR, "RIC_ERROR_INDICATION failed to send to xAPP");
1709             }
1710             break;
1711         }
1712         case ProcedureCode_id_Reset: {
1713             if (logLevel >= MDCLOG_DEBUG) {
1714                 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
1715             }
1716
1717             message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
1718             message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment((double)message.message.asnLength);
1719             if (XML_From_PER(message, rmrMessageBuffer) < 0) {
1720                 break;
1721             }
1722
1723             if (sendRequestToXapp(message, RIC_E2_RESET_REQ, rmrMessageBuffer) != 0) {
1724                 mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_REQ message failed to send to xAPP");
1725             }
1726             break;
1727         }
1728         case ProcedureCode_id_RICindication: {
1729             if (logLevel >= MDCLOG_DEBUG) {
1730                 mdclog_write(MDCLOG_DEBUG, "Got RICindication %s", message.message.enodbName);
1731             }
1732             for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.count; i++) {
1733                 auto messageSent = false;
1734                 RICindication_IEs_t *ie = pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.array[i];
1735                 if (logLevel >= MDCLOG_DEBUG) {
1736                     mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1737                 }
1738                 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1739                     if (logLevel >= MDCLOG_DEBUG) {
1740                         mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1741                     }
1742                     if (ie->value.present == RICindication_IEs__value_PR_RICrequestID) {
1743                         static unsigned char tx[32];
1744                         message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_INDICATION;
1745                         snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1746                         rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1747                         rmr_bytes2meid(rmrMessageBuffer.sendMessage,
1748                                        (unsigned char *)message.message.enodbName,
1749                                        strlen(message.message.enodbName));
1750                         rmrMessageBuffer.sendMessage->state = 0;
1751                         rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1752
1753                         //ie->value.choice.RICrequestID.ricInstanceID;
1754                         if (mdclog_level_get() >= MDCLOG_DEBUG) {
1755                             mdclog_write(MDCLOG_DEBUG, "sub id = %d, mtype = %d, ric instance id %ld, requestor id = %ld",
1756                                          rmrMessageBuffer.sendMessage->sub_id,
1757                                          rmrMessageBuffer.sendMessage->mtype,
1758                                          ie->value.choice.RICrequestID.ricInstanceID,
1759                                          ie->value.choice.RICrequestID.ricRequestorID);
1760                         }
1761                         message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICindication]->Increment();
1762                         message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICindication]->Increment((double)message.message.asnLength);
1763                         sendRmrMessage(rmrMessageBuffer, message);
1764                         messageSent = true;
1765                     } else {
1766                         mdclog_write(MDCLOG_ERR, "RIC request id missing, illegal request");
1767                     }
1768                 }
1769                 if (messageSent) {
1770                     break;
1771                 }
1772             }
1773             break;
1774         }
1775         default: {
1776             mdclog_write(MDCLOG_ERR, "Undefined or not supported message = %ld", procedureCode);
1777             message.message.messageType = 0; // no RMR message type yet
1778
1779             buildJsonMessage(message);
1780
1781             break;
1782         }
1783     }
1784 }
1785
1786 /**
1787  * Handle an E2AP successful-outcome message from an E2 node: bump the IN_SUCC counters and forward it to the xApp over RMR.
1788  * @param pdu  decoded E2AP PDU
1789  * @param message  reporting context for the current message
1790  * @param rmrMessageBuffer  RMR context and buffers used for sending
1791  */
1792 void asnSuccsesfulMsg(E2AP_PDU_t *pdu,
1793                       Sctp_Map_t *sctpMap,
1794                       ReportingMessages_t &message,
1795                       RmrMessagesBuffer_t &rmrMessageBuffer) {
1796     auto procedureCode = pdu->choice.successfulOutcome->procedureCode;
1797     auto logLevel = mdclog_level_get();
1798     if (logLevel >= MDCLOG_INFO) {
1799         mdclog_write(MDCLOG_INFO, "Successful Outcome %ld", procedureCode);
1800     }
1801     switch (procedureCode) {
1802         case ProcedureCode_id_Reset: {
1803             if (logLevel >= MDCLOG_DEBUG) {
1804                 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
1805             }
1806             message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
1807             message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment((double)message.message.asnLength);
1808             if (XML_From_PER(message, rmrMessageBuffer) < 0) {
1809                 break;
1810             }
1811             if (sendRequestToXapp(message, RIC_E2_RESET_RESP, rmrMessageBuffer) != 0) {
1812                 mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_RESP message failed to send to xAPP");
1813             }
1814             break;
1815         }
1816         case ProcedureCode_id_RICcontrol: {
1817             if (logLevel >= MDCLOG_DEBUG) {
1818                 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
1819             }
1820             for (auto i = 0;
1821                  i < pdu->choice.successfulOutcome->value.choice.RICcontrolAcknowledge.protocolIEs.list.count; i++) {
1822                 auto messageSent = false;
1823                 RICcontrolAcknowledge_IEs_t *ie = pdu->choice.successfulOutcome->value.choice.RICcontrolAcknowledge.protocolIEs.list.array[i];
1824                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1825                     mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1826                 }
1827                 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1828                     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1829                         mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1830                     }
1831                     if (ie->value.present == RICcontrolAcknowledge_IEs__value_PR_RICrequestID) {
1832                         message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_ACK;
1833                         rmrMessageBuffer.sendMessage->state = 0;
1834 //                        rmrMessageBuffer.sendMessage->sub_id = (int) ie->value.choice.RICrequestID.ricRequestorID;
1835                         rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1836
1837                         static unsigned char tx[32];
1838                         snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1839                         rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1840                         rmr_bytes2meid(rmrMessageBuffer.sendMessage,
1841                                        (unsigned char *)message.message.enodbName,
1842                                        strlen(message.message.enodbName));
1843
1844                         message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
1845                         message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment((double)message.message.asnLength);
1846                         sendRmrMessage(rmrMessageBuffer, message);
1847                         messageSent = true;
1848                     } else {
1849                         mdclog_write(MDCLOG_ERR, "RIC request id missing, illegal request");
1850                     }
1851                 }
1852                 if (messageSent) {
1853                     break;
1854                 }
1855             }
1856
1857             break;
1858         }
1859         case ProcedureCode_id_RICsubscription: {
1860             if (logLevel >= MDCLOG_DEBUG) {
1861                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
1862             }
1863             message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
1864             message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment((double)message.message.asnLength);
1865             if (sendRequestToXapp(message, RIC_SUB_RESP, rmrMessageBuffer) != 0) {
1866                 mdclog_write(MDCLOG_ERR, "Subscription successful message failed to send to xAPP");
1867             }
1868             break;
1869         }
1870         case ProcedureCode_id_RICsubscriptionDelete: {
1871             if (logLevel >= MDCLOG_DEBUG) {
1872                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
1873             }
1874             message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
1875             message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment((double)message.message.asnLength);
1876             if (sendRequestToXapp(message, RIC_SUB_DEL_RESP, rmrMessageBuffer) != 0) {
1877                 mdclog_write(MDCLOG_ERR, "Subscription delete successful message failed to send to xAPP");
1878             }
1879             break;
1880         }
1881         default: {
1882             mdclog_write(MDCLOG_WARN, "Undefined or not supported message = %ld", procedureCode);
1883             message.message.messageType = 0; // no RMR message type yet
1884             buildJsonMessage(message);
1885
1886             break;
1887         }
1888     }
1889 }
1890
1891 /**
1892  * Handle an E2AP unsuccessful-outcome message from an E2 node: bump the IN_UN_SUCC counters and forward the failure to the xApp over RMR.
1893  * @param pdu  decoded E2AP PDU
1894  * @param message  reporting context for the current message
1895  * @param rmrMessageBuffer  RMR context and buffers used for sending
1896  */
1897 void asnUnSuccsesfulMsg(E2AP_PDU_t *pdu,
1898                         Sctp_Map_t *sctpMap,
1899                         ReportingMessages_t &message,
1900                         RmrMessagesBuffer_t &rmrMessageBuffer) {
1901     auto procedureCode = pdu->choice.unsuccessfulOutcome->procedureCode;
1902     auto logLevel = mdclog_level_get();
1903     if (logLevel >= MDCLOG_INFO) {
1904         mdclog_write(MDCLOG_INFO, "Unsuccessful Outcome %ld", procedureCode);
1905     }
1906     switch (procedureCode) {
1907         case ProcedureCode_id_RICcontrol: {
1908             if (logLevel >= MDCLOG_DEBUG) {
1909                 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
1910             }
1911             for (int i = 0;
1912                  i < pdu->choice.unsuccessfulOutcome->value.choice.RICcontrolFailure.protocolIEs.list.count; i++) {
1913                 auto messageSent = false;
1914                 RICcontrolFailure_IEs_t *ie = pdu->choice.unsuccessfulOutcome->value.choice.RICcontrolFailure.protocolIEs.list.array[i];
1915                 if (logLevel >= MDCLOG_DEBUG) {
1916                     mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1917                 }
1918                 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1919                     if (logLevel >= MDCLOG_DEBUG) {
1920                         mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1921                     }
1922                     if (ie->value.present == RICcontrolFailure_IEs__value_PR_RICrequestID) {
1923                         message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_FAILURE;
1924                         rmrMessageBuffer.sendMessage->state = 0;
1925 //                        rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricRequestorID;
1926                         rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1927                         static unsigned char tx[32];
1928                         snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1929                         rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1930                         rmr_bytes2meid(rmrMessageBuffer.sendMessage, (unsigned char *) message.message.enodbName,
1931                                        strlen(message.message.enodbName));
1932                         message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
1933                         message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment((double)message.message.asnLength);
1934                         sendRmrMessage(rmrMessageBuffer, message);
1935                         messageSent = true;
1936                     } else {
1937                         mdclog_write(MDCLOG_ERR, "RIC request id missing, illegal request");
1938                     }
1939                 }
1940                 if (messageSent) {
1941                     break;
1942                 }
1943             }
1944             break;
1945         }
1946         case ProcedureCode_id_RICsubscription: {
1947             if (logLevel >= MDCLOG_DEBUG) {
1948                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
1949             }
1950             message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
1951             message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment((double)message.message.asnLength);
1952             if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
1953                 mdclog_write(MDCLOG_ERR, "Subscription unsuccessful message failed to send to xAPP");
1954             }
1955             break;
1956         }
1957         case ProcedureCode_id_RICsubscriptionDelete: {
1958             if (logLevel >= MDCLOG_DEBUG) {
1959                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
1960             }
1961             message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
1962             message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment((double)message.message.asnLength);
1963             if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
1964                 mdclog_write(MDCLOG_ERR, "Subscription Delete unsuccessful message failed to send to xAPP");
1965             }
1966             break;
1967         }
1968         default: {
1969             mdclog_write(MDCLOG_WARN, "Undefined or not supported message = %ld", procedureCode);
1970             message.message.messageType = 0; // no RMR message type yet
1971
1972             buildJsonMessage(message);
1973
1974             break;
1975         }
1976     }
1977 }
1978
1979 /**
1980  * Set the RMR message type, the MEID (RAN name) and a fresh transaction id on the prepared send buffer and forward it to the xApp.
1981  * @param message    reporting context; enodbName is used as the MEID
1982  * @param requestId  RMR message type to send
1983  * @param rmrMmessageBuffer  RMR context and buffers used for sending
1984  * @return result of sendRmrMessage (0 on success)
1985  */
1986 int sendRequestToXapp(ReportingMessages_t &message,
1987                       int requestId,
1988                       RmrMessagesBuffer_t &rmrMmessageBuffer) {
1989     rmr_bytes2meid(rmrMmessageBuffer.sendMessage,
1990                    (unsigned char *)message.message.enodbName,
1991                    strlen(message.message.enodbName));
1992     message.message.messageType = rmrMmessageBuffer.sendMessage->mtype = requestId;
1993     rmrMmessageBuffer.sendMessage->state = 0;
1994     static unsigned char tx[32];
1995     snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1996     rmr_bytes2xact(rmrMmessageBuffer.sendMessage, tx, strlen((const char *) tx));
1997
1998     auto rc = sendRmrMessage(rmrMmessageBuffer, message);
1999     return rc;
2000 }
2001
2002 /**
2003  * Initialize the RMR context, wait until the routing table is loaded (rmr_ready) and add the RMR receive descriptor to the epoll set; on failure rmrCtx is left as nullptr.
2004  * @param pSctpParams  SCTP/RMR runtime parameters; rmrCtx and rmrListenFd are filled in
2005  */
2006 void getRmrContext(sctp_params_t &pSctpParams) {
2007     pSctpParams.rmrCtx = nullptr;
2008     pSctpParams.rmrCtx = rmr_init(pSctpParams.rmrAddress, RECEIVE_XAPP_BUFFER_SIZE, RMRFL_NONE);
2009     if (pSctpParams.rmrCtx == nullptr) {
2010         mdclog_write(MDCLOG_ERR, "Failed to initialize RMR");
2011         return;
2012     }
2013
2014     rmr_set_stimeout(pSctpParams.rmrCtx, 0);    // disable retries for any send operation
2015     // wait until the routing table exists and RMR is ready to run
2016     if (mdclog_level_get() >= MDCLOG_INFO) {
2017         mdclog_write(MDCLOG_INFO, "RMR initialized, waiting for RMR ready state");
2018     }
2019     int rmrReady = 0;
2020     int count = 0;
2021     while (!rmrReady) {
2022         if ((rmrReady = rmr_ready(pSctpParams.rmrCtx)) == 0) {
2023             sleep(1);
2024         }
2025         count++;
2026         if (count % 60 == 0) {
2027             mdclog_write(MDCLOG_INFO, "waiting for RMR ready state for %d seconds", count);
2028         }
2029     }
2030     if (mdclog_level_get() >= MDCLOG_INFO) {
2031         mdclog_write(MDCLOG_INFO, "RMR running");
2032     }
2033     rmr_init_trace(pSctpParams.rmrCtx, 200);
2034     // get the RMR fd for the epoll
2035     pSctpParams.rmrListenFd = rmr_get_rcvfd(pSctpParams.rmrCtx);
2036     struct epoll_event event{};
2037     // add RMR fd to epoll
2038     event.events = (EPOLLIN);
2039     event.data.fd = pSctpParams.rmrListenFd;
2040     // add listening RMR FD to epoll
2041     if (epoll_ctl(pSctpParams.epoll_fd, EPOLL_CTL_ADD, pSctpParams.rmrListenFd, &event)) {
2042         mdclog_write(MDCLOG_ERR, "Failed to add RMR descriptor to epoll");
2043         close(pSctpParams.rmrListenFd);
2044         rmr_close(pSctpParams.rmrCtx);
2045         pSctpParams.rmrCtx = nullptr;
2046     }
2047 }
2048
2049 /**
2050  * Decode the XML (XER) encoded E2AP PDU received from an xApp and re-encode it in place as aligned PER, updating the message length.
2051  * @param message  reporting context (used for the E2 node name in error logs)
2052  * @param rmrMessageBuffer  buffer whose rcvMessage payload is converted from XML to PER
2053  * @return 0 on success, -1 on decode/encode failure
2054  */
2055 int PER_FromXML(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
2056     E2AP_PDU_t *pdu = nullptr;
2057
2058     if (mdclog_level_get() >= MDCLOG_DEBUG) {
2059         mdclog_write(MDCLOG_DEBUG, "got XML format data from xApp of size %d: %s",
2060                 rmrMessageBuffer.rcvMessage->len, rmrMessageBuffer.rcvMessage->payload);
2061     }
2062     auto rval = asn_decode(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, (void **) &pdu,
2063                            rmrMessageBuffer.rcvMessage->payload, rmrMessageBuffer.rcvMessage->len);
2064     if (rval.code != RC_OK) {
2065         mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) XML E2AP PDU from E2MGR : %s",
2066                      rval.code,
2067                      message.message.enodbName);
2068         return -1;
2069     }
2070
2071     int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
2072     auto er = asn_encode_to_buffer(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, pdu,
2073                                    rmrMessageBuffer.rcvMessage->payload, buff_size);
2074     if (er.encoded == -1) {
2075         mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
2076         return -1;
2077     } else if (er.encoded > (ssize_t)buff_size) {
2078         mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, at %s line %d",
2079                      (int)buff_size,
2080                      asn_DEF_E2AP_PDU.name,
2081                      __func__,
2082                      __LINE__);
2083         return -1;
2084     }
2085     rmrMessageBuffer.rcvMessage->len = er.encoded;
2086     return 0;
2087 }
2088
2089 /**
2090  * Receive one RMR message from an xApp or the E2 Manager, convert XML to PER where needed, bump the outgoing counters and forward it over SCTP; keep-alive, health-check and SCTP-clear requests are handled locally.
2091  * @param sctpMap  map of connected E2 nodes, keyed by RAN name
2092  * @param rmrMessageBuffer  RMR context and receive/send buffers
2093  * @param ts  receive timestamp recorded in the reporting message
2094  * @return 0 on success, negative value on error
2095  */
2096 int receiveXappMessages(Sctp_Map_t *sctpMap,
2097                         RmrMessagesBuffer_t &rmrMessageBuffer,
2098                         struct timespec &ts) {
2099     if (rmrMessageBuffer.rcvMessage == nullptr) {
2100         // we have an error
2101         mdclog_write(MDCLOG_ERR, "RMR message allocation failed, %s", strerror(errno));
2102         return -1;
2103     }
2104
2105     if (mdclog_level_get() >= MDCLOG_DEBUG) {
2106         mdclog_write(MDCLOG_DEBUG, "Call to rmr_rcv_msg");
2107     }
2108     rmrMessageBuffer.rcvMessage = rmr_rcv_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.rcvMessage);
2109     if (rmrMessageBuffer.rcvMessage == nullptr) {
2110         mdclog_write(MDCLOG_ERR, "RMR receive returned a null pointer, reallocating RMR message buffer");
2111         rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2112         return -2;
2113     }
2114     ReportingMessages_t message;
2115     message.message.direction = 'D';
2116     message.message.time.tv_nsec = ts.tv_nsec;
2117     message.message.time.tv_sec = ts.tv_sec;
2118
2119     // get message payload
2120     //auto msgData = msg->payload;
2121     if (rmrMessageBuffer.rcvMessage->state != 0) {
2122         mdclog_write(MDCLOG_ERR, "RMR received message with state = %d", rmrMessageBuffer.rcvMessage->state);
2123         return -1;
2124     }
2125     rmr_get_meid(rmrMessageBuffer.rcvMessage, (unsigned char *)message.message.enodbName);
2126     message.peerInfo = (ConnectedCU_t *) sctpMap->find(message.message.enodbName);
2127     if (message.peerInfo == nullptr) {
2128         auto type = rmrMessageBuffer.rcvMessage->mtype;
2129         switch (type) {
2130             case RIC_SCTP_CLEAR_ALL:
2131             case E2_TERM_KEEP_ALIVE_REQ:
2132             case RIC_HEALTH_CHECK_REQ:
2133                 break;
2134             default:
2135                 mdclog_write(MDCLOG_ERR, "Failed to send message no CU entry %s", message.message.enodbName);
2136                 return -1;
2137         }
2138     }
2139
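    // dispatch by RMR message type: convert XML payloads to PER where needed, bump the
    // per-procedure Prometheus counters and forward the message to the E2 node over SCTP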
2140     switch (rmrMessageBuffer.rcvMessage->mtype) {
2141         case RIC_E2_SETUP_RESP : {
2142             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2143                 break;
2144             }
2145             message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
2146             message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment(rmrMessageBuffer.rcvMessage->len);
2147             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2148                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_RESP");
2149                 return -6;
2150             }
2151             break;
2152         }
2153         case RIC_E2_SETUP_FAILURE : {
2154             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2155                 break;
2156             }
2157             message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
2158             message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment(rmrMessageBuffer.rcvMessage->len);
2159             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2160                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_FAILURE");
2161                 return -6;
2162             }
2163             break;
2164         }
2165         case RIC_ERROR_INDICATION: {
2166             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication]->Increment();
2167             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication]->Increment(rmrMessageBuffer.rcvMessage->len);
2168             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2169                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_ERROR_INDICATION");
2170                 return -6;
2171             }
2172             break;
2173         }
2174         case RIC_SUB_REQ: {
2175             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
2176             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment(rmrMessageBuffer.rcvMessage->len);
2177             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2178                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_REQ");
2179                 return -6;
2180             }
2181             break;
2182         }
2183         case RIC_SUB_DEL_REQ: {
2184             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
2185             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment(rmrMessageBuffer.rcvMessage->len);
2186             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2187                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_DEL_REQ");
2188                 return -6;
2189             }
2190             break;
2191         }
2192         case RIC_CONTROL_REQ: {
2193             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
2194             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment(rmrMessageBuffer.rcvMessage->len);
2195             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2196                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_CONTROL_REQ");
2197                 return -6;
2198             }
2199             break;
2200         }
2201         case RIC_SERVICE_QUERY: {
2202             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2203                 break;
2204             }
2205             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment();
2206             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment(rmrMessageBuffer.rcvMessage->len);
2207             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2208                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_QUERY");
2209                 return -6;
2210             }
2211             break;
2212         }
2213         case RIC_SERVICE_UPDATE_ACK: {
2214             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2215                 break;
2216             }
2217             message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
2218             message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment(rmrMessageBuffer.rcvMessage->len);
2219             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2220                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_ACK");
2221                 return -6;
2222             }
2223             break;
2224         }
2225         case RIC_SERVICE_UPDATE_FAILURE: {
2226             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2227                 break;
2228             }
2229             message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
2230             message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment(rmrMessageBuffer.rcvMessage->len);
2231             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2232                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_FAILURE");
2233                 return -6;
2234             }
2235             break;
2236         }
2237         case RIC_E2_RESET_REQ: {
2238             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2239                 break;
2240             }
2241             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
2242             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment(rmrMessageBuffer.rcvMessage->len);
2243             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2244                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET_REQ");
2245                 return -6;
2246             }
2247             break;
2248         }
2249         case RIC_E2_RESET_RESP: {
2250             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2251                 break;
2252             }
2253             message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
2254             message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment(rmrMessageBuffer.rcvMessage->len);
2255             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2256                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET_RESP");
2257                 return -6;
2258             }
2259             break;
2260         }
2261         case RIC_SCTP_CLEAR_ALL: {
2262             mdclog_write(MDCLOG_INFO, "RIC_SCTP_CLEAR_ALL");
2263             // loop over all keys, close each peer socket, notify the xApps, then clear the whole map.
2264             vector<char *> v;
2265             sctpMap->getKeys(v);
2266             for (auto const &iter : v) {
2267                 if (!boost::starts_with((string) (iter), "host:") && !boost::starts_with((string) (iter), "msg:")) {
2268                     auto *peerInfo = (ConnectedCU_t *) sctpMap->find(iter);
2269                     if (peerInfo == nullptr) {
2270                         continue;
2271                     }
2272                     close(peerInfo->fileDescriptor);
2273                     memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
2274                     message.message.direction = 'D';
2275                     message.message.time.tv_nsec = ts.tv_nsec;
2276                     message.message.time.tv_sec = ts.tv_sec;
2277
2278                     message.message.asnLength = rmrMessageBuffer.sendMessage->len =
2279                             snprintf((char *)rmrMessageBuffer.sendMessage->payload,
2280                                      256,
2281                                      "%s|RIC_SCTP_CLEAR_ALL",
2282                                      peerInfo->enodbName);
2283                     message.message.asndata = rmrMessageBuffer.sendMessage->payload;
2284                     mdclog_write(MDCLOG_INFO, "%s", message.message.asndata);
2285                     if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
2286                         mdclog_write(MDCLOG_ERR, "Failed to send RIC_SCTP_CONNECTION_FAILURE message to xApp");
2287                     }
2288                     free(peerInfo);
2289                 }
2290             }
2291
2292             sleep(1);
2293             sctpMap->clear();
2294             break;
2295         }
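         // E2_TERM_KEEP_ALIVE_REQ: echo the pre-built keep-alive payload (rmrMessageBuffer.ka_message)
         // back to the requester as E2_TERM_KEEP_ALIVE_RESP, as a liveness indication for this E2 Termination.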
2296         case E2_TERM_KEEP_ALIVE_REQ: {
2297             // send message back
2298             rmr_bytes2payload(rmrMessageBuffer.sendMessage,
2299                               (unsigned char *)rmrMessageBuffer.ka_message,
2300                               rmrMessageBuffer.ka_message_len);
2301             rmrMessageBuffer.sendMessage->mtype = E2_TERM_KEEP_ALIVE_RESP;
2302             rmrMessageBuffer.sendMessage->state = 0;
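             // stamp a fresh transaction id so the receiver can correlate the response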
2303             static unsigned char tx[32];
2304             auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2305             rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, txLen);
2306             rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2307             if (rmrMessageBuffer.sendMessage == nullptr) {
2308                 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2309                 mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP; rmr_send_msg returned NULL");
2310             } else if (rmrMessageBuffer.sendMessage->state != 0)  {
2311                 mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP, RMR state = %d (%s)",
2312                              rmrMessageBuffer.sendMessage->state, translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str());
2313             } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
2314                 mdclog_write(MDCLOG_DEBUG, "Got keep-alive request, sent response: %s", rmrMessageBuffer.ka_message);
2315             }
2316
2317             break;
2318         }
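         // RIC_HEALTH_CHECK_REQ: reply with a fixed "OK" payload as RIC_HEALTH_CHECK_RESP.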
2319         case RIC_HEALTH_CHECK_REQ: {
2320             // send message back
2321             rmr_bytes2payload(rmrMessageBuffer.sendMessage,
2322                               (unsigned char *)"OK",
2323                               2);
2324             rmrMessageBuffer.sendMessage->mtype = RIC_HEALTH_CHECK_RESP;
2325             rmrMessageBuffer.sendMessage->state = 0;
2326             static unsigned char tx[32];
2327             auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2328             rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, txLen);
2329             rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2330             if (rmrMessageBuffer.sendMessage == nullptr) {
2331                 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2332                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP; rmr_send_msg returned NULL");
2333             } else if (rmrMessageBuffer.sendMessage->state != 0)  {
2334                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP, RMR state = %d (%s)",
2335                              rmrMessageBuffer.sendMessage->state, translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str());
2336             } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
2337                 mdclog_write(MDCLOG_DEBUG, "Got RIC_HEALTH_CHECK_REQ request, sent response: OK");
2338             }
2339
2340             break;
2341         }
2342
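         // Any other message type is logged, traced as JSON, and reported back to the caller as an error.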
2343         default:
2344             mdclog_write(MDCLOG_WARN, "Message Type : %d is not supported", rmrMessageBuffer.rcvMessage->mtype);
2345             message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
2346             message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
2347             message.message.time.tv_nsec = ts.tv_nsec;
2348             message.message.time.tv_sec = ts.tv_sec;
2349             message.message.messageType = rmrMessageBuffer.rcvMessage->mtype;
2350
2351             buildJsonMessage(message);
2352
2353
2354             return -7;
2355     }
2356     if (mdclog_level_get() >= MDCLOG_DEBUG) {
2357         mdclog_write(MDCLOG_DEBUG, "EXIT OK from %s", __FUNCTION__);
2358     }
2359     return 0;
2360 }
2361
2362 /**
2363  * Send a message to the CU without waiting for a successful or unsuccessful result
2364  * @param messageBuffer
2365  * @param message
2366  * @param failedMsgId
2367  * @param sctpMap
2368  * @return
2369  */
2370 int sendDirectionalSctpMsg(RmrMessagesBuffer_t &messageBuffer,
2371                            ReportingMessages_t &message,
2372                            int failedMsgId,
2373                            Sctp_Map_t *sctpMap) {
2374
2375     getRequestMetaData(message, messageBuffer);
2376     if (mdclog_level_get() >= MDCLOG_INFO) {
2377         mdclog_write(MDCLOG_INFO, "sending message to %s", message.message.enodbName);
2378     }
2379
2380     auto rc = sendMessagetoCu(sctpMap, messageBuffer, message, failedMsgId);
2381     return rc;
2382 }
2383
2384 /**
2385  * Forward the xApp message to the connected CU over SCTP.
2386  * @param sctpMap
2387  * @param messageBuffer
2388  * @param message
2389  * @param failedMesgId
2390  * @return
2391  */
2392 int sendMessagetoCu(Sctp_Map_t *sctpMap,
2393                     RmrMessagesBuffer_t &messageBuffer,
2394                     ReportingMessages_t &message,
2395                     int failedMesgId) {
2396     // the peer connection info is already attached to the message; forward it over SCTP
2397     message.message.messageType = messageBuffer.rcvMessage->mtype;
2398     auto rc = sendSctpMsg(message.peerInfo, message, sctpMap);
2399     return rc;
2400 }
2401
2402 /**
2403  * Report back to the xApp that its message could not be forwarded because the
2404  * target gNB/eNB was not found.
2405  * @param rmrMessageBuffer the RMR context and the reply message to send
2406  * @param message reporting metadata built from the xApp request
2407  * @param failedMesgId the message type used for the error reply
2408  */
2409 void
2410 sendFailedSendingMessagetoXapp(RmrMessagesBuffer_t &rmrMessageBuffer, ReportingMessages_t &message, int failedMesgId) {
2411     rmr_mbuf_t *msg = rmrMessageBuffer.sendMessage;
2412     msg->len = snprintf((char *) msg->payload, 200, "the gNb/eNode name %s not found",
2413                         message.message.enodbName);
2414     if (mdclog_level_get() >= MDCLOG_INFO) {
2415         mdclog_write(MDCLOG_INFO, "%s", msg->payload);
2416     }
2417     msg->mtype = failedMesgId;
2418     msg->state = 0;
2419
2420     static unsigned char tx[32];
2421     snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2422     rmr_bytes2xact(msg, tx, strlen((const char *) tx));
2423
2424     sendRmrMessage(rmrMessageBuffer, message);
2425 }
2426
2427
2428
2429 /**
2430  * Register the peer's socket with the epoll instance; on failure, close the socket and clean up the map entries.
2431  * @param epoll_fd
2432  * @param peerInfo
2433  * @param events
2434  * @param sctpMap
2435  * @param enodbName
2436  * @param msgType
2437  * @return
2438  */
2439 int addToEpoll(int epoll_fd,
2440                ConnectedCU_t *peerInfo,
2441                uint32_t events,
2442                Sctp_Map_t *sctpMap,
2443                char *enodbName,
2444                int msgType) {
2445     // Add the peer's socket to the epoll instance
2446     struct epoll_event event{};
2447     event.data.ptr = peerInfo;
2448     event.events = events;
2449     if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, peerInfo->fileDescriptor, &event) < 0) {
2450         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2451             mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_ADD failed (check whether to quit here), %s, %s %d",
2452                          strerror(errno), __func__, __LINE__);
2453         }
2454         close(peerInfo->fileDescriptor);
2455         if (enodbName != nullptr) {
2456             cleanHashEntry(peerInfo, sctpMap);
2457             char key[MAX_ENODB_NAME_SIZE * 2];
2458             snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", enodbName, msgType);
2459             if (mdclog_level_get() >= MDCLOG_DEBUG) {
2460                 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
2461             }
2462             auto tmp = sctpMap->find(key);
2463             if (tmp) {
2464                 free(tmp);
2465                 sctpMap->erase(key);
2466             }
2467         } else {
2468             peerInfo->enodbName[0] = 0;
2469         }
2470         mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_ADD failed (check whether to quit here)");
2471         return -1;
2472     }
2473     return 0;
2474 }
2475
2476 /**
2477  * Update the epoll registration of the peer's socket; on failure, close the socket and clean up the map entries.
2478  * @param epoll_fd
2479  * @param peerInfo
2480  * @param events
2481  * @param sctpMap
2482  * @param enodbName
2483  * @param msgType
2484  * @return
2485  */
2486 int modifyToEpoll(int epoll_fd,
2487                   ConnectedCU_t *peerInfo,
2488                   uint32_t events,
2489                   Sctp_Map_t *sctpMap,
2490                   char *enodbName,
2491                   int msgType) {
2492     // Modify the peer's socket registration in the epoll instance
2493     struct epoll_event event{};
2494     event.data.ptr = peerInfo;
2495     event.events = events;
2496     if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, peerInfo->fileDescriptor, &event) < 0) {
2497         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2498             mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_MOD failed (check whether to quit here), %s, %s %d",
2499                          strerror(errno), __func__, __LINE__);
2500         }
2501         close(peerInfo->fileDescriptor);
2502         cleanHashEntry(peerInfo, sctpMap);
2503         char key[MAX_ENODB_NAME_SIZE * 2];
2504         snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", enodbName, msgType);
2505         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2506             mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
2507         }
2508         auto tmp = sctpMap->find(key);
2509         if (tmp) {
2510             free(tmp);
2511         }
2512         sctpMap->erase(key);
2513         mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_MOD failed (check whether to quit here)");
2514         return -1;
2515     }
2516     return 0;
2517 }
2518
2519
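/**
 * Trace the message as JSON and forward it to the xApp over RMR.
 * A send that returns RMR_ERR_RETRY is retried once after a short sleep.
 * @param rmrMessageBuffer holds the RMR context and the message to send
 * @param message reporting metadata used for the JSON trace
 * @return 0 on success, -1 or the RMR state code on failure
 */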
2520 int sendRmrMessage(RmrMessagesBuffer_t &rmrMessageBuffer, ReportingMessages_t &message) {
2521     buildJsonMessage(message);
2522
2523     rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2524
2525     if (rmrMessageBuffer.sendMessage == nullptr) {
2526         rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2527         mdclog_write(MDCLOG_ERR, "RMR send failed; rmr_send_msg returned a NULL pointer");
2528         return -1;
2529     }
2530
2531     if (rmrMessageBuffer.sendMessage->state != 0) {
2532         char meid[RMR_MAX_MEID]{};
2533         if (rmrMessageBuffer.sendMessage->state == RMR_ERR_RETRY) {
2534             usleep(5);
2535             rmrMessageBuffer.sendMessage->state = 0;
2536             mdclog_write(MDCLOG_INFO, "RETRY sending Message type %d to Xapp from %s",
2537                          rmrMessageBuffer.sendMessage->mtype,
2538                          rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2539             rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2540             if (rmrMessageBuffer.sendMessage == nullptr) {
2541                 mdclog_write(MDCLOG_ERR, "RMR send failed on retry; rmr_send_msg returned a NULL pointer");
2542                 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2543                 return -1;
2544             } else if (rmrMessageBuffer.sendMessage->state != 0) {
2545                 mdclog_write(MDCLOG_ERR,
2546                              "Message state %s while sending request %d to Xapp from %s after a single retry",
2547                              translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str(),
2548                              rmrMessageBuffer.sendMessage->mtype,
2549                              rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2550                 auto rc = rmrMessageBuffer.sendMessage->state;
2551                 return rc;
2552             }
2553         } else {
2554             mdclog_write(MDCLOG_ERR, "Message state %s while sending request %d to Xapp from %s",
2555                          translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str(),
2556                          rmrMessageBuffer.sendMessage->mtype,
2557                          rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2558             return rmrMessageBuffer.sendMessage->state;
2559         }
2560     }
2561     return 0;
2562 }
2563
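/**
 * When JSON tracing is enabled (jsonTrace), base64-encode the ASN.1 payload and emit a
 * single-line JSON trace record (timestamp, RAN name, message type, direction, encoded payload).
 * @param message the reporting metadata and payload to trace
 */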
2564 void buildJsonMessage(ReportingMessages_t &message) {
2565     if (jsonTrace) {
2566         message.outLen = sizeof(message.base64Data);
2567         base64::encode((const unsigned char *) message.message.asndata,
2568                        (const int) message.message.asnLength,
2569                        message.base64Data,
2570                        message.outLen);
2571         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2572             mdclog_write(MDCLOG_DEBUG, "Tracing: ASN length = %d, base64 message length = %d ",
2573                          (int) message.message.asnLength,
2574                          (int) message.outLen);
2575         }
2576
2577         snprintf(message.buffer, sizeof(message.buffer),
2578                  "{\"header\": {\"ts\": \"%ld.%09ld\","
2579                  "\"ranName\": \"%s\","
2580                  "\"messageType\": %d,"
2581                  "\"direction\": \"%c\"},"
2582                  "\"base64Length\": %d,"
2583                  "\"asnBase64\": \"%s\"}",
2584                  message.message.time.tv_sec,
2585                  message.message.time.tv_nsec,
2586                  message.message.enodbName,
2587                  message.message.messageType,
2588                  message.message.direction,
2589                  (int) message.outLen,
2590                  message.base64Data);
2591         static src::logger_mt &lg = my_logger::get();
2592
2593         BOOST_LOG(lg) << message.buffer;
2594     }
2595 }
2596
2597
2598 /**
2599  * Translate an RMR error code into a human-readable string
2600  * @param state
2601  * @return
2602  */
2603 string translateRmrErrorMessages(int state) {
2604     string str = {};
2605     switch (state) {
2606         case RMR_OK:
2607             str = "RMR_OK - state is good";
2608             break;
2609         case RMR_ERR_BADARG:
2610             str = "RMR_ERR_BADARG - argument passed to function was unusable";
2611             break;
2612         case RMR_ERR_NOENDPT:
2613             str = "RMR_ERR_NOENDPT - send/call could not find an endpoint based on msg type";
2614             break;
2615         case RMR_ERR_EMPTY:
2616             str = "RMR_ERR_EMPTY - msg received had no payload; attempt to send an empty message";
2617             break;
2618         case RMR_ERR_NOHDR:
2619             str = "RMR_ERR_NOHDR - message didn't contain a valid header";
2620             break;
2621         case RMR_ERR_SENDFAILED:
2622             str = "RMR_ERR_SENDFAILED - send failed; errno has nano reason";
2623             break;
2624         case RMR_ERR_CALLFAILED:
2625             str = "RMR_ERR_CALLFAILED - unable to send call() message";
2626             break;
2627         case RMR_ERR_NOWHOPEN:
2628             str = "RMR_ERR_NOWHOPEN - no wormholes are open";
2629             break;
2630         case RMR_ERR_WHID:
2631             str = "RMR_ERR_WHID - wormhole id was invalid";
2632             break;
2633         case RMR_ERR_OVERFLOW:
2634             str = "RMR_ERR_OVERFLOW - operation would have busted through a buffer/field size";
2635             break;
2636         case RMR_ERR_RETRY:
2637             str = "RMR_ERR_RETRY - request (send/call/rts) failed, but caller should retry (EAGAIN for wrappers)";
2638             break;
2639         case RMR_ERR_RCVFAILED:
2640             str = "RMR_ERR_RCVFAILED - receive failed (hard error)";
2641             break;
2642         case RMR_ERR_TIMEOUT:
2643             str = "RMR_ERR_TIMEOUT - message processing call timed out";
2644             break;
2645         case RMR_ERR_UNSET:
2646             str = "RMR_ERR_UNSET - the message hasn't been populated with a transport buffer";
2647             break;
2648         case RMR_ERR_TRUNC:
2649             str = "RMR_ERR_TRUNC - received message likely truncated";
2650             break;
2651         case RMR_ERR_INITFAILED:
2652             str = "RMR_ERR_INITFAILED - initialisation of something (probably message) failed";
2653             break;
2654         case RMR_ERR_NOTSUPP:
2655             str = "RMR_ERR_NOTSUPP - the request is not supported, or RMr was not initialised for the request";
2656             break;
2657         default:
2658             char buf[128]{};
2659             snprintf(buf, sizeof buf, "UNDOCUMENTED RMR_ERR : %d", state);
2660             str = buf;
2661             break;
2662     }
2663     return str;
2664 }
2665
2666