3ae156cd07bd8707b9f53f827b133f3830c795da
[ric-plt/e2.git] / RIC-E2-TERMINATION / sctpThread.cpp
1 // Copyright 2019 AT&T Intellectual Property
2 // Copyright 2019 Nokia
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //      http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15
16 //  This source code is part of the near-RT RIC (RAN Intelligent Controller)
17 //  platform project (RICP).
18
19 // sctpThread.cpp: E2 Termination (E2T) main loop -- SCTP <-> RMR relay for E2AP messages, with configuration reload via inotify and Prometheus metrics.
20
21
22
23 #include <3rdparty/oranE2/RANfunctions-List.h>
24 #include "sctpThread.h"
25 #include "BuildRunName.h"
26
27 //#include "3rdparty/oranE2SM/E2SM-gNB-NRT-RANfunction-Definition.h"
28 //#include "BuildXml.h"
29 //#include "pugixml/src/pugixml.hpp"
30
31 using namespace std;
32 //using namespace std::placeholders;
33 using namespace boost::filesystem;
34 using namespace prometheus;
35
36
37 //#ifdef __cplusplus
38 //extern "C"
39 //{
40 //#endif
41
42 // need to expose without the include of gcov
43 extern "C" void __gcov_flush(void);
44
45 static void catch_function(int signal) {
46     __gcov_flush();
47     exit(signal);
48 }
49
50
51 BOOST_LOG_INLINE_GLOBAL_LOGGER_DEFAULT(my_logger, src::logger_mt)
52
53 boost::shared_ptr<sinks::synchronous_sink<sinks::text_file_backend>> boostLogger;
54 double cpuClock = 0.0;
55 bool jsonTrace = true;
56
57 void init_log() {
58     mdclog_attr_t *attr;
59     mdclog_attr_init(&attr);
60     mdclog_attr_set_ident(attr, "E2Terminator");
61     mdclog_init(attr);
62     mdclog_attr_destroy(attr);
63 }
64 auto start_time = std::chrono::high_resolution_clock::now();
65 typedef std::chrono::duration<double, std::ratio<1,1>> seconds_t;
66
67 double age() {
68     return seconds_t(std::chrono::high_resolution_clock::now() - start_time).count();
69 }
70
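// Rough estimate of the CPU frequency: count TSC cycles (rdtscp) across a sleep of
// 'sleeptime' milliseconds and divide by the elapsed wall-clock time from age().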
71 double approx_CPU_MHz(unsigned sleeptime) {
72     using namespace std::chrono_literals;
73     uint32_t aux = 0;
74     uint64_t cycles_start = rdtscp(aux);
75     double time_start = age();
76     std::this_thread::sleep_for(sleeptime * 1ms);
77     uint64_t elapsed_cycles = rdtscp(aux) - cycles_start;
78     double elapsed_time = age() - time_start;
79     return elapsed_cycles / elapsed_time;
80 }
81
82 //std::atomic<int64_t> rmrCounter{0};
83 std::atomic<int64_t> num_of_messages{0};
84 std::atomic<int64_t> num_of_XAPP_messages{0};
85 static long transactionCounter = 0;
86
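/**
 * Create the IPv6 SCTP listening socket, bind it to the configured sctp-port,
 * switch it to non-blocking mode and register it (edge-triggered) with the shared
 * epoll instance.
 * @param sctpParams global parameters holding the SCTP port, epoll FD and listen FD
 * @return 0 on success, -1 on failure
 */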
87 int buildListeningPort(sctp_params_t &sctpParams) {
88     sctpParams.listenFD = socket (AF_INET6, SOCK_STREAM, IPPROTO_SCTP);
89     struct sockaddr_in6 servaddr {};
90     servaddr.sin6_family = AF_INET6;
91     servaddr.sin6_addr   = in6addr_any;
92     servaddr.sin6_port = htons(sctpParams.sctpPort);
93     if (bind(sctpParams.listenFD, (SA *)&servaddr, sizeof(servaddr)) < 0 ) {
94         mdclog_write(MDCLOG_ERR, "Error binding. %s\n", strerror(errno));
95         return -1;
96     }
97     if (setSocketNoBlocking(sctpParams.listenFD) == -1) {
98         //mdclog_write(MDCLOG_ERR, "Error binding. %s", strerror(errno));
99         return -1;
100     }
101     if (mdclog_level_get() >= MDCLOG_DEBUG) {
102         struct sockaddr_in6 cliaddr {};
103         socklen_t len = sizeof(cliaddr);
104         getsockname(sctpParams.listenFD, (SA *)&cliaddr, &len);
105         char buff[1024] {};
106         inet_ntop(AF_INET6, &cliaddr.sin6_addr, buff, sizeof(buff));
107         mdclog_write(MDCLOG_DEBUG, "My address: %s, port %d\n", buff, htons(cliaddr.sin6_port));
108     }
109
110     if (listen(sctpParams.listenFD, SOMAXCONN) < 0) {
111         mdclog_write(MDCLOG_ERR, "Error listening. %s\n", strerror(errno));
112         return -1;
113     }
114     struct epoll_event event {};
115     event.events = EPOLLIN | EPOLLET;
116     event.data.fd = sctpParams.listenFD;
117
118     // add listening port to epoll
119     if (epoll_ctl(sctpParams.epoll_fd, EPOLL_CTL_ADD, sctpParams.listenFD, &event)) {
120         printf("Failed to add descriptor to epoll\n");
121         mdclog_write(MDCLOG_ERR, "Failed to add descriptor to epoll. %s\n", strerror(errno));
122         return -1;
123     }
124
125     return 0;
126 }
127
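/**
 * Read and validate the E2Term configuration file and initialize logging.
 * Keys read below: nano (RMR port), loglevel, volume, local-ip, sctp-port,
 * external-fqdn, pod_name (name of an environment variable), trace,
 * prometheusMode, prometheusPushAddr, prometheusPort and prometheusPushTimeOut.
 * An illustrative config.conf might look like the following (values and the
 * key/value syntax shown here are examples only, not taken from this file):
 *
 *   nano = 38000
 *   loglevel = info
 *   volume = log
 *   local-ip = 10.0.2.15
 *   sctp-port = 36422
 *   external-fqdn = e2t.example.com
 *   pod_name = POD_NAME
 *   trace = stop
 *
 * @param sctpParams global parameters filled from the configuration
 * @return 0 on success, -1 on failure
 */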
128 int buildConfiguration(sctp_params_t &sctpParams) {
129     path p = (sctpParams.configFilePath + "/" + sctpParams.configFileName).c_str();
130     if (exists(p)) {
131         const int size = 2048;
132         auto fileSize = file_size(p);
133         if (fileSize > size) {
134             mdclog_write(MDCLOG_ERR, "File %s larger than %d", p.string().c_str(), size);
135             return -1;
136         }
137     } else {
138         mdclog_write(MDCLOG_ERR, "Configuration file %s does not exist", p.string().c_str());
139         return -1;
140     }
141
142     ReadConfigFile conf;
143     if (conf.openConfigFile(p.string()) == -1) {
144         mdclog_write(MDCLOG_ERR, "Failed to open config file %s, %s",
145                      p.string().c_str(), strerror(errno));
146         return -1;
147     }
148     int rmrPort = conf.getIntValue("nano");
149     if (rmrPort == -1) {
150         mdclog_write(MDCLOG_ERR, "illegal RMR port");
151         return -1;
152     }
153     sctpParams.rmrPort = (uint16_t)rmrPort;
154     snprintf(sctpParams.rmrAddress, sizeof(sctpParams.rmrAddress), "%d", (int) (sctpParams.rmrPort));
155
156     auto tmpStr = conf.getStringValue("loglevel");
157     if (tmpStr.length() == 0) {
158         mdclog_write(MDCLOG_ERR, "illegal loglevel. Set loglevel to MDCLOG_INFO");
159         tmpStr = "info";
160     }
161     transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
162
163     if ((tmpStr.compare("debug")) == 0) {
164         sctpParams.logLevel = MDCLOG_DEBUG;
165     } else if ((tmpStr.compare("info")) == 0) {
166         sctpParams.logLevel = MDCLOG_INFO;
167     } else if ((tmpStr.compare("warning")) == 0) {
168         sctpParams.logLevel = MDCLOG_WARN;
169     } else if ((tmpStr.compare("error")) == 0) {
170         sctpParams.logLevel = MDCLOG_ERR;
171     } else {
172         mdclog_write(MDCLOG_ERR, "illegal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
173         sctpParams.logLevel = MDCLOG_INFO;
174     }
175     mdclog_level_set(sctpParams.logLevel);
176
177     tmpStr = conf.getStringValue("volume");
178     if (tmpStr.length() == 0) {
179         mdclog_write(MDCLOG_ERR, "illegal volume.");
180         return -1;
181     }
182
183     char tmpLogFilespec[VOLUME_URL_SIZE];
184     tmpLogFilespec[0] = 0;
185     sctpParams.volume[0] = 0;
186     snprintf(sctpParams.volume, VOLUME_URL_SIZE, "%s", tmpStr.c_str());
187     // copy the name to temp file as well
188     snprintf(tmpLogFilespec, VOLUME_URL_SIZE, "%s", tmpStr.c_str());
189
190
191     // define the file name in the tmp directory under the volume
192     strcat(tmpLogFilespec,"/tmp/E2Term_%Y-%m-%d_%H-%M-%S.%N.tmpStr");
193
194     sctpParams.myIP = conf.getStringValue("local-ip");
195     if (sctpParams.myIP.length() == 0) {
196         mdclog_write(MDCLOG_ERR, "illegal local-ip.");
197         return -1;
198     }
199
200     int sctpPort = conf.getIntValue("sctp-port");
201     if (sctpPort == -1) {
202         mdclog_write(MDCLOG_ERR, "illegal SCTP port");
203         return -1;
204     }
205     sctpParams.sctpPort = (uint16_t)sctpPort;
206
207     sctpParams.fqdn = conf.getStringValue("external-fqdn");
208     if (sctpParams.fqdn.length() == 0) {
209         mdclog_write(MDCLOG_ERR, "illegal external-fqdn");
210         return -1;
211     }
212
213     std::string pod = conf.getStringValue("pod_name");
214     if (pod.length() == 0) {
215         mdclog_write(MDCLOG_ERR, "illegal pod_name in config file");
216         return -1;
217     }
218     auto *podName = getenv(pod.c_str());
219     if (podName == nullptr) {
220         mdclog_write(MDCLOG_ERR, "illegal pod_name or the environment variable does not exist: %s", pod.c_str());
221         return -1;
222
223     } else {
224         sctpParams.podName.assign(podName);
225         if (sctpParams.podName.length() == 0) {
226             mdclog_write(MDCLOG_ERR, "illegal pod_name");
227             return -1;
228         }
229     }
230
231     tmpStr = conf.getStringValue("trace");
232     transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
233     if ((tmpStr.compare("start")) == 0) {
234         mdclog_write(MDCLOG_INFO, "Trace set to: start");
235         sctpParams.trace = true;
236     } else if ((tmpStr.compare("stop")) == 0) {
237         mdclog_write(MDCLOG_INFO, "Trace set to: stop");
238         sctpParams.trace = false;
239     }
240     jsonTrace = sctpParams.trace;
241
242     sctpParams.epollTimeOut = -1;
243     tmpStr = conf.getStringValue("prometheusMode");
244     transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
245     if (tmpStr.length() != 0) {
246         sctpParams.prometheusMode = tmpStr;
247         if (tmpStr.compare("push") == 0) {
248             auto timeout = conf.getIntValue("prometheusPushTimeOut");
249             if (timeout >= 5 && timeout <= 300) {
250                 sctpParams.epollTimeOut = timeout * 1000;
251             } else {
252                 sctpParams.epollTimeOut = 10 * 1000;
253             }
254         }
255     }
256
257     tmpStr = conf.getStringValue("prometheusPushAddr");
258     if (tmpStr.length() != 0) {
259         sctpParams.prometheusPushAddress = tmpStr;
260     }
261
262     tmpStr = conf.getStringValue("prometheusPort");
263     if (tmpStr.length() != 0) {
264         sctpParams.prometheusPort = tmpStr;
265     }
266
267     sctpParams.ka_message_length = snprintf(sctpParams.ka_message, KA_MESSAGE_SIZE, "{\"address\": \"%s:%d\","
268                                                                                     "\"fqdn\": \"%s\","
269                                                                                     "\"pod_name\": \"%s\"}",
270                                             (const char *)sctpParams.myIP.c_str(),
271                                             sctpParams.rmrPort,
272                                             sctpParams.fqdn.c_str(),
273                                             sctpParams.podName.c_str());
274
275     if (mdclog_level_get() >= MDCLOG_INFO) {
276         mdclog_mdc_add("RMR Port", to_string(sctpParams.rmrPort).c_str());
277         mdclog_mdc_add("LogLevel", to_string(sctpParams.logLevel).c_str());
278         mdclog_mdc_add("volume", sctpParams.volume);
279         mdclog_mdc_add("tmpLogFilespec", tmpLogFilespec);
280         mdclog_mdc_add("my ip", sctpParams.myIP.c_str());
281         mdclog_mdc_add("pod name", sctpParams.podName.c_str());
282
283         mdclog_write(MDCLOG_INFO, "running parameters for instance : %s", sctpParams.ka_message);
284     }
285     mdclog_mdc_clean();
286
287     // Log files are written to the tmp directory under the configured volume
288     boostLogger = logging::add_file_log(
289             keywords::file_name = tmpLogFilespec, // to temp directory
290             keywords::rotation_size = 10 * 1024 * 1024,
291             keywords::time_based_rotation = sinks::file::rotation_at_time_interval(posix_time::hours(1)),
292             keywords::format = "%Message%"
293             //keywords::format = "[%TimeStamp%]: %Message%" // use each tmpStr with time stamp
294     );
295
296     // Set up a destination folder for collecting rotated (closed) files -- the same volume is used so rename() works
297     boostLogger->locked_backend()->set_file_collector(sinks::file::make_collector(
298             keywords::target = sctpParams.volume
299     ));
300
301     // Upon restart, scan the directory for files matching the file_name pattern
302     boostLogger->locked_backend()->scan_for_files();
303
304     // Enable auto-flushing after each log record written
305     if (mdclog_level_get() >= MDCLOG_DEBUG) {
306         boostLogger->locked_backend()->auto_flush(true);
307     }
308
309     return 0;
310 }
311
312 static std::string GetHostName() {
313     char hostname[1024];
314
315     if (::gethostname(hostname, sizeof(hostname))) {
316         return {};
317     }
318     return hostname;
319 }
320
321
322
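/**
 * Entry point: seed the transaction counter, install signal handlers, read the
 * configuration, set up Prometheus, RMR, inotify and the SCTP listening socket,
 * then spawn the listener thread(s) and keep sending E2_TERM_INIT until an xApp answers.
 * Illustrative invocation (binary name and paths are examples only; the options
 * match the "p,path"/"f,file" flags parsed below):
 *
 *   ./e2term -p /opt/e2/config -f config.conf
 */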
323 int main(const int argc, char **argv) {
324     sctp_params_t sctpParams;
325
326     {
327         std::random_device device{};
328         std::mt19937 generator(device());
329         std::uniform_int_distribution<long> distribution(1, (long) 1e12);
330         transactionCounter = distribution(generator);
331     }
332
333 //    uint64_t st = 0;
334 //    uint32_t aux1 = 0;
335 //   st = rdtscp(aux1);
336
337     unsigned num_cpus = std::thread::hardware_concurrency();
338     init_log();
339     mdclog_level_set(MDCLOG_INFO);
340
341     if (std::signal(SIGINT, catch_function) == SIG_ERR) {
342         mdclog_write(MDCLOG_ERR, "Error initializing SIGINT");
343         exit(1);
344     }
345     if (std::signal(SIGABRT, catch_function)== SIG_ERR) {
346         mdclog_write(MDCLOG_ERR, "Error initializing SIGABRT");
347         exit(1);
348     }
349     if (std::signal(SIGTERM, catch_function)== SIG_ERR) {
350         mdclog_write(MDCLOG_ERR, "Error initializing SIGTERM");
351         exit(1);
352     }
353
354     cpuClock = approx_CPU_MHz(100);
355
356     mdclog_write(MDCLOG_DEBUG, "CPU speed %11.11f", cpuClock);
357
358     auto result = parse(argc, argv, sctpParams);
359
360     if (buildConfiguration(sctpParams) != 0) {
361         exit(-1);
362     }
363
364     //auto registry = std::make_shared<Registry>();
365     sctpParams.prometheusRegistry = std::make_shared<Registry>();
366
367     //sctpParams.promtheusFamily = new Family<Counter>("E2T", "E2T message counter", {{"E", sctpParams.podName}});
368
369     sctpParams.prometheusFamily = &BuildCounter()
370             .Name("E2T")
371             .Help("E2T message counter")
372             .Labels({{"E", sctpParams.podName}})
373             .Register(*sctpParams.prometheusRegistry);
374
375
376     // start epoll
377     sctpParams.epoll_fd = epoll_create1(0);
378     if (sctpParams.epoll_fd == -1) {
379         mdclog_write(MDCLOG_ERR, "failed to open epoll descriptor");
380         exit(-1);
381     }
382
383     getRmrContext(sctpParams);
384     if (sctpParams.rmrCtx == nullptr) {
385         close(sctpParams.epoll_fd);
386         exit(-1);
387     }
388
389     if (buildInotify(sctpParams) == -1) {
390         close(sctpParams.rmrListenFd);
391         rmr_close(sctpParams.rmrCtx);
392         close(sctpParams.epoll_fd);
393         exit(-1);
394     }
395
396     if (buildListeningPort(sctpParams) != 0) {
397         close(sctpParams.rmrListenFd);
398         rmr_close(sctpParams.rmrCtx);
399         close(sctpParams.epoll_fd);
400         exit(-1);
401     }
402
403     sctpParams.sctpMap = new mapWrapper();
404
405     std::vector<std::thread> threads(num_cpus);
406 //    std::vector<std::thread> threads;
407
408     if (sctpParams.prometheusMode.compare("pull") == 0) {
409         sctpParams.prometheusExposer = new Exposer(sctpParams.myIP + ":" + sctpParams.prometheusPort, 1);
410         sctpParams.prometheusExposer->RegisterCollectable(sctpParams.prometheusRegistry);
411     } else if (sctpParams.prometheusMode.compare("push") == 0) {
412         const auto labels = Gateway::GetInstanceLabel(GetHostName());
413         string address {};
414         string port {};
415         char ch = ':';
416         auto found = sctpParams.prometheusPushAddress.find_last_of(ch);
417         // split the configured push address into <host> and <port> at the last ':'
418         // (if no ':' is present the push gateway cannot be configured)
419         if (found != string::npos) {
420             address = sctpParams.prometheusPushAddress.substr(0,found);
421             port = sctpParams.prometheusPushAddress.substr(found + 1);
422             sctpParams.prometheusGateway = new Gateway(address, port, "E2T", labels);
423             sctpParams.prometheusGateway->RegisterCollectable(sctpParams.prometheusRegistry);
424         } else {
425             mdclog_write(MDCLOG_ERR, "failed to build Prometheus gateway, no stats will be sent");
426         }
427     }
428
429     num_cpus = 1;
430     for (unsigned int i = 0; i < num_cpus; i++) {
431         threads[i] = std::thread(listener, &sctpParams);
432
433         cpu_set_t cpuset;
434         CPU_ZERO(&cpuset);
435         CPU_SET(i, &cpuset);
436         int rc = pthread_setaffinity_np(threads[i].native_handle(), sizeof(cpu_set_t), &cpuset);
437         if (rc != 0) {
438             mdclog_write(MDCLOG_ERR, "Error calling pthread_setaffinity_np: %d", rc);
439         }
440     }
441
442
443     //loop over term_init until first message from xApp
444     handleTermInit(sctpParams);
445
446     for (auto &t : threads) {
447         t.join();
448     }
449
450     return 0;
451 }
452
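/**
 * Keep announcing this E2 Termination instance: re-send E2_TERM_INIT roughly every
 * 100 seconds (1000 iterations of 100 ms sleeps) until the first RMR message from
 * an xApp arrives.
 * @param sctpParams global parameters (RMR context and keep-alive message)
 */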
453 void handleTermInit(sctp_params_t &sctpParams) {
454     sendTermInit(sctpParams);
455     //send to e2 manager init of e2 term
456     //E2_TERM_INIT
457
458     int count = 0;
459     while (true) {
460         auto xappMessages = num_of_XAPP_messages.load(std::memory_order_acquire);
461         if (xappMessages > 0) {
462             if (mdclog_level_get() >=  MDCLOG_INFO) {
463                 mdclog_write(MDCLOG_INFO, "Got a message from some application, stop sending E2_TERM_INIT");
464             }
465             return;
466         }
467         usleep(100000);
468         count++;
469         if (count % 1000 == 0) {
470             mdclog_write(MDCLOG_ERR, "Got no messages from any xApp");
471             sendTermInit(sctpParams);
472         }
473     }
474 }
475
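/**
 * Send the E2_TERM_INIT keep-alive to the E2 Manager over RMR and retry until the
 * send succeeds. The payload is the JSON built in buildConfiguration, e.g.
 * {"address": "<local-ip>:<rmr-port>", "fqdn": "<external-fqdn>", "pod_name": "<pod>"}.
 * @param sctpParams global parameters (RMR context and keep-alive message)
 */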
476 void sendTermInit(sctp_params_t &sctpParams) {
477     rmr_mbuf_t *msg = rmr_alloc_msg(sctpParams.rmrCtx, sctpParams.ka_message_length);
478     auto count = 0;
479     while (true) {
480         msg->mtype = E2_TERM_INIT;
481         msg->state = 0;
482         rmr_bytes2payload(msg, (unsigned char *)sctpParams.ka_message, sctpParams.ka_message_length);
483         static unsigned char tx[32];
484         auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
485         rmr_bytes2xact(msg, tx, txLen);
486         msg = rmr_send_msg(sctpParams.rmrCtx, msg);
487         if (msg == nullptr) {
488             msg = rmr_alloc_msg(sctpParams.rmrCtx, sctpParams.ka_message_length);
489         } else if (msg->state == 0) {
490             rmr_free_msg(msg);
491             if (mdclog_level_get() >=  MDCLOG_INFO) {
492                 mdclog_write(MDCLOG_INFO, "E2_TERM_INIT successfully sent");
493             }
494             return;
495         } else {
496             if (count % 100 == 0) {
497                 mdclog_write(MDCLOG_ERR, "Error sending E2_TERM_INIT, cause: %s", translateRmrErrorMessages(msg->state).c_str());
498             }
499             sleep(1);
500         }
501         count++;
502     }
503 }
504
505 /**
506  * Parse the command line: configuration file path (-p/--path) and name (-f/--file).
507  * @param argc argument count from main()
508  * @param argv argument vector from main()
509  * @param sctpParams global parameters receiving the parsed path and file name
510  * @return the cxxopts parse result
511  */
512 cxxopts::ParseResult parse(int argc, char *argv[], sctp_params_t &sctpParams) {
513     cxxopts::Options options(argv[0], "e2 term help");
514     options.positional_help("[optional args]").show_positional_help();
515     options.allow_unrecognised_options().add_options()
516             ("p,path", "config file path", cxxopts::value<std::string>(sctpParams.configFilePath)->default_value("config"))
517             ("f,file", "config file name", cxxopts::value<std::string>(sctpParams.configFileName)->default_value("config.conf"))
518             ("h,help", "Print help");
519
520     auto result = options.parse(argc, argv);
521
522     if (result.count("help")) {
523         std::cout << options.help({""}) << std::endl;
524         exit(0);
525     }
526     return result;
527 }
528
529 /**
530  *
531  * @param sctpParams
532  * @return -1 failed 0 success
533  */
534 int buildInotify(sctp_params_t &sctpParams) {
535     sctpParams.inotifyFD = inotify_init1(IN_NONBLOCK);
536     if (sctpParams.inotifyFD == -1) {
537         mdclog_write(MDCLOG_ERR, "Failed to init inotify (inotify_init1) %s", strerror(errno));
538         close(sctpParams.rmrListenFd);
539         rmr_close(sctpParams.rmrCtx);
540         close(sctpParams.epoll_fd);
541         return -1;
542     }
543
544     sctpParams.inotifyWD = inotify_add_watch(sctpParams.inotifyFD,
545                                              (const char *)sctpParams.configFilePath.c_str(),
546                                              (unsigned)IN_OPEN | (unsigned)IN_CLOSE_WRITE | (unsigned)IN_CLOSE_NOWRITE); //IN_CLOSE = (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE)
547     if (sctpParams.inotifyWD == -1) {
548         mdclog_write(MDCLOG_ERR, "Failed to add directory : %s to  inotify (inotify_add_watch) %s",
549                      sctpParams.configFilePath.c_str(),
550                      strerror(errno));
551         close(sctpParams.inotifyFD);
552         return -1;
553     }
554
555     struct epoll_event event{};
556     event.events = (EPOLLIN);
557     event.data.fd = sctpParams.inotifyFD;
558     // add the inotify FD to epoll
559     if (epoll_ctl(sctpParams.epoll_fd, EPOLL_CTL_ADD, sctpParams.inotifyFD, &event)) {
560         mdclog_write(MDCLOG_ERR, "Failed to add inotify FD to epoll");
561         close(sctpParams.inotifyFD);
562         return -1;
563     }
564     return 0;
565 }
566
567 /**
568  * Worker thread: runs the epoll event loop serving SCTP, RMR and inotify events.
569  * @param params global SCTP/RMR parameters shared with main()
571  */
572 void listener(sctp_params_t *params) {
573     int num_of_SCTP_messages = 0;
574     auto totalTime = 0.0;
575     mdclog_mdc_clean();
576     mdclog_level_set(params->logLevel);
577
578     std::thread::id this_id = std::this_thread::get_id();
579     //save cout
580     streambuf *oldCout = cout.rdbuf();
581     ostringstream memCout;
582     // create new cout
583     cout.rdbuf(memCout.rdbuf());
584     cout << this_id;
585     //return to the normal cout
586     cout.rdbuf(oldCout);
587
588     char tid[32];
589     memcpy(tid, memCout.str().c_str(), memCout.str().length() < 32 ? memCout.str().length() : 31);
590     tid[memCout.str().length() < 32 ? memCout.str().length() : 31] = 0;    // always terminate inside the 32-byte buffer
591     mdclog_mdc_add("thread id", tid);
592
593     if (mdclog_level_get() >= MDCLOG_DEBUG) {
594         mdclog_write(MDCLOG_DEBUG, "started thread number %s", tid);
595     }
596
597     RmrMessagesBuffer_t rmrMessageBuffer{};
598     //create and init RMR
599     rmrMessageBuffer.rmrCtx = params->rmrCtx;
600
601     auto *events = (struct epoll_event *) calloc(MAXEVENTS, sizeof(struct epoll_event));
602     struct timespec end{0, 0};
603     struct timespec start{0, 0};
604
605     rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
606     rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
607
608     memcpy(rmrMessageBuffer.ka_message, params->ka_message, params->ka_message_length);
609     rmrMessageBuffer.ka_message_len = params->ka_message_length;
610     rmrMessageBuffer.ka_message[rmrMessageBuffer.ka_message_len] = 0;
611
612     if (mdclog_level_get() >= MDCLOG_DEBUG) {
613         mdclog_write(MDCLOG_DEBUG, "keep alive message is : %s", rmrMessageBuffer.ka_message);
614     }
615
616     ReportingMessages_t message {};
617
618 //    for (int i = 0; i < MAX_RMR_BUFF_ARRY; i++) {
619 //        rmrMessageBuffer.rcvBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
620 //        rmrMessageBuffer.sendBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
621 //    }
622
623     bool gatewayflag = false;
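    // Main event loop: epoll_wait returns either a timeout (used to push Prometheus
    // stats when push mode is configured) or a batch of events that are dispatched to
    // the error handler, the EPOLLOUT (in-progress connect) handler, the SCTP accept
    // path, the RMR receive path, the inotify (config change) handler or the SCTP read path.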
624     while (true) {
625         future<int> gateWay;
626
627         if (mdclog_level_get() >= MDCLOG_DEBUG) {
628             mdclog_write(MDCLOG_DEBUG, "Start EPOLL Wait. Timeout = %d", params->epollTimeOut);
629         }
630         auto numOfEvents = epoll_wait(params->epoll_fd, events, MAXEVENTS, params->epollTimeOut);
631         if (numOfEvents == 0) {
632             if (params->prometheusGateway != nullptr) {
633                 gateWay = params->prometheusGateway->AsyncPush();
634                 gatewayflag = true;
635             }
636             continue;
637         } else if (numOfEvents < 0) {
638             if (errno == EINTR) {
639                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
640                     mdclog_write(MDCLOG_DEBUG, "got EINTR : %s", strerror(errno));
641                 }
642                 continue;
643             }
644             mdclog_write(MDCLOG_ERR, "Epoll wait failed, errno = %s", strerror(errno));
645             return;
646         }
647         if (gatewayflag) {
648             gatewayflag = false;
649             auto rc = gateWay.get();
650             if (rc != 200) {
651                 mdclog_write(MDCLOG_ERR, "Async send to Prometheus failed with return code %d", rc);
652             } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
653                 mdclog_write(MDCLOG_DEBUG, "Stats sent to Prometheus");
654             }
655         }
656         for (auto i = 0; i < numOfEvents; i++) {
657             if (mdclog_level_get() >= MDCLOG_DEBUG) {
658                 mdclog_write(MDCLOG_DEBUG, "handling epoll event %d out of %d", i + 1, numOfEvents);
659             }
660             clock_gettime(CLOCK_MONOTONIC, &message.message.time);
661             start.tv_sec = message.message.time.tv_sec;
662             start.tv_nsec = message.message.time.tv_nsec;
663
664
665             if ((events[i].events & EPOLLERR) || (events[i].events & EPOLLHUP)) {
666                 handlepoll_error(events[i], message, rmrMessageBuffer, params);
667             } else if (events[i].events & EPOLLOUT) {
668                 handleEinprogressMessages(events[i], message, rmrMessageBuffer, params);
669             } else if (params->listenFD == events[i].data.fd) {
670                 if (mdclog_level_get() >= MDCLOG_INFO) {
671                     mdclog_write(MDCLOG_INFO, "New connection request from sctp network\n");
672                 }
673                 // a new connection was requested from the RAN; start building the connection
674                 while (true) {
675                     struct sockaddr in_addr {};
676                     socklen_t in_len;
677                     char hostBuff[NI_MAXHOST];
678                     char portBuff[NI_MAXSERV];
679
680                     in_len = sizeof(in_addr);
681                     auto *peerInfo = (ConnectedCU_t *)calloc(1, sizeof(ConnectedCU_t));
682                     peerInfo->sctpParams = params;
683                     peerInfo->fileDescriptor = accept(params->listenFD, &in_addr, &in_len);
684                     if (peerInfo->fileDescriptor == -1) {
685                         if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) {
686                             /* We have processed all incoming connections. */
687                             break;
688                         } else {
689                             mdclog_write(MDCLOG_ERR, "Accept error, errno = %s", strerror(errno));
690                             break;
691                         }
692                     }
693                     if (setSocketNoBlocking(peerInfo->fileDescriptor) == -1) {
694                         mdclog_write(MDCLOG_ERR, "setSocketNoBlocking failed to set new connection %s on port %s\n", hostBuff, portBuff);
695                         close(peerInfo->fileDescriptor);
696                         break;
697                     }
698                     auto  ans = getnameinfo(&in_addr, in_len,
699                                             peerInfo->hostName, NI_MAXHOST,
700                                             peerInfo->portNumber, NI_MAXSERV, (unsigned )((unsigned int)NI_NUMERICHOST | (unsigned int)NI_NUMERICSERV));
701                     if (ans < 0) {
702                         mdclog_write(MDCLOG_ERR, "Failed to get info on connection request. %s\n", strerror(errno));
703                         close(peerInfo->fileDescriptor);
704                         break;
705                     }
706                     if (mdclog_level_get() >= MDCLOG_DEBUG) {
707                         mdclog_write(MDCLOG_DEBUG, "Accepted connection on descriptor %d (host=%s, port=%s)\n", peerInfo->fileDescriptor, peerInfo->hostName, peerInfo->portNumber);
708                     }
709                     peerInfo->isConnected = false;
710                     peerInfo->gotSetup = false;
711                     if (addToEpoll(params->epoll_fd,
712                                    peerInfo,
713                                    (EPOLLIN | EPOLLET),
714                                    params->sctpMap, nullptr,
715                                    0) != 0) {
716                         break;
717                     }
718                     break;
719                 }
720             } else if (params->rmrListenFd == events[i].data.fd) {
721                 // got message from XAPP
722                 num_of_XAPP_messages.fetch_add(1, std::memory_order_release);
723                 num_of_messages.fetch_add(1, std::memory_order_release);
724                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
725                     mdclog_write(MDCLOG_DEBUG, "new message from RMR");
726                 }
727                 if (receiveXappMessages(params->sctpMap,
728                                         rmrMessageBuffer,
729                                         message.message.time) != 0) {
730                     mdclog_write(MDCLOG_ERR, "Error handling Xapp message");
731                 }
732             } else if (params->inotifyFD == events[i].data.fd) {
733                 mdclog_write(MDCLOG_INFO, "Got event from inotify (configuration update)");
734                 handleConfigChange(params);
735             } else {
736                 /* We have data on the fd waiting to be read. Read and handle it.
737                  * We must read whatever data is available completely, as we are running
738                  *  in edge-triggered mode and won't get a notification again for the same data. */
739                 num_of_messages.fetch_add(1, std::memory_order_release);
740                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
741                     mdclog_write(MDCLOG_DEBUG, "new message from SCTP, epoll flags are : %0x", events[i].events);
742                 }
743                 receiveDataFromSctp(&events[i],
744                                     params->sctpMap,
745                                     num_of_SCTP_messages,
746                                     rmrMessageBuffer,
747                                     message.message.time);
748             }
749
750             clock_gettime(CLOCK_MONOTONIC, &end);
751             if (mdclog_level_get() >= MDCLOG_INFO) {
752                 totalTime += ((end.tv_sec + 1.0e-9 * end.tv_nsec) -
753                               ((double) start.tv_sec + 1.0e-9 * start.tv_nsec));
754             }
755             if (mdclog_level_get() >= MDCLOG_DEBUG) {
756                 mdclog_write(MDCLOG_DEBUG, "message handling is %ld seconds %ld nanoseconds",
757                              end.tv_sec - start.tv_sec,
758                              end.tv_nsec - start.tv_nsec);
759             }
760         }
761     }
762 }
763
764 /**
765  *
766  * @param sctpParams
767  */
768 void handleConfigChange(sctp_params_t *sctpParams) {
769     char buf[4096] __attribute__ ((aligned(__alignof__(struct inotify_event))));
770     const struct inotify_event *event;
771     char *ptr;
772
773     path p = (sctpParams->configFilePath + "/" + sctpParams->configFileName).c_str();
774     auto endlessLoop = true;
775     while (endlessLoop) {
776         auto len = read(sctpParams->inotifyFD, buf, sizeof buf);
777         if (len == -1) {
778             if (errno != EAGAIN) {
779                 mdclog_write(MDCLOG_ERR, "read %s ", strerror(errno));
780                 endlessLoop = false;
781                 continue;
782             }
783             else {
784                 endlessLoop = false;
785                 continue;
786             }
787         }
788
789         for (ptr = buf; ptr < buf + len; ptr += sizeof(struct inotify_event) + event->len) {
790             event = (const struct inotify_event *)ptr;
791             if (event->mask & (uint32_t)IN_ISDIR) {
792                 continue;
793             }
794
795             // the directory name
796             if (sctpParams->inotifyWD == event->wd) {
797                 // not the directory
798             }
799             if (event->len) {
800                 auto  retVal = strcmp(sctpParams->configFileName.c_str(), event->name);
801                 if (retVal != 0) {
802                     continue;
803                 }
804             }
805             // only the file we want
806             if (event->mask & (uint32_t)IN_CLOSE_WRITE) {
807                 if (mdclog_level_get() >= MDCLOG_INFO) {
808                     mdclog_write(MDCLOG_INFO, "Configuration file changed");
809                 }
810                 if (exists(p)) {
811                     const int size = 2048;
812                     auto fileSize = file_size(p);
813                     if (fileSize > size) {
814                         mdclog_write(MDCLOG_ERR, "File %s larger than %d", p.string().c_str(), size);
815                         return;
816                     }
817                 } else {
818                     mdclog_write(MDCLOG_ERR, "Configuration file %s does not exist", p.string().c_str());
819                     return;
820                 }
821
822                 ReadConfigFile conf;
823                 if (conf.openConfigFile(p.string()) == -1) {
824                     mdclog_write(MDCLOG_ERR, "Failed to open config file %s, %s",
825                                  p.string().c_str(), strerror(errno));
826                     return;
827                 }
828
829                 auto tmpStr = conf.getStringValue("loglevel");
830                 if (tmpStr.length() == 0) {
831                     mdclog_write(MDCLOG_ERR, "illegal loglevel. Set loglevel to MDCLOG_INFO");
832                     tmpStr = "info";
833                 }
834                 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
835
836                 if ((tmpStr.compare("debug")) == 0) {
837                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_DEBUG");
838                     sctpParams->logLevel = MDCLOG_DEBUG;
839                 } else if ((tmpStr.compare("info")) == 0) {
840                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_INFO");
841                     sctpParams->logLevel = MDCLOG_INFO;
842                 } else if ((tmpStr.compare("warning")) == 0) {
843                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_WARN");
844                     sctpParams->logLevel = MDCLOG_WARN;
845                 } else if ((tmpStr.compare("error")) == 0) {
846                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_ERR");
847                     sctpParams->logLevel = MDCLOG_ERR;
848                 } else {
849                     mdclog_write(MDCLOG_ERR, "illegal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
850                     sctpParams->logLevel = MDCLOG_INFO;
851                 }
852                 mdclog_level_set(sctpParams->logLevel);
853
854
855                 tmpStr = conf.getStringValue("trace");
856                 if (tmpStr.length() == 0) {
857                     mdclog_write(MDCLOG_ERR, "illegal trace. Set trace to stop");
858                     tmpStr = "stop";
859                 }
860
861                 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
862                 if ((tmpStr.compare("start")) == 0) {
863                     mdclog_write(MDCLOG_INFO, "Trace set to: start");
864                     sctpParams->trace = true;
865                 } else if ((tmpStr.compare("stop")) == 0) {
866                     mdclog_write(MDCLOG_INFO, "Trace set to: stop");
867                     sctpParams->trace = false;
868                 } else {
869                     mdclog_write(MDCLOG_ERR, "Trace was set to wrong value %s, set to stop", tmpStr.c_str());
870                     sctpParams->trace = false;
871                 }
872                 jsonTrace = sctpParams->trace;
873
874                 if (sctpParams->prometheusMode.compare("push") == 0) {
875                     auto timeout = conf.getIntValue("prometheusPushTimeOut");
876                     if (timeout >= 5 && timeout <= 300) {
877                         sctpParams->epollTimeOut = timeout * 1000;
878                     } else {
879                         mdclog_write(MDCLOG_ERR, "prometheusPushTimeOut set to wrong value %d, valid values are [5..300]",
880                                      timeout);
881                     }
882                 }
883
884                 endlessLoop = false;
885             }
886         }
887     }
888 }
889
890 /**
891  *
892  * @param event
893  * @param message
894  * @param rmrMessageBuffer
895  * @param params
896  */
897 void handleEinprogressMessages(struct epoll_event &event,
898                                ReportingMessages_t &message,
899                                RmrMessagesBuffer_t &rmrMessageBuffer,
900                                sctp_params_t *params) {
901     auto *peerInfo = (ConnectedCU_t *)event.data.ptr;
902     memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
903
904     mdclog_write(MDCLOG_INFO, "file descriptor %d got EPOLLOUT", peerInfo->fileDescriptor);
905     auto retVal = 0;
906     socklen_t retValLen = sizeof(retVal);
907     auto rc = getsockopt(peerInfo->fileDescriptor, SOL_SOCKET, SO_ERROR, &retVal, &retValLen);
908     if (rc != 0 || retVal != 0) {
909         if (rc != 0) {
910             rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
911                                                          "%s|Failed SCTP Connection, after EINPROGRESS the getsockopt%s",
912                                                          peerInfo->enodbName, strerror(errno));
913         } else if (retVal != 0) {
914             rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
915                                                          "%s|Failed SCTP Connection after EINPROGRESS, SO_ERROR",
916                                                          peerInfo->enodbName);
917         }
918
919         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
920         message.message.asnLength = rmrMessageBuffer.sendMessage->len;
921         mdclog_write(MDCLOG_ERR, "%s", rmrMessageBuffer.sendMessage->payload);
922         message.message.direction = 'N';
923         if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
924             mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
925         }
926         memset(peerInfo->asnData, 0, peerInfo->asnLength);
927         peerInfo->asnLength = 0;
928         peerInfo->mtype = 0;
929         return;
930     }
931
932     peerInfo->isConnected = true;
933
934     if (modifyToEpoll(params->epoll_fd, peerInfo, (EPOLLIN | EPOLLET), params->sctpMap, peerInfo->enodbName,
935                       peerInfo->mtype) != 0) {
936         mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_MOD");
937         return;
938     }
939
940     message.message.asndata = (unsigned char *)peerInfo->asnData;
941     message.message.asnLength = peerInfo->asnLength;
942     message.message.messageType = peerInfo->mtype;
943     memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
944     num_of_messages.fetch_add(1, std::memory_order_release);
945     if (mdclog_level_get() >= MDCLOG_DEBUG) {
946         mdclog_write(MDCLOG_DEBUG, "send the delayed SETUP/ENDC SETUP to sctp for %s",
947                      message.message.enodbName);
948     }
949     if (sendSctpMsg(peerInfo, message, params->sctpMap) != 0) {
950         if (mdclog_level_get() >= MDCLOG_DEBUG) {
951             mdclog_write(MDCLOG_DEBUG, "Error write to SCTP  %s %d", __func__, __LINE__);
952         }
953         return;
954     }
955
956     memset(peerInfo->asnData, 0, peerInfo->asnLength);
957     peerInfo->asnLength = 0;
958     peerInfo->mtype = 0;
959 }
960
961
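/**
 * Handle EPOLLERR/EPOLLHUP on an SCTP peer: notify the xApps with
 * RIC_SCTP_CONNECTION_FAILURE, close the socket and clean the map entry.
 */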
962 void handlepoll_error(struct epoll_event &event,
963                       ReportingMessages_t &message,
964                       RmrMessagesBuffer_t &rmrMessageBuffer,
965                       sctp_params_t *params) {
966     if (event.data.fd != params->rmrListenFd) {
967         auto *peerInfo = (ConnectedCU_t *)event.data.ptr;
968         mdclog_write(MDCLOG_ERR, "epoll error, events %0x on fd %d, RAN NAME : %s",
969                      event.events, peerInfo->fileDescriptor, peerInfo->enodbName);
970
971         rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
972                                                      "%s|Failed SCTP Connection",
973                                                      peerInfo->enodbName);
974         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
975         message.message.asnLength = rmrMessageBuffer.sendMessage->len;
976
977         memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
978         message.message.direction = 'N';
979         if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
980             mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
981         }
982
983         close(peerInfo->fileDescriptor);
984         params->sctpMap->erase(peerInfo->enodbName);
985         cleanHashEntry((ConnectedCU_t *) event.data.ptr, params->sctpMap);
986     } else {
987         mdclog_write(MDCLOG_ERR, "epoll error, events %0x on RMR FD", event.events);
988     }
989 }
990 /**
991  * Put the given socket into non-blocking mode (adds O_NONBLOCK via fcntl).
992  * @param socket the file descriptor to modify
993  * @return 0 on success, -1 on failure
994  */
995 int setSocketNoBlocking(int socket) {
996     auto flags = fcntl(socket, F_GETFL, 0);
997
998     if (flags == -1) {
999         mdclog_mdc_add("func", "fcntl");
1000         mdclog_write(MDCLOG_ERR, "%s, %s", __FUNCTION__, strerror(errno));
1001         mdclog_mdc_clean();
1002         return -1;
1003     }
1004
1005     flags = (unsigned) flags | (unsigned) O_NONBLOCK;
1006     if (fcntl(socket, F_SETFL, flags) == -1) {
1007         mdclog_mdc_add("func", "fcntl");
1008         mdclog_write(MDCLOG_ERR, "%s, %s", __FUNCTION__, strerror(errno));
1009         mdclog_mdc_clean();
1010         return -1;
1011     }
1012
1013     return 0;
1014 }
1015
1016 /**
1017  *
1018  * @param val
1019  * @param m
1020  */
1021 void cleanHashEntry(ConnectedCU_t *val, Sctp_Map_t *m) {
1022     char *dummy;
1023     auto port = (uint16_t) strtol(val->portNumber, &dummy, 10);
1024     char searchBuff[2048]{};
1025
1026     snprintf(searchBuff, sizeof searchBuff, "host:%s:%d", val->hostName, port);
1027     m->erase(searchBuff);
1028
1029     m->erase(val->enodbName);
1030     free(val);
1031 }
1032
1033 /**
1034  * Send the ASN.1 encoded message to the peer over its SCTP connection.
1035  * @param peerInfo connection state of the target CU (file descriptor, eNB name)
1036  * @param message  wrapper holding the ASN.1 data, its length and reporting metadata
1037  * @param m        host-information map; the peer entry is removed on send failure
1038  * @return 0 on success, a negative number on failure
1042  */
1043 int sendSctpMsg(ConnectedCU_t *peerInfo, ReportingMessages_t &message, Sctp_Map_t *m) {
1044     auto loglevel = mdclog_level_get();
1045     int fd = peerInfo->fileDescriptor;
1046     if (loglevel >= MDCLOG_DEBUG) {
1047         mdclog_write(MDCLOG_DEBUG, "Send SCTP message for CU %s, %s",
1048                      message.message.enodbName, __FUNCTION__);
1049     }
1050
1051     while (true) {
1052         if (send(fd,message.message.asndata, message.message.asnLength,MSG_NOSIGNAL) < 0) {
1053             if (errno == EINTR) {
1054                 continue;
1055             }
1056             mdclog_write(MDCLOG_ERR, "error writing to CU a message, %s ", strerror(errno));
1057             if (!peerInfo->isConnected) {
1058                 mdclog_write(MDCLOG_ERR, "connection to CU %s is still in progress.", message.message.enodbName);
1059                 return -1;
1060             }
1061             cleanHashEntry(peerInfo, m);
1062             close(fd);
1063             char key[MAX_ENODB_NAME_SIZE * 2];
1064             snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", message.message.enodbName,
1065                      message.message.messageType);
1066             if (loglevel >= MDCLOG_DEBUG) {
1067                 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
1068             }
1069             auto tmp = m->find(key);
1070             if (tmp) {
1071                 free(tmp);
1072             }
1073             m->erase(key);
1074             return -1;
1075         }
1076         message.message.direction = 'D';
1077         // send report.buffer of size
1078         buildJsonMessage(message);
1079
1080         if (loglevel >= MDCLOG_DEBUG) {
1081             mdclog_write(MDCLOG_DEBUG,
1082                          "SCTP message for CU %s sent from %s",
1083                          message.message.enodbName,
1084                          __FUNCTION__);
1085         }
1086         return 0;
1087     }
1088 }
1089
1090 /**
1091  *
1092  * @param message
1093  * @param rmrMessageBuffer
1094  */
1095 void getRequestMetaData(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1096     message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
1097     message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
1098
1099     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1100         mdclog_write(MDCLOG_DEBUG, "Message from Xapp RAN name = %s message length = %ld",
1101                      message.message.enodbName, (unsigned long) message.message.asnLength);
1102     }
1103 }
1104
1105
1106
1107 /**
1108  *
1109  * @param events
1110  * @param sctpMap
1111  * @param numOfMessages
1112  * @param rmrMessageBuffer
1113  * @param ts
1114  * @return
1115  */
1116 int receiveDataFromSctp(struct epoll_event *events,
1117                         Sctp_Map_t *sctpMap,
1118                         int &numOfMessages,
1119                         RmrMessagesBuffer_t &rmrMessageBuffer,
1120                         struct timespec &ts) {
1121     /* We have data on the fd waiting to be read. Read and display it.
1122  * We must read whatever data is available completely, as we are running
1123  *  in edge-triggered mode and won't get a notification again for the same data. */
1124     ReportingMessages_t message {};
1125     auto done = 0;
1126     auto loglevel = mdclog_level_get();
1127
1128     // get the identity of the interface
1129     message.peerInfo = (ConnectedCU_t *)events->data.ptr;
1130
1131     struct timespec start{0, 0};
1132     struct timespec decodestart{0, 0};
1133     struct timespec end{0, 0};
1134
1135     E2AP_PDU_t *pdu = nullptr;
1136
1137     while (true) {
1138         if (loglevel >= MDCLOG_DEBUG) {
1139             mdclog_write(MDCLOG_DEBUG, "Start Read from SCTP %d fd", message.peerInfo->fileDescriptor);
1140             clock_gettime(CLOCK_MONOTONIC, &start);
1141         }
1142         // read the buffer directly to rmr payload
1143         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1144         message.message.asnLength = rmrMessageBuffer.sendMessage->len =
1145                 read(message.peerInfo->fileDescriptor, rmrMessageBuffer.sendMessage->payload, RECEIVE_SCTP_BUFFER_SIZE);
1146
1147         if (loglevel >= MDCLOG_DEBUG) {
1148             mdclog_write(MDCLOG_DEBUG, "Finish Read from SCTP %d fd message length = %ld",
1149                          message.peerInfo->fileDescriptor, message.message.asnLength);
1150         }
1151
1152         memcpy(message.message.enodbName, message.peerInfo->enodbName, sizeof(message.peerInfo->enodbName));
1153         message.message.direction = 'U';
1154         message.message.time.tv_nsec = ts.tv_nsec;
1155         message.message.time.tv_sec = ts.tv_sec;
1156
1157         if (message.message.asnLength < 0) {
1158             if (errno == EINTR) {
1159                 continue;
1160             }
1161             /* If errno == EAGAIN, that means we have read all
1162                data. So go back to the main loop. */
1163             if (errno != EAGAIN) {
1164                 mdclog_write(MDCLOG_ERR, "Read error, %s ", strerror(errno));
1165                 done = 1;
1166             } else if (loglevel >= MDCLOG_DEBUG) {
1167                 mdclog_write(MDCLOG_DEBUG, "EAGAIN - descriptor = %d", message.peerInfo->fileDescriptor);
1168             }
1169             break;
1170         } else if (message.message.asnLength == 0) {
1171             /* End of file. The remote has closed the connection. */
1172             if (loglevel >= MDCLOG_INFO) {
1173                 mdclog_write(MDCLOG_INFO, "END of File Closed connection - descriptor = %d",
1174                              message.peerInfo->fileDescriptor);
1175             }
1176             done = 1;
1177             break;
1178         }
1179
1180         if (loglevel >= MDCLOG_DEBUG) {
1181             char printBuffer[4096]{};
1182             char *tmp = printBuffer;
1183             for (size_t i = 0; i < (size_t)message.message.asnLength; ++i) {
1184                 snprintf(tmp, 3, "%02x", message.message.asndata[i]);
1185                 tmp += 2;
1186             }
1187             printBuffer[message.message.asnLength] = 0;
1188             clock_gettime(CLOCK_MONOTONIC, &end);
1189             mdclog_write(MDCLOG_DEBUG, "Before decoding E2AP PDU for: %s, read time is: %ld seconds, %ld nanoseconds",
1190                          message.peerInfo->enodbName, end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
1191             mdclog_write(MDCLOG_DEBUG, "PDU buffer length = %ld, data =  : %s", message.message.asnLength,
1192                          printBuffer);
1193             clock_gettime(CLOCK_MONOTONIC, &decodestart);
1194         }
1195
1196         auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1197                           message.message.asndata, message.message.asnLength);
1198         if (rval.code != RC_OK) {
1199             mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2AP PDU from RAN : %s", rval.code,
1200                          message.peerInfo->enodbName);
1201             break;
1202         }
1203
1204         if (loglevel >= MDCLOG_DEBUG) {
1205             clock_gettime(CLOCK_MONOTONIC, &end);
1206             mdclog_write(MDCLOG_DEBUG, "After decoding E2AP PDU for: %s, decode time is: %ld seconds, %ld nanoseconds",
1207                          message.peerInfo->enodbName, end.tv_sec - decodestart.tv_sec, end.tv_nsec - decodestart.tv_nsec);
1208             char *printBuffer = nullptr;
1209             size_t size = 0;
1210             FILE *stream = open_memstream(&printBuffer, &size);
1211             asn_fprint(stream, &asn_DEF_E2AP_PDU, pdu);
1212             fclose(stream);   // flush the memory stream so printBuffer holds the printed PDU
            mdclog_write(MDCLOG_DEBUG, "Decoded E2AP PDU: %s", printBuffer);
            free(printBuffer);
1213             clock_gettime(CLOCK_MONOTONIC, &decodestart);
1214         }
1215
1216         switch (pdu->present) {
1217             case E2AP_PDU_PR_initiatingMessage: {//initiating message
1218                 asnInitiatingRequest(pdu, sctpMap,message, rmrMessageBuffer);
1219                 break;
1220             }
1221             case E2AP_PDU_PR_successfulOutcome: { //successful outcome
1222                 asnSuccsesfulMsg(pdu, sctpMap, message,  rmrMessageBuffer);
1223                 break;
1224             }
1225             case E2AP_PDU_PR_unsuccessfulOutcome: { //Unsuccessful Outcome
1226                 asnUnSuccsesfulMsg(pdu, sctpMap, message, rmrMessageBuffer);
1227                 break;
1228             }
1229             default:
1230                 mdclog_write(MDCLOG_ERR, "Unknown index %d in E2AP PDU", pdu->present);
1231                 break;
1232         }
1233         if (loglevel >= MDCLOG_DEBUG) {
1234             clock_gettime(CLOCK_MONOTONIC, &end);
1235             mdclog_write(MDCLOG_DEBUG,
1236                          "After processing the message and sending to RMR for: %s, time is: %ld seconds, %ld nanoseconds",
1237                          message.peerInfo->enodbName, end.tv_sec - decodestart.tv_sec, end.tv_nsec - decodestart.tv_nsec);
1238         }
1239         numOfMessages++;
1240         if (pdu != nullptr) {
1241             ASN_STRUCT_RESET(asn_DEF_E2AP_PDU, pdu);
1242             //ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);
1243             //pdu = nullptr;
1244         }
1245     }
1246
1247     if (done) {
1248         if (loglevel >= MDCLOG_INFO) {
1249             mdclog_write(MDCLOG_INFO, "Closed connection - descriptor = %d", message.peerInfo->fileDescriptor);
1250         }
1251         message.message.asnLength = rmrMessageBuffer.sendMessage->len =
1252                 snprintf((char *)rmrMessageBuffer.sendMessage->payload,
1253                          256,
1254                          "%s|CU disconnected unexpectedly",
1255                          message.peerInfo->enodbName);
1256         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1257
1258         if (sendRequestToXapp(message,
1259                               RIC_SCTP_CONNECTION_FAILURE,
1260                               rmrMessageBuffer) != 0) {
1261             mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
1262         }
1263
1264         /* Closing the descriptor makes epoll remove it from the set of monitored descriptors. */
1265         close(message.peerInfo->fileDescriptor);
1266         cleanHashEntry((ConnectedCU_t *) events->data.ptr, sctpMap);
1267     }
1268     if (loglevel >= MDCLOG_DEBUG) {
1269         clock_gettime(CLOCK_MONOTONIC, &end);
1270         mdclog_write(MDCLOG_DEBUG, "from receive SCTP to send RMR time is %ld seconds and %ld nanoseconds",
1271                      end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
1272
1273     }
1274     return 0;
1275 }
1276
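/**
 * XER-encode the received setup request PDU, strip whitespace, prefix it with
 * "<local-ip>:<rmr-port>|" and forward it over RMR, retrying once on RMR_ERR_RETRY.
 * Also marks the peer as having completed setup and writes the trace/JSON record.
 */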
1277 static void buildAndsendSetupRequest(ReportingMessages_t &message,
1278                                      RmrMessagesBuffer_t &rmrMessageBuffer,
1279                                      E2AP_PDU_t *pdu/*,
1280                                      string const &messageName,
1281                                      string const &ieName,
1282                                      vector<string> &functionsToAdd_v,
1283                                      vector<string> &functionsToModified_v*/) {
1284     auto logLevel = mdclog_level_get();
1285     // now we can send the data to e2Mgr
1286
1287     asn_enc_rval_t er;
1288     auto buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
1289     unsigned char buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
1290     while (true) {
1291         er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu, buffer, buffer_size);
1292         if (er.encoded == -1) {
1293             mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1294             return;
1295         } else if (er.encoded > (ssize_t) buffer_size) {
1296             buffer_size = er.encoded + 128;
1297             mdclog_write(MDCLOG_WARN, "Buffer of size %d is too small for %s. Reallocate buffer of size %d",
1298                          (int) buffer_size,
1299                          asn_DEF_E2AP_PDU.name, buffer_size);
1301 //            free(buffer);
1302             continue;
1303         }
1304         buffer[er.encoded] = '\0';
1305         break;
1306     }
1307     // encode to xml
1308
1309     string res((char *)buffer);
1310     res.erase(std::remove(res.begin(), res.end(), '\n'), res.end());
1311     res.erase(std::remove(res.begin(), res.end(), '\t'), res.end());
1312     res.erase(std::remove(res.begin(), res.end(), ' '), res.end());
1313
1314 //    string res {};
1315 //    if (!functionsToAdd_v.empty() || !functionsToModified_v.empty()) {
1316 //        res = buildXmlData(messageName, ieName, functionsToAdd_v, functionsToModified_v, buffer, (size_t) er.encoded);
1317 //    }
1318     rmr_mbuf_t *rmrMsg;
1319 //    if (res.length() == 0) {
1320 //        rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, buffer_size + 256);
1321 //        rmrMsg->len = snprintf((char *) rmrMsg->payload, RECEIVE_SCTP_BUFFER_SIZE * 2, "%s:%d|%s",
1322 //                               message.peerInfo->sctpParams->myIP.c_str(),
1323 //                               message.peerInfo->sctpParams->rmrPort,
1324 //                               buffer);
1325 //    } else {
1326         rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, (int)res.length() + 256);
1327         rmrMsg->len = snprintf((char *) rmrMsg->payload, res.length() + 256, "%s:%d|%s",
1328                                message.peerInfo->sctpParams->myIP.c_str(),
1329                                message.peerInfo->sctpParams->rmrPort,
1330                                res.c_str());
1331 //    }
1332
1333     if (logLevel >= MDCLOG_DEBUG) {
1334         mdclog_write(MDCLOG_DEBUG, "Setup request of size %d :\n %s\n", rmrMsg->len, rmrMsg->payload);
1335     }
1336     // send to RMR
1337     rmrMsg->mtype = message.message.messageType;
1338     rmrMsg->state = 0;
1339     rmr_bytes2meid(rmrMsg, (unsigned char *) message.message.enodbName, strlen(message.message.enodbName));
1340
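    // tag the RMR message with a transaction id taken from the monotonically increasing counter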
1341     static unsigned char tx[32];
1342     snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1343     rmr_bytes2xact(rmrMsg, tx, strlen((const char *) tx));
1344
1345     rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
1346     if (rmrMsg == nullptr) {
        mdclog_write(MDCLOG_ERR, "RMR send failed, returned nullptr");
1348     } else if (rmrMsg->state != 0) {
1349         char meid[RMR_MAX_MEID]{};
1350         if (rmrMsg->state == RMR_ERR_RETRY) {
1351             usleep(5);
1352             rmrMsg->state = 0;
1353             mdclog_write(MDCLOG_INFO, "RETRY sending Message %d to Xapp from %s",
1354                          rmrMsg->mtype, rmr_get_meid(rmrMsg, (unsigned char *) meid));
1355             rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
1356             if (rmrMsg == nullptr) {
                mdclog_write(MDCLOG_ERR, "RMR retry send failed, returned nullptr");
1358             } else if (rmrMsg->state != 0) {
1359                 mdclog_write(MDCLOG_ERR,
1360                              "RMR Retry failed %s sending request %d to Xapp from %s",
1361                              translateRmrErrorMessages(rmrMsg->state).c_str(),
1362                              rmrMsg->mtype,
1363                              rmr_get_meid(rmrMsg, (unsigned char *) meid));
1364             }
1365         } else {
            mdclog_write(MDCLOG_ERR, "RMR failed: %s, sending request %d to Xapp from %s",
1367                          translateRmrErrorMessages(rmrMsg->state).c_str(),
1368                          rmrMsg->mtype,
1369                          rmr_get_meid(rmrMsg, (unsigned char *) meid));
1370         }
1371     }
1372     message.peerInfo->gotSetup = true;
1373     buildJsonMessage(message);
1374     if (rmrMsg != nullptr) {
1375         rmr_free_msg(rmrMsg);
1376     }
1377 }
1378
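// The block below is compiled out (#if 0); it converted RAN function definitions to XML
// for the E2 Manager and appears to be kept for reference only.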
1379 #if 0
1380 int RAN_Function_list_To_Vector(RANfunctions_List_t& list, vector <string> &runFunXML_v) {
1381     auto index = 0;
1382     runFunXML_v.clear();
1383     for (auto j = 0; j < list.list.count; j++) {
1384         auto *raNfunctionItemIEs = (RANfunction_ItemIEs_t *)list.list.array[j];
1385         if (raNfunctionItemIEs->id == ProtocolIE_ID_id_RANfunction_Item &&
1386             (raNfunctionItemIEs->value.present == RANfunction_ItemIEs__value_PR_RANfunction_Item)) {
1387             // encode to xml
1388             E2SM_gNB_NRT_RANfunction_Definition_t *ranFunDef = nullptr;
1389             auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER,
1390                                    &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
1391                                    (void **)&ranFunDef,
1392                                    raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.buf,
1393                                    raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.size);
1394             if (rval.code != RC_OK) {
                mdclog_write(MDCLOG_ERR, "Error %d decoding (unpack) E2SM message from %s",
1396                              rval.code,
1397                              asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name);
1398                 return -1;
1399             }
1400
1401             auto xml_buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
1402             unsigned char xml_buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
1403             memset(xml_buffer, 0, RECEIVE_SCTP_BUFFER_SIZE * 2);
1404             // encode to xml
1405             auto er = asn_encode_to_buffer(nullptr,
1406                                            ATS_BASIC_XER,
1407                                            &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
1408                                            ranFunDef,
1409                                            xml_buffer,
1410                                            xml_buffer_size);
1411             if (er.encoded == -1) {
1412                 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s",
1413                              asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
1414                              strerror(errno));
1415             } else if (er.encoded > (ssize_t)xml_buffer_size) {
                mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, at %s line %d",
1417                              (int) xml_buffer_size,
1418                              asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name, __func__, __LINE__);
1419             } else {
1420                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1421                     mdclog_write(MDCLOG_DEBUG, "Encoding E2SM %s PDU number %d : %s",
1422                                  asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
1423                                  index++,
1424                                  xml_buffer);
1425                 }
1426
1427                 string runFuncs = (char *)(xml_buffer);
1428                 runFunXML_v.emplace_back(runFuncs);
1429             }
1430         }
1431     }
1432     return 0;
1433 }
1434
1435 int collectServiceUpdate_RequestData(E2AP_PDU_t *pdu,
1436                                      Sctp_Map_t *sctpMap,
1437                                      ReportingMessages_t &message,
1438                                      vector <string> &RANfunctionsAdded_v,
1439                                      vector <string> &RANfunctionsModified_v) {
1440     memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
1441     for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.count; i++) {
1442         auto *ie = pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.array[i];
1443         if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
1444             if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctionsID_List) {
1445                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
                    mdclog_write(MDCLOG_DEBUG, "RAN function list has %d entries",
1447                                  ie->value.choice.RANfunctions_List.list.count);
1448                 }
1449                 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
1450                     return -1;
1451                 }
1452             }
1453         } else if (ie->id == ProtocolIE_ID_id_RANfunctionsModified) {
1454             if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctions_List) {
1455                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
                    mdclog_write(MDCLOG_DEBUG, "RAN function list has %d entries",
1457                                  ie->value.choice.RANfunctions_List.list.count);
1458                 }
1459                 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsModified_v) != 0 ) {
1460                     return -1;
1461                 }
1462             }
1463         }
1464     }
1465     if (mdclog_level_get() >= MDCLOG_DEBUG) {
        mdclog_write(MDCLOG_DEBUG, "RAN function vector has %ld entries",
1467                      RANfunctionsAdded_v.size());
1468     }
1469     return 0;
1470 }
1471
1472 #endif
1473
1474
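/**
 * Registers the per-E2-node Prometheus counters (message and byte counts) for every E2AP
 * procedure handled here, in each direction: incoming and outgoing initiating messages,
 * successful outcomes and unsuccessful outcomes. The message handlers increment them.
 */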
1475 void buildPrometheuslist(ConnectedCU_t *peerInfo, Family<Counter> *prometheusFamily) {
1476     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Messages"}});
1477     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Bytes"}});
1478
1479     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Messages"}});
1480     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Bytes"}});
1481
1482     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICindication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Messages"}});
1483     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICindication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Bytes"}});
1484
1485     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Messages"}});
1486     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Bytes"}});
1487
1488     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Messages"}});
1489     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Bytes"}});
1490     // ---------------------------------------------
1491     peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Messages"}});
1492     peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Bytes"}});
1493
1494     peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Messages"}});
1495     peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Bytes"}});
1496
1497     peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Messages"}});
1498     peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Bytes"}});
1499
1500     peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Messages"}});
1501     peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Bytes"}});
1502     //-------------------------------------------------------------
1503
1504     peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Messages"}});
1505     peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Bytes"}});
1506
1507     peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Messages"}});
1508     peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Bytes"}});
1509
1510     peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Messages"}});
1511     peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Bytes"}});
1512
1513     //====================================================================================
1514     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Messages"}});
1515     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Bytes"}});
1516
1517     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Messages"}});
1518     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Bytes"}});
1519
1520     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Messages"}});
1521     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Bytes"}});
1522
1523     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceQuery - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Messages"}});
1524     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceQuery - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Bytes"}});
1525
1526     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Messages"}});
1527     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Bytes"}});
1528
1529     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Messages"}});
1530     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Bytes"}});
1531     //---------------------------------------------------------------------------------------------------------
1532     peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Messages"}});
1533     peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Bytes"}});
1534
1535     peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Messages"}});
1536     peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Bytes"}});
1537
1538     peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Messages"}});
1539     peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Bytes"}});
1540     //----------------------------------------------------------------------------------------------------------------
1541     peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Messages"}});
1542     peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Bytes"}});
1543
1544     peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Messages"}});
1545     peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Bytes"}});
1546 }
/**
 * Extracts the E2 node name from the E2setupRequest GlobalE2node_ID IE and registers
 * the peer in the SCTP map.
 *
 * @param pdu
 * @param sctpMap
 * @param message
 * @return 0 on success, -1 when the GlobalE2node_ID is malformed
 */
1555 int collectSetupRequestData(E2AP_PDU_t *pdu,
1556                                      Sctp_Map_t *sctpMap,
1557                                      ReportingMessages_t &message /*, vector <string> &RANfunctionsAdded_v*/) {
1558     memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
1559     for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.count; i++) {
1560         auto *ie = pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.array[i];
1561         if (ie->id == ProtocolIE_ID_id_GlobalE2node_ID) {
1562             // get the ran name for meid
1563             if (ie->value.present == E2setupRequestIEs__value_PR_GlobalE2node_ID) {
1564                 if (buildRanName(message.peerInfo->enodbName, ie) < 0) {
1565                     mdclog_write(MDCLOG_ERR, "Bad param in E2setupRequestIEs GlobalE2node_ID.\n");
                    // no message will be sent
1567                     return -1;
1568                 }
1569
1570                 memcpy(message.message.enodbName, message.peerInfo->enodbName, strlen(message.peerInfo->enodbName));
1571                 sctpMap->setkey(message.message.enodbName, message.peerInfo);
1572             }
1573         } /*else if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
1574             if (ie->value.present == E2setupRequestIEs__value_PR_RANfunctions_List) {
1575                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1576                     mdclog_write(MDCLOG_DEBUG, "Run function list have %d entries",
1577                                  ie->value.choice.RANfunctions_List.list.count);
1578                 }
1579                 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
1580                     return -1;
1581                 }
1582             }
1583         } */
1584     }
1585 //    if (mdclog_level_get() >= MDCLOG_DEBUG) {
1586 //        mdclog_write(MDCLOG_DEBUG, "Run function vector have %ld entries",
1587 //                     RANfunctionsAdded_v.size());
1588 //    }
1589     return 0;
1590 }
1591
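/**
 * Decodes the aligned-PER E2AP PDU held in the RMR send buffer and re-encodes it in place
 * as XER (XML), so the xApp receives a readable payload.
 *
 * @param message
 * @param rmrMessageBuffer
 * @return 0 on success, -1 on a decode or encode failure
 */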
1592 int XML_From_PER(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1593     E2AP_PDU_t *pdu = nullptr;
1594
1595     if (mdclog_level_get() >= MDCLOG_DEBUG) {
        mdclog_write(MDCLOG_DEBUG, "got PER message of size %d: %s",
1597                      rmrMessageBuffer.sendMessage->len, rmrMessageBuffer.sendMessage->payload);
1598     }
1599     auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1600                            rmrMessageBuffer.sendMessage->payload, rmrMessageBuffer.sendMessage->len);
1601     if (rval.code != RC_OK) {
        mdclog_write(MDCLOG_ERR, "Error %d decoding (unpack) E2AP PDU from E2 node %s",
1603                      rval.code,
1604                      message.message.enodbName);
1605         return -1;
1606     }
1607
1608     int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
1609     auto er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu,
1610                                    rmrMessageBuffer.sendMessage->payload, buff_size);
1611     if (er.encoded == -1) {
1612         mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1613         return -1;
1614     } else if (er.encoded > (ssize_t)buff_size) {
        mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, at %s line %d",
                     (int)buff_size,
1617                      asn_DEF_E2AP_PDU.name,
1618                      __func__,
1619                      __LINE__);
1620         return -1;
1621     }
1622     rmrMessageBuffer.sendMessage->len = er.encoded;
1623     return 0;
1624
1625 }
1626
/**
 * Routes an E2AP initiating message received from the E2 node (E2setup, RICserviceUpdate,
 * ErrorIndication, Reset, RICindication) towards the E2 Manager or xApps over RMR.
 *
 * @param pdu
 * @param sctpMap
 * @param message
 * @param rmrMessageBuffer
 */
1633 void asnInitiatingRequest(E2AP_PDU_t *pdu,
1634                           Sctp_Map_t *sctpMap,
1635                           ReportingMessages_t &message,
1636                           RmrMessagesBuffer_t &rmrMessageBuffer) {
1637     auto logLevel = mdclog_level_get();
1638     auto procedureCode = ((InitiatingMessage_t *) pdu->choice.initiatingMessage)->procedureCode;
1639     if (logLevel >= MDCLOG_DEBUG) {
1640         mdclog_write(MDCLOG_DEBUG, "Initiating message %ld\n", procedureCode);
1641     }
1642     switch (procedureCode) {
1643         case ProcedureCode_id_E2setup: {
1644             if (logLevel >= MDCLOG_DEBUG) {
1645                 mdclog_write(MDCLOG_DEBUG, "Got E2setup");
1646             }
1647
1648 //            vector <string> RANfunctionsAdded_v;
1649 //            vector <string> RANfunctionsModified_v;
1650 //            RANfunctionsAdded_v.clear();
1651 //            RANfunctionsModified_v.clear();
1652             if (collectSetupRequestData(pdu, sctpMap, message) != 0) {
1653                 break;
1654             }
1655
1656             buildPrometheuslist(message.peerInfo, message.peerInfo->sctpParams->prometheusFamily);
1657
1658             string messageName("E2setupRequest");
1659             string ieName("E2setupRequestIEs");
1660             message.message.messageType = RIC_E2_SETUP_REQ;
1661             message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_E2setup - 1]->Increment();
1662             message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_E2setup - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1663             buildAndsendSetupRequest(message, rmrMessageBuffer, pdu);
1664             break;
1665         }
1666         case ProcedureCode_id_RICserviceUpdate: {
1667             if (logLevel >= MDCLOG_DEBUG) {
1668                 mdclog_write(MDCLOG_DEBUG, "Got RICserviceUpdate %s", message.message.enodbName);
1669             }
1670 //            vector <string> RANfunctionsAdded_v;
1671 //            vector <string> RANfunctionsModified_v;
1672 //            RANfunctionsAdded_v.clear();
1673 //            RANfunctionsModified_v.clear();
1674 //            if (collectServiceUpdate_RequestData(pdu, sctpMap, message,
1675 //                                                 RANfunctionsAdded_v, RANfunctionsModified_v) != 0) {
1676 //                break;
1677 //            }
1678
1679             string messageName("RICserviceUpdate");
1680             string ieName("RICserviceUpdateIEs");
1681             message.message.messageType = RIC_SERVICE_UPDATE;
1682             message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate - 1]->Increment();
1683             message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1684
1685             buildAndsendSetupRequest(message, rmrMessageBuffer, pdu);
1686             break;
1687         }
1688         case ProcedureCode_id_ErrorIndication: {
1689             if (logLevel >= MDCLOG_DEBUG) {
1690                 mdclog_write(MDCLOG_DEBUG, "Got ErrorIndication %s", message.message.enodbName);
1691             }
1692             message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication - 1]->Increment();
1693             message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1694             if (sendRequestToXapp(message, RIC_ERROR_INDICATION, rmrMessageBuffer) != 0) {
1695                 mdclog_write(MDCLOG_ERR, "RIC_ERROR_INDICATION failed to send to xAPP");
1696             }
1697             break;
1698         }
1699         case ProcedureCode_id_Reset: {
1700             if (logLevel >= MDCLOG_DEBUG) {
1701                 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
1702             }
1703
1704             message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_Reset - 1]->Increment();
1705             message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_Reset - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1706             if (XML_From_PER(message, rmrMessageBuffer) < 0) {
1707                 break;
1708             }
1709
1710             if (sendRequestToXapp(message, RIC_E2_RESET_REQ, rmrMessageBuffer) != 0) {
1711                 mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_REQ message failed to send to xAPP");
1712             }
1713             break;
1714         }
1715         case ProcedureCode_id_RICindication: {
1716             if (logLevel >= MDCLOG_DEBUG) {
1717                 mdclog_write(MDCLOG_DEBUG, "Got RICindication %s", message.message.enodbName);
1718             }
1719             for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.count; i++) {
1720                 auto messageSent = false;
1721                 RICindication_IEs_t *ie = pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.array[i];
1722                 if (logLevel >= MDCLOG_DEBUG) {
1723                     mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1724                 }
1725                 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1726                     if (logLevel >= MDCLOG_DEBUG) {
1727                         mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1728                     }
1729                     if (ie->value.present == RICindication_IEs__value_PR_RICrequestID) {
1730                         static unsigned char tx[32];
1731                         message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_INDICATION;
1732                         snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1733                         rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1734                         rmr_bytes2meid(rmrMessageBuffer.sendMessage,
1735                                        (unsigned char *)message.message.enodbName,
1736                                        strlen(message.message.enodbName));
1737                         rmrMessageBuffer.sendMessage->state = 0;
1738                         rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1739
1741                         if (mdclog_level_get() >= MDCLOG_DEBUG) {
1742                             mdclog_write(MDCLOG_DEBUG, "sub id = %d, mtype = %d, ric instance id %ld, requestor id = %ld",
1743                                          rmrMessageBuffer.sendMessage->sub_id,
1744                                          rmrMessageBuffer.sendMessage->mtype,
1745                                          ie->value.choice.RICrequestID.ricInstanceID,
1746                                          ie->value.choice.RICrequestID.ricRequestorID);
1747                         }
1748                         message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICindication - 1]->Increment();
1749                         message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICindication - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1750                         sendRmrMessage(rmrMessageBuffer, message);
1751                         messageSent = true;
1752                     } else {
                        mdclog_write(MDCLOG_ERR, "RIC request id missing, illegal request");
1754                     }
1755                 }
1756                 if (messageSent) {
1757                     break;
1758                 }
1759             }
1760             break;
1761         }
1762         default: {
1763             mdclog_write(MDCLOG_ERR, "Undefined or not supported message = %ld", procedureCode);
1764             message.message.messageType = 0; // no RMR message type yet
1765
1766             buildJsonMessage(message);
1767
1768             break;
1769         }
1770     }
1771 }
1772
/**
 * Routes an E2AP successful-outcome message received from the E2 node (Reset ACK,
 * RICcontrolAcknowledge, RICsubscription/RICsubscriptionDelete responses) to the xApp over RMR.
 *
 * @param pdu
 * @param sctpMap
 * @param message
 * @param rmrMessageBuffer
 */
1779 void asnSuccsesfulMsg(E2AP_PDU_t *pdu,
1780                       Sctp_Map_t *sctpMap,
1781                       ReportingMessages_t &message,
1782                       RmrMessagesBuffer_t &rmrMessageBuffer) {
1783     auto procedureCode = pdu->choice.successfulOutcome->procedureCode;
1784     auto logLevel = mdclog_level_get();
1785     if (logLevel >= MDCLOG_INFO) {
1786         mdclog_write(MDCLOG_INFO, "Successful Outcome %ld", procedureCode);
1787     }
1788     switch (procedureCode) {
1789         case ProcedureCode_id_Reset: {
1790             if (logLevel >= MDCLOG_DEBUG) {
1791                 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
1792             }
1793             message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_Reset - 1]->Increment();
1794             message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1795             if (XML_From_PER(message, rmrMessageBuffer) < 0) {
1796                 break;
1797             }
1798             if (sendRequestToXapp(message, RIC_E2_RESET_RESP, rmrMessageBuffer) != 0) {
1799                 mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_RESP message failed to send to xAPP");
1800             }
1801             break;
1802         }
1803         case ProcedureCode_id_RICcontrol: {
1804             if (logLevel >= MDCLOG_DEBUG) {
1805                 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
1806             }
1807             for (auto i = 0;
1808                  i < pdu->choice.successfulOutcome->value.choice.RICcontrolAcknowledge.protocolIEs.list.count; i++) {
1809                 auto messageSent = false;
1810                 RICcontrolAcknowledge_IEs_t *ie = pdu->choice.successfulOutcome->value.choice.RICcontrolAcknowledge.protocolIEs.list.array[i];
1811                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1812                     mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1813                 }
1814                 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1815                     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1816                         mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1817                     }
1818                     if (ie->value.present == RICcontrolAcknowledge_IEs__value_PR_RICrequestID) {
1819                         message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_ACK;
1820                         rmrMessageBuffer.sendMessage->state = 0;
1821 //                        rmrMessageBuffer.sendMessage->sub_id = (int) ie->value.choice.RICrequestID.ricRequestorID;
1822                         rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1823
1824                         static unsigned char tx[32];
1825                         snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1826                         rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1827                         rmr_bytes2meid(rmrMessageBuffer.sendMessage,
1828                                        (unsigned char *)message.message.enodbName,
1829                                        strlen(message.message.enodbName));
1830
1831                         message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment();
1832                         message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1833                         sendRmrMessage(rmrMessageBuffer, message);
1834                         messageSent = true;
1835                     } else {
                        mdclog_write(MDCLOG_ERR, "RIC request id missing, illegal request");
1837                     }
1838                 }
1839                 if (messageSent) {
1840                     break;
1841                 }
1842             }
1843
1844             break;
1845         }
1846         case ProcedureCode_id_RICsubscription: {
1847             if (logLevel >= MDCLOG_DEBUG) {
1848                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
1849             }
1850             message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment();
1851             message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1852             if (sendRequestToXapp(message, RIC_SUB_RESP, rmrMessageBuffer) != 0) {
1853                 mdclog_write(MDCLOG_ERR, "Subscription successful message failed to send to xAPP");
1854             }
1855             break;
1856         }
1857         case ProcedureCode_id_RICsubscriptionDelete: {
1858             if (logLevel >= MDCLOG_DEBUG) {
1859                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
1860             }
1861             message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment();
1862             message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1863             if (sendRequestToXapp(message, RIC_SUB_DEL_RESP, rmrMessageBuffer) != 0) {
1864                 mdclog_write(MDCLOG_ERR, "Subscription delete successful message failed to send to xAPP");
1865             }
1866             break;
1867         }
1868         default: {
1869             mdclog_write(MDCLOG_WARN, "Undefined or not supported message = %ld", procedureCode);
1870             message.message.messageType = 0; // no RMR message type yet
1871             buildJsonMessage(message);
1872
1873             break;
1874         }
1875     }
1876 }
1877
/**
 * Routes an E2AP unsuccessful-outcome message received from the E2 node (RICcontrolFailure,
 * RICsubscription/RICsubscriptionDelete failures) to the xApp over RMR.
 *
 * @param pdu
 * @param sctpMap
 * @param message
 * @param rmrMessageBuffer
 */
1884 void asnUnSuccsesfulMsg(E2AP_PDU_t *pdu,
1885                         Sctp_Map_t *sctpMap,
1886                         ReportingMessages_t &message,
1887                         RmrMessagesBuffer_t &rmrMessageBuffer) {
1888     auto procedureCode = pdu->choice.unsuccessfulOutcome->procedureCode;
1889     auto logLevel = mdclog_level_get();
1890     if (logLevel >= MDCLOG_INFO) {
1891         mdclog_write(MDCLOG_INFO, "Unsuccessful Outcome %ld", procedureCode);
1892     }
1893     switch (procedureCode) {
1894         case ProcedureCode_id_RICcontrol: {
1895             if (logLevel >= MDCLOG_DEBUG) {
1896                 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
1897             }
1898             for (int i = 0;
1899                  i < pdu->choice.unsuccessfulOutcome->value.choice.RICcontrolFailure.protocolIEs.list.count; i++) {
1900                 auto messageSent = false;
1901                 RICcontrolFailure_IEs_t *ie = pdu->choice.unsuccessfulOutcome->value.choice.RICcontrolFailure.protocolIEs.list.array[i];
1902                 if (logLevel >= MDCLOG_DEBUG) {
1903                     mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1904                 }
1905                 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1906                     if (logLevel >= MDCLOG_DEBUG) {
1907                         mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1908                     }
1909                     if (ie->value.present == RICcontrolFailure_IEs__value_PR_RICrequestID) {
1910                         message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_FAILURE;
1911                         rmrMessageBuffer.sendMessage->state = 0;
1912 //                        rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricRequestorID;
1913                         rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1914                         static unsigned char tx[32];
1915                         snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1916                         rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1917                         rmr_bytes2meid(rmrMessageBuffer.sendMessage, (unsigned char *) message.message.enodbName,
1918                                        strlen(message.message.enodbName));
1919                         message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment();
1920                         message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1921                         sendRmrMessage(rmrMessageBuffer, message);
1922                         messageSent = true;
1923                     } else {
                        mdclog_write(MDCLOG_ERR, "RIC request id missing, illegal request");
1925                     }
1926                 }
1927                 if (messageSent) {
1928                     break;
1929                 }
1930             }
1931             break;
1932         }
1933         case ProcedureCode_id_RICsubscription: {
1934             if (logLevel >= MDCLOG_DEBUG) {
1935                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
1936             }
1937             message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment();
1938             message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1939             if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
1940                 mdclog_write(MDCLOG_ERR, "Subscription unsuccessful message failed to send to xAPP");
1941             }
1942             break;
1943         }
1944         case ProcedureCode_id_RICsubscriptionDelete: {
1945             if (logLevel >= MDCLOG_DEBUG) {
1946                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
1947             }
1948             message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment();
1949             message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
            if (sendRequestToXapp(message, RIC_SUB_DEL_FAILURE, rmrMessageBuffer) != 0) {
1951                 mdclog_write(MDCLOG_ERR, "Subscription Delete unsuccessful message failed to send to xAPP");
1952             }
1953             break;
1954         }
1955         default: {
1956             mdclog_write(MDCLOG_WARN, "Undefined or not supported message = %ld", procedureCode);
1957             message.message.messageType = 0; // no RMR message type yet
1958
1959             buildJsonMessage(message);
1960
1961             break;
1962         }
1963     }
1964 }
1965
1966 /**
1967  *
1968  * @param message
1969  * @param requestId
1970  * @param rmrMmessageBuffer
1971  * @return
1972  */
1973 int sendRequestToXapp(ReportingMessages_t &message,
1974                       int requestId,
1975                       RmrMessagesBuffer_t &rmrMmessageBuffer) {
1976     rmr_bytes2meid(rmrMmessageBuffer.sendMessage,
1977                    (unsigned char *)message.message.enodbName,
1978                    strlen(message.message.enodbName));
1979     message.message.messageType = rmrMmessageBuffer.sendMessage->mtype = requestId;
1980     rmrMmessageBuffer.sendMessage->state = 0;
1981     static unsigned char tx[32];
1982     snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1983     rmr_bytes2xact(rmrMmessageBuffer.sendMessage, tx, strlen((const char *) tx));
1984
1985     auto rc = sendRmrMessage(rmrMmessageBuffer, message);
1986     return rc;
1987 }
1988
1989 /**
1990  *
1991  * @param pSctpParams
1992  */
1993 void getRmrContext(sctp_params_t &pSctpParams) {
1994     pSctpParams.rmrCtx = nullptr;
1995     pSctpParams.rmrCtx = rmr_init(pSctpParams.rmrAddress, RECEIVE_XAPP_BUFFER_SIZE, RMRFL_NONE);
1996     if (pSctpParams.rmrCtx == nullptr) {
1997         mdclog_write(MDCLOG_ERR, "Failed to initialize RMR");
1998         return;
1999     }
2000
2001     rmr_set_stimeout(pSctpParams.rmrCtx, 0);    // disable retries for any send operation
    // wait until a routing table exists and RMR reports ready before continuing
2003     if (mdclog_level_get() >= MDCLOG_INFO) {
2004         mdclog_write(MDCLOG_INFO, "We are after RMR INIT wait for RMR_Ready");
2005     }
2006     int rmrReady = 0;
2007     int count = 0;
2008     while (!rmrReady) {
2009         if ((rmrReady = rmr_ready(pSctpParams.rmrCtx)) == 0) {
2010             sleep(1);
2011         }
2012         count++;
2013         if (count % 60 == 0) {
            mdclog_write(MDCLOG_INFO, "waiting for RMR ready state for %d seconds", count);
2015         }
2016     }
2017     if (mdclog_level_get() >= MDCLOG_INFO) {
2018         mdclog_write(MDCLOG_INFO, "RMR running");
2019     }
2020     rmr_init_trace(pSctpParams.rmrCtx, 200);
2021     // get the RMR fd for the epoll
2022     pSctpParams.rmrListenFd = rmr_get_rcvfd(pSctpParams.rmrCtx);
2023     struct epoll_event event{};
2024     // add RMR fd to epoll
2025     event.events = (EPOLLIN);
2026     event.data.fd = pSctpParams.rmrListenFd;
2027     // add listening RMR FD to epoll
2028     if (epoll_ctl(pSctpParams.epoll_fd, EPOLL_CTL_ADD, pSctpParams.rmrListenFd, &event)) {
2029         mdclog_write(MDCLOG_ERR, "Failed to add RMR descriptor to epoll");
2030         close(pSctpParams.rmrListenFd);
2031         rmr_close(pSctpParams.rmrCtx);
2032         pSctpParams.rmrCtx = nullptr;
2033     }
2034 }
2035
2036 /**
2037  *
2038  * @param message
2039  * @param rmrMessageBuffer
2040  * @return
2041  */
2042 int PER_FromXML(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
2043     E2AP_PDU_t *pdu = nullptr;
2044
2045     if (mdclog_level_get() >= MDCLOG_DEBUG) {
        mdclog_write(MDCLOG_DEBUG, "got XML format data from xApp of size %d: %s",
2047                 rmrMessageBuffer.rcvMessage->len, rmrMessageBuffer.rcvMessage->payload);
2048     }
2049     auto rval = asn_decode(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, (void **) &pdu,
2050                            rmrMessageBuffer.rcvMessage->payload, rmrMessageBuffer.rcvMessage->len);
2051     if (rval.code != RC_OK) {
        mdclog_write(MDCLOG_ERR, "Error %d decoding (unpack) XML message from xApp/E2MGR : %s",
2053                      rval.code,
2054                      message.message.enodbName);
2055         return -1;
2056     }
2057
2058     int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
2059     auto er = asn_encode_to_buffer(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, pdu,
2060                                    rmrMessageBuffer.rcvMessage->payload, buff_size);
2061     if (er.encoded == -1) {
2062         mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
2063         return -1;
2064     } else if (er.encoded > (ssize_t)buff_size) {
        mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, at %s line %d",
                     (int)buff_size,
2067                      asn_DEF_E2AP_PDU.name,
2068                      __func__,
2069                      __LINE__);
2070         return -1;
2071     }
2072     rmrMessageBuffer.rcvMessage->len = er.encoded;
2073     return 0;
2074 }
2075
2076 /**
2077  *
2078  * @param sctpMap
2079  * @param rmrMessageBuffer
2080  * @param ts
2081  * @return
2082  */
2083 int receiveXappMessages(Sctp_Map_t *sctpMap,
2084                         RmrMessagesBuffer_t &rmrMessageBuffer,
2085                         struct timespec &ts) {
2086     if (rmrMessageBuffer.rcvMessage == nullptr) {
2087         //we have error
        mdclog_write(MDCLOG_ERR, "RMR message allocation failed, %s", strerror(errno));
2089         return -1;
2090     }
2091
2092     if (mdclog_level_get() >= MDCLOG_DEBUG) {
2093         mdclog_write(MDCLOG_DEBUG, "Call to rmr_rcv_msg");
2094     }
2095     rmrMessageBuffer.rcvMessage = rmr_rcv_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.rcvMessage);
2096     if (rmrMessageBuffer.rcvMessage == nullptr) {
        mdclog_write(MDCLOG_ERR, "RMR receive returned a null pointer, reallocating the RMR message buffer");
2098         rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2099         return -2;
2100     }
2101     ReportingMessages_t message;
2102     message.message.direction = 'D';
2103     message.message.time.tv_nsec = ts.tv_nsec;
2104     message.message.time.tv_sec = ts.tv_sec;
2105
2106     // get message payload
2107     //auto msgData = msg->payload;
2108     if (rmrMessageBuffer.rcvMessage->state != 0) {
        mdclog_write(MDCLOG_ERR, "RMR received message with state = %d", rmrMessageBuffer.rcvMessage->state);
2110         return -1;
2111     }
2112     rmr_get_meid(rmrMessageBuffer.rcvMessage, (unsigned char *)message.message.enodbName);
2113     message.peerInfo = (ConnectedCU_t *) sctpMap->find(message.message.enodbName);
2114     if (message.peerInfo == nullptr) {
2115         auto type = rmrMessageBuffer.rcvMessage->mtype;
2116         switch (type) {
2117             case RIC_SCTP_CLEAR_ALL:
2118             case E2_TERM_KEEP_ALIVE_REQ:
2119             case RIC_HEALTH_CHECK_REQ:
2120                 break;
2121             default:
                mdclog_write(MDCLOG_ERR, "Failed to handle message, no CU entry for %s", message.message.enodbName);
2123                 return -1;
2124         }
2125     }
2126
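    // dispatch on the RMR message type: E2AP payloads from the xApp/E2 Manager are converted
    // from XML back to PER where needed, Prometheus counters are updated, and the message is
    // forwarded to the E2 node over SCTP; E2T-internal requests are answered directly over RMR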
2127     switch (rmrMessageBuffer.rcvMessage->mtype) {
2128         case RIC_E2_SETUP_RESP : {
2129             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2130                 break;
2131             }
2132             message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup - 1]->Increment();
2133             message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2134             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2135                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_RESP");
2136                 return -6;
2137             }
2138             break;
2139         }
2140         case RIC_E2_SETUP_FAILURE : {
2141             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2142                 break;
2143             }
2144             message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup - 1]->Increment();
2145             message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2146             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2147                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_FAILURE");
2148                 return -6;
2149             }
2150             break;
2151         }
2152         case RIC_ERROR_INDICATION: {
2153             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication - 1]->Increment();
2154             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2155             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2156                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_ERROR_INDICATION");
2157                 return -6;
2158             }
2159             break;
2160         }
2161         case RIC_SUB_REQ: {
2162             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment();
2163             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2164             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2165                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_REQ");
2166                 return -6;
2167             }
2168             break;
2169         }
2170         case RIC_SUB_DEL_REQ: {
2171             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment();
2172             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2173             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2174                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_DEL_REQ");
2175                 return -6;
2176             }
2177             break;
2178         }
2179         case RIC_CONTROL_REQ: {
2180             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment();
2181             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2182             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2183                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_CONTROL_REQ");
2184                 return -6;
2185             }
2186             break;
2187         }
2188         case RIC_SERVICE_QUERY: {
2189             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2190                 break;
2191             }
2192             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceQuery - 1]->Increment();
2193             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2194             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2195                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_QUERY");
2196                 return -6;
2197             }
2198             break;
2199         }
2200         case RIC_SERVICE_UPDATE_ACK: {
2201             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2202                 break;
2203             }
2204             message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate - 1]->Increment();
            message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2206             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2207                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_ACK");
2208                 return -6;
2209             }
2210             break;
2211         }
2212         case RIC_SERVICE_UPDATE_FAILURE: {
2213             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2214                 break;
2215             }
2216             message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate - 1]->Increment();
            message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2218             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2219                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_FAILURE");
2220                 return -6;
2221             }
2222             break;
2223         }
2224         case RIC_E2_RESET_REQ: {
2225             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2226                 break;
2227             }
2228             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_Reset - 1]->Increment();
2229             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_Reset - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2230             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2231                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET");
2232                 return -6;
2233             }
2234             break;
2235         }
2236         case RIC_E2_RESET_RESP: {
2237             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2238                 break;
2239             }
2240             message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_Reset - 1]->Increment();
2241             message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2242             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2243                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET_RESP");
2244                 return -6;
2245             }
2246             break;
2247         }
2248         case RIC_SCTP_CLEAR_ALL: {
2249             mdclog_write(MDCLOG_INFO, "RIC_SCTP_CLEAR_ALL");
            // loop over all keys, close each peer socket, notify the xApps, then clear the map
2251             vector<char *> v;
2252             sctpMap->getKeys(v);
            for (auto const &iter : v) {
2254                 if (!boost::starts_with((string) (iter), "host:") && !boost::starts_with((string) (iter), "msg:")) {
2255                     auto *peerInfo = (ConnectedCU_t *) sctpMap->find(iter);
2256                     if (peerInfo == nullptr) {
2257                         continue;
2258                     }
2259                     close(peerInfo->fileDescriptor);
2260                     memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
2261                     message.message.direction = 'D';
2262                     message.message.time.tv_nsec = ts.tv_nsec;
2263                     message.message.time.tv_sec = ts.tv_sec;
2264
2265                     message.message.asnLength = rmrMessageBuffer.sendMessage->len =
2266                             snprintf((char *)rmrMessageBuffer.sendMessage->payload,
2267                                      256,
2268                                      "%s|RIC_SCTP_CLEAR_ALL",
2269                                      peerInfo->enodbName);
2270                     message.message.asndata = rmrMessageBuffer.sendMessage->payload;
2271                     mdclog_write(MDCLOG_INFO, "%s", message.message.asndata);
2272                     if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
2273                         mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
2274                     }
2275                     free(peerInfo);
2276                 }
2277             }
2278
2279             sleep(1);
2280             sctpMap->clear();
2281             break;
2282         }
2283         case E2_TERM_KEEP_ALIVE_REQ: {
2284             // send message back
2285             rmr_bytes2payload(rmrMessageBuffer.sendMessage,
2286                               (unsigned char *)rmrMessageBuffer.ka_message,
2287                               rmrMessageBuffer.ka_message_len);
2288             rmrMessageBuffer.sendMessage->mtype = E2_TERM_KEEP_ALIVE_RESP;
2289             rmrMessageBuffer.sendMessage->state = 0;
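             // Stamp a fresh transaction id (15-character, space-padded decimal counter) on the reply.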
2290             static unsigned char tx[32];
2291             auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2292             rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, txLen);
2293             rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2294             if (rmrMessageBuffer.sendMessage == nullptr) {
2295                 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2296                 mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP: rmr_send_msg returned NULL");
2297             } else if (rmrMessageBuffer.sendMessage->state != 0)  {
2298                 mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP, RMR state = %d (%s)",
2299                              rmrMessageBuffer.sendMessage->state, translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str());
2300             } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
2301                 mdclog_write(MDCLOG_DEBUG, "Got keep-alive request, sent response: %s", rmrMessageBuffer.ka_message);
2302             }
2303
2304             break;
2305         }
2306         case RIC_HEALTH_CHECK_REQ: {
2307             // send message back
2308             rmr_bytes2payload(rmrMessageBuffer.sendMessage,
2309                               (unsigned char *)"OK",
2310                               2);
2311             rmrMessageBuffer.sendMessage->mtype = RIC_HEALTH_CHECK_RESP;
2312             rmrMessageBuffer.sendMessage->state = 0;
2313             static unsigned char tx[32];
2314             auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2315             rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, txLen);
2316             rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2317             if (rmrMessageBuffer.sendMessage == nullptr) {
2318                 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2319                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP: rmr_send_msg returned NULL");
2320             } else if (rmrMessageBuffer.sendMessage->state != 0)  {
2321                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP, RMR state = %d (%s)",
2322                              rmrMessageBuffer.sendMessage->state, translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str());
2323             } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
2324                 mdclog_write(MDCLOG_DEBUG, "Got RIC_HEALTH_CHECK_REQ, sent response: OK");
2325             }
2326
2327             break;
2328         }
2329
2330         default:
2331             mdclog_write(MDCLOG_WARN, "Message Type %d is not supported", rmrMessageBuffer.rcvMessage->mtype);
2332             message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
2333             message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
2334             message.message.time.tv_nsec = ts.tv_nsec;
2335             message.message.time.tv_sec = ts.tv_sec;
2336             message.message.messageType = rmrMessageBuffer.rcvMessage->mtype;
2337
2338             buildJsonMessage(message);
2339
2340
2341             return -7;
2342     }
2343     if (mdclog_level_get() >= MDCLOG_DEBUG) {
2344         mdclog_write(MDCLOG_DEBUG, "EXIT OK from %s", __FUNCTION__);
2345     }
2346     return 0;
2347 }
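// Return codes used in the portion of the handler shown above: 0 on success, -6 when a message
// could not be forwarded to the E2 node over SCTP, -7 for an unsupported RMR message type.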
2348
2349 /**
2350  * Send a message to the CU without waiting for a successful or unsuccessful outcome
2351  * @param messageBuffer RMR message buffers holding the payload received from the xApp
2352  * @param message reporting/metadata structure describing the message
2353  * @param failedMsgId RMR message type used to report a send failure back to the xApp
2354  * @param sctpMap map of open SCTP connections
2355  * @return 0 on success, non-zero on failure
2356  */
2357 int sendDirectionalSctpMsg(RmrMessagesBuffer_t &messageBuffer,
2358                            ReportingMessages_t &message,
2359                            int failedMsgId,
2360                            Sctp_Map_t *sctpMap) {
2361
2362     getRequestMetaData(message, messageBuffer);
2363     if (mdclog_level_get() >= MDCLOG_INFO) {
2364         mdclog_write(MDCLOG_INFO, "sending message to %s", message.message.enodbName);
2365     }
2366
2367     auto rc = sendMessagetoCu(sctpMap, messageBuffer, message, failedMsgId);
2368     return rc;
2369 }
2370
2371 /**
2372  * Forward the xApp message to the connected CU over SCTP
2373  * @param sctpMap map of open SCTP connections
2374  * @param messageBuffer RMR message buffers holding the payload received from the xApp
2375  * @param message reporting/metadata structure describing the message
2376  * @param failedMesgId RMR message type used to report a send failure
2377  * @return 0 on success, non-zero on failure
2378  */
2379 int sendMessagetoCu(Sctp_Map_t *sctpMap,
2380                     RmrMessagesBuffer_t &messageBuffer,
2381                     ReportingMessages_t &message,
2382                     int failedMesgId) {
2383     // send over the peer's already-open SCTP connection (FD held in message.peerInfo)
2384     message.message.messageType = messageBuffer.rcvMessage->mtype;
2385     auto rc = sendSctpMsg(message.peerInfo, message, sctpMap);
2386     return rc;
2387 }
2388
2389 /**
2390  * Notify the xApp that the gNB/eNB it addressed could not be found
2391  * (the reply payload carries a short textual reason and the failure message type)
2392  * @param rmrMessageBuffer RMR message buffers (context and send buffer) used for the reply
2393  * @param message reporting/metadata structure for the message we got from the xApp
2394  * @param failedMesgId RMR message type used for the error reply
2395  */
2396 void
2397 sendFailedSendingMessagetoXapp(RmrMessagesBuffer_t &rmrMessageBuffer, ReportingMessages_t &message, int failedMesgId) {
2398     rmr_mbuf_t *msg = rmrMessageBuffer.sendMessage;
2399     msg->len = snprintf((char *) msg->payload, 200, "the gNB/eNB name %s was not found",
2400                         message.message.enodbName);
2401     if (mdclog_level_get() >= MDCLOG_INFO) {
2402         mdclog_write(MDCLOG_INFO, "%s", msg->payload);
2403     }
2404     msg->mtype = failedMesgId;
2405     msg->state = 0;
2406
2407     static unsigned char tx[32];
2408     snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2409     rmr_bytes2xact(msg, tx, strlen((const char *) tx));
2410
2411     sendRmrMessage(rmrMessageBuffer, message);
2412 }
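// Note: the reply is pushed out through sendRmrMessage() below, which also emits the JSON
// trace record for the transaction before handing the buffer to RMR.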
2413
2414
2415
2416 /**
2417  * Register a peer's socket with the epoll instance
2418  * @param epoll_fd epoll file descriptor
2419  * @param peerInfo connected CU entry whose socket is being registered
2420  * @param events epoll event mask to wait for
2421  * @param sctpMap map of open SCTP connections
2422  * @param enodbName RAN name of the peer, or nullptr if not yet known
2423  * @param msgType message type used to build the pending-message ("msg:") key
2424  * @return 0 on success, -1 on failure
2425  */
2426 int addToEpoll(int epoll_fd,
2427                ConnectedCU_t *peerInfo,
2428                uint32_t events,
2429                Sctp_Map_t *sctpMap,
2430                char *enodbName,
2431                int msgType) {
2432     // add the peer's socket to the epoll set
2433     struct epoll_event event{};
2434     event.data.ptr = peerInfo;
2435     event.events = events;
2436     if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, peerInfo->fileDescriptor, &event) < 0) {
2437         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2438             mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_ADD failed (consider not quitting here), %s, %s %d",
2439                          strerror(errno), __func__, __LINE__);
2440         }
2441         close(peerInfo->fileDescriptor);
2442         if (enodbName != nullptr) {
2443             cleanHashEntry(peerInfo, sctpMap);
2444             char key[MAX_ENODB_NAME_SIZE * 2];
2445             snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", enodbName, msgType);
2446             if (mdclog_level_get() >= MDCLOG_DEBUG) {
2447                 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
2448             }
2449             auto tmp = sctpMap->find(key);
2450             if (tmp) {
2451                 free(tmp);
2452                 sctpMap->erase(key);
2453             }
2454         } else {
2455             peerInfo->enodbName[0] = 0;
2456         }
2457         mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_ADD failed (consider not quitting here)");
2458         return -1;
2459     }
2460     return 0;
2461 }
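// A minimal usage sketch (illustrative only -- the epoll FD field, event mask and message type
// shown here are assumptions; real call sites elsewhere in this file pass their own values):
//
//     if (addToEpoll(sctpParams.epoll_fd, peerInfo, EPOLLIN | EPOLLET, sctpMap,
//                    peerInfo->enodbName, RIC_SCTP_CONNECTION_FAILURE) != 0) {
//         // registration failed: the socket is already closed and the map entries cleaned up
//     }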
2462
2463 /**
2464  * Modify the epoll registration of a peer's socket
2465  * @param epoll_fd epoll file descriptor
2466  * @param peerInfo connected CU entry whose registration is being modified
2467  * @param events new epoll event mask to wait for
2468  * @param sctpMap map of open SCTP connections
2469  * @param enodbName RAN name of the peer
2470  * @param msgType message type used to build the pending-message ("msg:") key
2471  * @return 0 on success, -1 on failure
2472  */
2473 int modifyToEpoll(int epoll_fd,
2474                   ConnectedCU_t *peerInfo,
2475                   uint32_t events,
2476                   Sctp_Map_t *sctpMap,
2477                   char *enodbName,
2478                   int msgType) {
2479     // modify the peer's existing registration in the epoll set
2480     struct epoll_event event{};
2481     event.data.ptr = peerInfo;
2482     event.events = events;
2483     if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, peerInfo->fileDescriptor, &event) < 0) {
2484         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2485             mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_MOD failed (consider not quitting here), %s, %s %d",
2486                          strerror(errno), __func__, __LINE__);
2487         }
2488         close(peerInfo->fileDescriptor);
2489         cleanHashEntry(peerInfo, sctpMap);
2490         char key[MAX_ENODB_NAME_SIZE * 2];
2491         snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", enodbName, msgType);
2492         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2493             mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
2494         }
2495         auto tmp = sctpMap->find(key);
2496         if (tmp) {
2497             free(tmp);
2498         }
2499         sctpMap->erase(key);
2500         mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_MOD failed (consider not quitting here)");
2501         return -1;
2502     }
2503     return 0;
2504 }
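// Note: modifyToEpoll() mirrors addToEpoll() above but issues EPOLL_CTL_MOD for a socket that is
// already registered; on failure it likewise closes the socket and erases the peer's map entries.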
2505
2506
2507 int sendRmrMessage(RmrMessagesBuffer_t &rmrMessageBuffer, ReportingMessages_t &message) {
2508     buildJsonMessage(message);
2509
2510     rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2511
2512     if (rmrMessageBuffer.sendMessage == nullptr) {
2513         rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2514         mdclog_write(MDCLOG_ERR, "RMR send failed: rmr_send_msg returned a NULL pointer");
2515         return -1;
2516     }
2517
2518     if (rmrMessageBuffer.sendMessage->state != 0) {
2519         char meid[RMR_MAX_MEID]{};
2520         if (rmrMessageBuffer.sendMessage->state == RMR_ERR_RETRY) {
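            // RMR reported a transient condition: back off briefly, clear the state and retry
            // the send once before giving up.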
2521             usleep(5);
2522             rmrMessageBuffer.sendMessage->state = 0;
2523             mdclog_write(MDCLOG_INFO, "RETRY sending Message type %d to Xapp from %s",
2524                          rmrMessageBuffer.sendMessage->mtype,
2525                          rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2526             rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2527             if (rmrMessageBuffer.sendMessage == nullptr) {
2528                 mdclog_write(MDCLOG_ERR, "RMR send failed: rmr_send_msg returned a NULL pointer");
2529                 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2530                 return -1;
2531             } else if (rmrMessageBuffer.sendMessage->state != 0) {
2532                 mdclog_write(MDCLOG_ERR,
2533                              "Message state %s while sending request %d to Xapp from %s after a 5 microsecond retry",
2534                              translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str(),
2535                              rmrMessageBuffer.sendMessage->mtype,
2536                              rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2537                 auto rc = rmrMessageBuffer.sendMessage->state;
2538                 return rc;
2539             }
2540         } else {
2541             mdclog_write(MDCLOG_ERR, "Message state %s while sending request %d to Xapp from %s",
2542                          translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str(),
2543                          rmrMessageBuffer.sendMessage->mtype,
2544                          rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2545             return rmrMessageBuffer.sendMessage->state;
2546         }
2547     }
2548     return 0;
2549 }
2550
2551 void buildJsonMessage(ReportingMessages_t &message) {
2552     if (jsonTrace) {
2553         message.outLen = sizeof(message.base64Data);
2554         base64::encode((const unsigned char *) message.message.asndata,
2555                        (const int) message.message.asnLength,
2556                        message.base64Data,
2557                        message.outLen);
2558         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2559             mdclog_write(MDCLOG_DEBUG, "Tracing: ASN length = %d, base64 message length = %d ",
2560                          (int) message.message.asnLength,
2561                          (int) message.outLen);
2562         }
2563
2564         snprintf(message.buffer, sizeof(message.buffer),
2565                  "{\"header\": {\"ts\": \"%ld.%09ld\","
2566                  "\"ranName\": \"%s\","
2567                  "\"messageType\": %d,"
2568                  "\"direction\": \"%c\"},"
2569                  "\"base64Length\": %d,"
2570                  "\"asnBase64\": \"%s\"}",
2571                  message.message.time.tv_sec,
2572                  message.message.time.tv_nsec,
2573                  message.message.enodbName,
2574                  message.message.messageType,
2575                  message.message.direction,
2576                  (int) message.outLen,
2577                  message.base64Data);
2578         static src::logger_mt &lg = my_logger::get();
2579
2580         BOOST_LOG(lg) << message.buffer;
2581     }
2582 }
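// Example of a trace record emitted above (all values illustrative only):
// {"header": {"ts": "1622547800.000000042","ranName": "gnb_311_048_0000082f","messageType": 12010,"direction": "D"},"base64Length": 44,"asnBase64": "gE4AAA...=="}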
2583
2584
2585 /**
2586  * Translate an RMR error/state code into a human-readable string
2587  * @param state the RMR state code returned by an RMR call
2588  * @return a descriptive string for the given code
2589  */
2590 string translateRmrErrorMessages(int state) {
2591     string str = {};
2592     switch (state) {
2593         case RMR_OK:
2594             str = "RMR_OK - state is good";
2595             break;
2596         case RMR_ERR_BADARG:
2597             str = "RMR_ERR_BADARG - argument passed to function was unusable";
2598             break;
2599         case RMR_ERR_NOENDPT:
2600             str = "RMR_ERR_NOENDPT - send/call could not find an endpoint based on msg type";
2601             break;
2602         case RMR_ERR_EMPTY:
2603             str = "RMR_ERR_EMPTY - msg received had no payload; attempt to send an empty message";
2604             break;
2605         case RMR_ERR_NOHDR:
2606             str = "RMR_ERR_NOHDR - message didn't contain a valid header";
2607             break;
2608         case RMR_ERR_SENDFAILED:
2609             str = "RMR_ERR_SENDFAILED - send failed; errno has nano reason";
2610             break;
2611         case RMR_ERR_CALLFAILED:
2612             str = "RMR_ERR_CALLFAILED - unable to send call() message";
2613             break;
2614         case RMR_ERR_NOWHOPEN:
2615             str = "RMR_ERR_NOWHOPEN - no wormholes are open";
2616             break;
2617         case RMR_ERR_WHID:
2618             str = "RMR_ERR_WHID - wormhole id was invalid";
2619             break;
2620         case RMR_ERR_OVERFLOW:
2621             str = "RMR_ERR_OVERFLOW - operation would have busted through a buffer/field size";
2622             break;
2623         case RMR_ERR_RETRY:
2624             str = "RMR_ERR_RETRY - request (send/call/rts) failed, but caller should retry (EAGAIN for wrappers)";
2625             break;
2626         case RMR_ERR_RCVFAILED:
2627             str = "RMR_ERR_RCVFAILED - receive failed (hard error)";
2628             break;
2629         case RMR_ERR_TIMEOUT:
2630             str = "RMR_ERR_TIMEOUT - message processing call timed out";
2631             break;
2632         case RMR_ERR_UNSET:
2633             str = "RMR_ERR_UNSET - the message hasn't been populated with a transport buffer";
2634             break;
2635         case RMR_ERR_TRUNC:
2636             str = "RMR_ERR_TRUNC - received message likely truncated";
2637             break;
2638         case RMR_ERR_INITFAILED:
2639             str = "RMR_ERR_INITFAILED - initialisation of something (probably message) failed";
2640             break;
2641         case RMR_ERR_NOTSUPP:
2642             str = "RMR_ERR_NOTSUPP - the request is not supported, or RMr was not initialised for the request";
2643             break;
2644         default:
2645             char buf[128]{};
2646             snprintf(buf, sizeof buf, "UNDOCUMENTED RMR_ERR : %d", state);
2647             str = buf;
2648             break;
2649     }
2650     return str;
2651 }
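// Typical use (as in sendRmrMessage() above): turning an RMR state code into log text, e.g.
//     mdclog_write(MDCLOG_ERR, "send failed: %s", translateRmrErrorMessages(msg->state).c_str());
// where msg is any rmr_mbuf_t * returned by an RMR send/receive call.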
2652
2653