Add R5 content to master
[ric-plt/e2.git] / RIC-E2-TERMINATION / sctpThread.cpp
1 // Copyright 2019 AT&T Intellectual Property
2 // Copyright 2019 Nokia
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //      http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15
16 //  This source code is part of the near-RT RIC (RAN Intelligent Controller)
17 //  platform project (RICP).
18
19 //  sctpThread: main loop of the E2 Termination - accepts SCTP connections from RAN nodes and relays E2AP messages between the RAN and the xApps over RMR, reporting statistics to Prometheus.
20
21
22
23 #include <3rdparty/oranE2/RANfunctions-List.h>
24 #include "sctpThread.h"
25 #include "BuildRunName.h"
26
27 #include "3rdparty/oranE2SM/E2SM-gNB-NRT-RANfunction-Definition.h"
28 #include "BuildXml.h"
29 #include "pugixml/src/pugixml.hpp"
30
31 using namespace std;
32 //using namespace std::placeholders;
33 using namespace boost::filesystem;
34 using namespace prometheus;
35
36
37 //#ifdef __cplusplus
38 //extern "C"
39 //{
40 //#endif
41
42 // declared here explicitly so that the gcov header does not need to be included
43 extern "C" void __gcov_flush(void);
44
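// Signal handler for SIGINT/SIGABRT/SIGTERM: flushes the gcov coverage counters
// before exiting so that coverage data survives an externally requested shutdown.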
45 static void catch_function(int signal) {
46     __gcov_flush();
47     exit(signal);
48 }
49
50
51 BOOST_LOG_INLINE_GLOBAL_LOGGER_DEFAULT(my_logger, src::logger_mt)
52
53 boost::shared_ptr<sinks::synchronous_sink<sinks::text_file_backend>> boostLogger;
54 double cpuClock = 0.0;
55 bool jsonTrace = true;
56
57 void init_log() {
58     mdclog_attr_t *attr;
59     mdclog_attr_init(&attr);
60     mdclog_attr_set_ident(attr, "E2Terminator");
61     mdclog_init(attr);
62     mdclog_attr_destroy(attr);
63 }
64 auto start_time = std::chrono::high_resolution_clock::now();
65 typedef std::chrono::duration<double, std::ratio<1,1>> seconds_t;
66
67 double age() {
68     return seconds_t(std::chrono::high_resolution_clock::now() - start_time).count();
69 }
70
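// Rough CPU clock estimate: counts rdtscp cycles across a sleep of `sleeptime`
// milliseconds and divides by the elapsed wall-clock time. Note that the value
// returned is in cycles per second (Hz), despite the MHz in the name.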
71 double approx_CPU_MHz(unsigned sleeptime) {
72     using namespace std::chrono_literals;
73     uint32_t aux = 0;
74     uint64_t cycles_start = rdtscp(aux);
75     double time_start = age();
76     std::this_thread::sleep_for(sleeptime * 1ms);
77     uint64_t elapsed_cycles = rdtscp(aux) - cycles_start;
78     double elapsed_time = age() - time_start;
79     return elapsed_cycles / elapsed_time;
80 }
81
82 //std::atomic<int64_t> rmrCounter{0};
83 std::atomic<int64_t> num_of_messages{0};
84 std::atomic<int64_t> num_of_XAPP_messages{0};
85 static long transactionCounter = 0;
86
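// Creates the SCTP listening socket (IPv6, wildcard address), binds it to
// sctpParams.sctpPort, switches it to non-blocking mode and registers it with
// the shared epoll instance in edge-triggered mode.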
87 int buildListeningPort(sctp_params_t &sctpParams) {
88     sctpParams.listenFD = socket (AF_INET6, SOCK_STREAM, IPPROTO_SCTP);
89     struct sockaddr_in6 servaddr {};
90     servaddr.sin6_family = AF_INET6;
91     servaddr.sin6_addr   = in6addr_any;
92     servaddr.sin6_port = htons(sctpParams.sctpPort);
93     if (bind(sctpParams.listenFD, (SA *)&servaddr, sizeof(servaddr)) < 0 ) {
94         mdclog_write(MDCLOG_ERR, "Error binding. %s\n", strerror(errno));
95         return -1;
96     }
97     if (setSocketNoBlocking(sctpParams.listenFD) == -1) {
98         //mdclog_write(MDCLOG_ERR, "Error binding. %s", strerror(errno));
99         return -1;
100     }
101     if (mdclog_level_get() >= MDCLOG_DEBUG) {
102         struct sockaddr_in6 cliaddr {};
103         socklen_t len = sizeof(cliaddr);
104         getsockname(sctpParams.listenFD, (SA *)&cliaddr, &len);
105         char buff[1024] {};
106         inet_ntop(AF_INET6, &cliaddr.sin6_addr, buff, sizeof(buff));
107         mdclog_write(MDCLOG_DEBUG, "My address: %s, port %d\n", buff, ntohs(cliaddr.sin6_port));
108     }
109
110     if (listen(sctpParams.listenFD, SOMAXCONN) < 0) {
111         mdclog_write(MDCLOG_ERR, "Error listening. %s\n", strerror(errno));
112         return -1;
113     }
114     struct epoll_event event {};
115     event.events = EPOLLIN | EPOLLET;
116     event.data.fd = sctpParams.listenFD;
117
118     // add listening port to epoll
119     if (epoll_ctl(sctpParams.epoll_fd, EPOLL_CTL_ADD, sctpParams.listenFD, &event)) {
120         printf("Failed to add descriptor to epoll\n");
121         mdclog_write(MDCLOG_ERR, "Failed to add descriptor to epoll. %s\n", strerror(errno));
122         return -1;
123     }
124
125     return 0;
126 }
127
128 int buildConfiguration(sctp_params_t &sctpParams) {
129     path p = (sctpParams.configFilePath + "/" + sctpParams.configFileName).c_str();
130     if (exists(p)) {
131         const int size = 2048;
132         auto fileSize = file_size(p);
133         if (fileSize > size) {
134             mdclog_write(MDCLOG_ERR, "File %s larger than %d", p.string().c_str(), size);
135             return -1;
136         }
137     } else {
138         mdclog_write(MDCLOG_ERR, "Configuration file %s does not exist", p.string().c_str());
139         return -1;
140     }
141
142     ReadConfigFile conf;
143     if (conf.openConfigFile(p.string()) == -1) {
144         mdclog_write(MDCLOG_ERR, "Failed to open config file %s, %s",
145                      p.string().c_str(), strerror(errno));
146         return -1;
147     }
148     int rmrPort = conf.getIntValue("nano");
149     if (rmrPort == -1) {
150         mdclog_write(MDCLOG_ERR, "illegal RMR port ");
151         return -1;
152     }
153     sctpParams.rmrPort = (uint16_t)rmrPort;
154     snprintf(sctpParams.rmrAddress, sizeof(sctpParams.rmrAddress), "%d", (int) (sctpParams.rmrPort));
155
156     auto tmpStr = conf.getStringValue("loglevel");
157     if (tmpStr.length() == 0) {
158         mdclog_write(MDCLOG_ERR, "illegal loglevel. Set loglevel to MDCLOG_INFO");
159         tmpStr = "info";
160     }
161     transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
162
163     if ((tmpStr.compare("debug")) == 0) {
164         sctpParams.logLevel = MDCLOG_DEBUG;
165     } else if ((tmpStr.compare("info")) == 0) {
166         sctpParams.logLevel = MDCLOG_INFO;
167     } else if ((tmpStr.compare("warning")) == 0) {
168         sctpParams.logLevel = MDCLOG_WARN;
169     } else if ((tmpStr.compare("error")) == 0) {
170         sctpParams.logLevel = MDCLOG_ERR;
171     } else {
172         mdclog_write(MDCLOG_ERR, "illegal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
173         sctpParams.logLevel = MDCLOG_INFO;
174     }
175     mdclog_level_set(sctpParams.logLevel);
176
177     tmpStr = conf.getStringValue("volume");
178     if (tmpStr.length() == 0) {
179         mdclog_write(MDCLOG_ERR, "illegal volume.");
180         return -1;
181     }
182
183     char tmpLogFilespec[VOLUME_URL_SIZE];
184     tmpLogFilespec[0] = 0;
185     sctpParams.volume[0] = 0;
186     snprintf(sctpParams.volume, VOLUME_URL_SIZE, "%s", tmpStr.c_str());
187     // copy the name to temp file as well
188     snprintf(tmpLogFilespec, VOLUME_URL_SIZE, "%s", tmpStr.c_str());
189
190
191     // define the file name in the tmp directory under the volume
192     strcat(tmpLogFilespec,"/tmp/E2Term_%Y-%m-%d_%H-%M-%S.%N.tmpStr");
193
194     sctpParams.myIP = conf.getStringValue("local-ip");
195     if (sctpParams.myIP.length() == 0) {
196         mdclog_write(MDCLOG_ERR, "illegal local-ip.");
197         return -1;
198     }
199
200     int sctpPort = conf.getIntValue("sctp-port");
201     if (sctpPort == -1) {
202         mdclog_write(MDCLOG_ERR, "illegal SCTP port ");
203         return -1;
204     }
205     sctpParams.sctpPort = (uint16_t)sctpPort;
206
207     sctpParams.fqdn = conf.getStringValue("external-fqdn");
208     if (sctpParams.fqdn.length() == 0) {
209         mdclog_write(MDCLOG_ERR, "illegal external-fqdn");
210         return -1;
211     }
212
213     std::string pod = conf.getStringValue("pod_name");
214     if (pod.length() == 0) {
215         mdclog_write(MDCLOG_ERR, "illegal pod_name in config file");
216         return -1;
217     }
218     auto *podName = getenv(pod.c_str());
219     if (podName == nullptr) {
220         mdclog_write(MDCLOG_ERR, "illegal pod_name or the environment variable does not exist : %s", pod.c_str());
221         return -1;
222
223     } else {
224         sctpParams.podName.assign(podName);
225         if (sctpParams.podName.length() == 0) {
226             mdclog_write(MDCLOG_ERR, "illegal pod_name");
227             return -1;
228         }
229     }
230
231     tmpStr = conf.getStringValue("trace");
232     transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
233     if ((tmpStr.compare("start")) == 0) {
234         mdclog_write(MDCLOG_INFO, "Trace set to: start");
235         sctpParams.trace = true;
236     } else if ((tmpStr.compare("stop")) == 0) {
237         mdclog_write(MDCLOG_INFO, "Trace set to: stop");
238         sctpParams.trace = false;
239     }
240     jsonTrace = sctpParams.trace;
241
242     sctpParams.epollTimeOut = -1;
243     tmpStr = conf.getStringValue("prometheusMode");
244     transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
245     if (tmpStr.length() != 0) {
246         sctpParams.prometheusMode = tmpStr;
247         if (tmpStr.compare("push") == 0) {
248             auto timeout = conf.getIntValue("prometheusPushTimeOut");
249             if (timeout >= 5 && timeout <= 300) {
250                 sctpParams.epollTimeOut = timeout * 1000;
251             } else {
252                 sctpParams.epollTimeOut = 10 * 1000;
253             }
254         }
255     }
256
257     tmpStr = conf.getStringValue("prometheusPushAddr");
258     if (tmpStr.length() != 0) {
259         sctpParams.prometheusPushAddress = tmpStr;
260     }
261
262     tmpStr = conf.getStringValue("prometheusPort");
263     if (tmpStr.length() != 0) {
264         sctpParams.prometheusPort = tmpStr;
265     }
266
267     sctpParams.ka_message_length = snprintf(sctpParams.ka_message, KA_MESSAGE_SIZE, "{\"address\": \"%s:%d\","
268                                                                                     "\"fqdn\": \"%s\","
269                                                                                     "\"pod_name\": \"%s\"}",
270                                             (const char *)sctpParams.myIP.c_str(),
271                                             sctpParams.rmrPort,
272                                             sctpParams.fqdn.c_str(),
273                                             sctpParams.podName.c_str());
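    // Illustrative keep-alive payload built above (placeholder values, not defaults):
    //   {"address": "10.20.30.40:38000","fqdn": "e2term.example.com","pod_name": "e2term-pod-0"}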
274
275     if (mdclog_level_get() >= MDCLOG_INFO) {
276         mdclog_mdc_add("RMR Port", to_string(sctpParams.rmrPort).c_str());
277         mdclog_mdc_add("LogLevel", to_string(sctpParams.logLevel).c_str());
278         mdclog_mdc_add("volume", sctpParams.volume);
279         mdclog_mdc_add("tmpLogFilespec", tmpLogFilespec);
280         mdclog_mdc_add("my ip", sctpParams.myIP.c_str());
281         mdclog_mdc_add("pod name", sctpParams.podName.c_str());
282
283         mdclog_write(MDCLOG_INFO, "running parameters for instance : %s", sctpParams.ka_message);
284     }
285     mdclog_mdc_clean();
286
287     // Boost.Log file sink: log files are written under the volume's tmp directory and rotated at 10 MB or hourly
288     boostLogger = logging::add_file_log(
289             keywords::file_name = tmpLogFilespec, // to temp directory
290             keywords::rotation_size = 10 * 1024 * 1024,
291             keywords::time_based_rotation = sinks::file::rotation_at_time_interval(posix_time::hours(1)),
292             keywords::format = "%Message%"
293             //keywords::format = "[%TimeStamp%]: %Message%" // prefix each log record with a time stamp
294     );
295
296     // Set up a destination folder for collecting rotated (closed) files -- it is on the same volume, so rename() can be used
297     boostLogger->locked_backend()->set_file_collector(sinks::file::make_collector(
298             keywords::target = sctpParams.volume
299     ));
300
301     // Upon restart, scan the directory for files matching the file_name pattern
302     boostLogger->locked_backend()->scan_for_files();
303
304     // Enable auto-flushing after each log record written
305     if (mdclog_level_get() >= MDCLOG_DEBUG) {
306         boostLogger->locked_backend()->auto_flush(true);
307     }
308
309     return 0;
310 }
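// Illustrative sketch of the configuration file consumed above. The key names are the
// ones looked up in buildConfiguration(); the values are placeholders, not project defaults:
//
//   nano                  = 38000                      # RMR listening port
//   loglevel              = info                       # debug | info | warning | error
//   volume                = log                        # directory receiving rotated log files
//   local-ip              = 10.20.30.40
//   sctp-port             = 36422
//   external-fqdn         = e2term.example.com
//   pod_name              = MY_POD_NAME                # environment variable holding the pod name
//   trace                 = stop                       # start | stop
//   prometheusMode        = pull                       # pull | push
//   prometheusPort        = 8088                       # used in pull mode
//   prometheusPushAddr    = pushgateway.example:9091   # used in push mode
//   prometheusPushTimeOut = 10                         # seconds, valid range [5..300]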
311
312 static std::string GetHostName() {
313     char hostname[1024];
314
315     if (::gethostname(hostname, sizeof(hostname))) {
316         return {};
317     }
318     return hostname;
319 }
320
321
322
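// Process entry point: seeds the transaction counter, installs the signal handlers,
// loads the configuration, sets up Prometheus (pull exposer or push gateway), creates
// the epoll instance, the RMR context, the inotify watch and the SCTP listening socket,
// then starts the listener thread(s) and announces itself with E2_TERM_INIT until an
// xApp answers.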
323 int main(const int argc, char **argv) {
324     sctp_params_t sctpParams;
325
326     {
327         std::random_device device{};
328         std::mt19937 generator(device());
329         std::uniform_int_distribution<long> distribution(1, (long) 1e12);
330         transactionCounter = distribution(generator);
331     }
332
333 //    uint64_t st = 0;
334 //    uint32_t aux1 = 0;
335 //   st = rdtscp(aux1);
336
337     unsigned num_cpus = std::thread::hardware_concurrency();
338     init_log();
339     mdclog_level_set(MDCLOG_INFO);
340
341     if (std::signal(SIGINT, catch_function) == SIG_ERR) {
342         mdclog_write(MDCLOG_ERR, "Error initializing SIGINT");
343         exit(1);
344     }
345     if (std::signal(SIGABRT, catch_function)== SIG_ERR) {
346         mdclog_write(MDCLOG_ERR, "Error initializing SIGABRT");
347         exit(1);
348     }
349     if (std::signal(SIGTERM, catch_function)== SIG_ERR) {
350         mdclog_write(MDCLOG_ERR, "Error initializing SIGTERM");
351         exit(1);
352     }
353
354     cpuClock = approx_CPU_MHz(100);
355
356     mdclog_write(MDCLOG_DEBUG, "CPU speed %11.11f", cpuClock);
357
358     auto result = parse(argc, argv, sctpParams);
359
360     if (buildConfiguration(sctpParams) != 0) {
361         exit(-1);
362     }
363
364     //auto registry = std::make_shared<Registry>();
365     sctpParams.prometheusRegistry = std::make_shared<Registry>();
366
367     //sctpParams.promtheusFamily = new Family<Counter>("E2T", "E2T message counter", {{"E", sctpParams.podName}});
368
369     sctpParams.prometheusFamily = &BuildCounter()
370             .Name("E2T")
371             .Help("E2T message counter")
372             .Labels({{"E", sctpParams.podName}})
373             .Register(*sctpParams.prometheusRegistry);
374
375
376     // start epoll
377     sctpParams.epoll_fd = epoll_create1(0);
378     if (sctpParams.epoll_fd == -1) {
379         mdclog_write(MDCLOG_ERR, "failed to open epoll descriptor");
380         exit(-1);
381     }
382
383     getRmrContext(sctpParams);
384     if (sctpParams.rmrCtx == nullptr) {
385         close(sctpParams.epoll_fd);
386         exit(-1);
387     }
388
389     if (buildInotify(sctpParams) == -1) {
390         close(sctpParams.rmrListenFd);
391         rmr_close(sctpParams.rmrCtx);
392         close(sctpParams.epoll_fd);
393         exit(-1);
394     }
395
396     if (buildListeningPort(sctpParams) != 0) {
397         close(sctpParams.rmrListenFd);
398         rmr_close(sctpParams.rmrCtx);
399         close(sctpParams.epoll_fd);
400         exit(-1);
401     }
402
403     sctpParams.sctpMap = new mapWrapper();
404
405     std::vector<std::thread> threads(num_cpus);
406 //    std::vector<std::thread> threads;
407
408     if (sctpParams.prometheusMode.compare("pull") == 0) {
409         sctpParams.prometheusExposer = new Exposer(sctpParams.myIP + ":" + sctpParams.prometheusPort, 1);
410         sctpParams.prometheusExposer->RegisterCollectable(sctpParams.prometheusRegistry);
411     } else if (sctpParams.prometheusMode.compare("push") == 0) {
412         const auto labels = Gateway::GetInstanceLabel(GetHostName());
413         string address {};
414         string port {};
415         char ch = ':';
416         auto found = sctpParams.prometheusPushAddress.find_last_of(ch);
417         // split the push address into <host> and <port>;
418         // if ':' is not present the gateway cannot be built
419         if (found != string::npos) {
420             address = sctpParams.prometheusPushAddress.substr(0,found);
421             port = sctpParams.prometheusPushAddress.substr(found + 1);
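            // e.g. (illustrative) "pushgateway.example:9091" -> address = "pushgateway.example", port = "9091"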
422             sctpParams.prometheusGateway = new Gateway(address, port, "E2T", labels);
423             sctpParams.prometheusGateway->RegisterCollectable(sctpParams.prometheusRegistry);
424         } else {
425             mdclog_write(MDCLOG_ERR, "failed to build Prometheus gateway, no stats will be sent");
426         }
427     }
428
429     num_cpus = 1;
430     for (unsigned int i = 0; i < num_cpus; i++) {
431         threads[i] = std::thread(listener, &sctpParams);
432
433         cpu_set_t cpuset;
434         CPU_ZERO(&cpuset);
435         CPU_SET(i, &cpuset);
436         int rc = pthread_setaffinity_np(threads[i].native_handle(), sizeof(cpu_set_t), &cpuset);
437         if (rc != 0) {
438             mdclog_write(MDCLOG_ERR, "Error calling pthread_setaffinity_np: %d", rc);
439         }
440     }
441
442
443     //loop over term_init until first message from xApp
444     handleTermInit(sctpParams);
445
446     for (auto &t : threads) {
447         t.join();
448     }
449
450     return 0;
451 }
452
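/**
 * Announces this E2 Termination by sending E2_TERM_INIT and keeps re-sending it
 * (roughly every 100 seconds) until the first RMR message from an xApp is observed
 * via num_of_XAPP_messages.
 * @param sctpParams runtime parameters holding the RMR context and the keep-alive message
 */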
453 void handleTermInit(sctp_params_t &sctpParams) {
454     sendTermInit(sctpParams);
455     //send to e2 manager init of e2 term
456     //E2_TERM_INIT
457
458     int count = 0;
459     while (true) {
460         auto xappMessages = num_of_XAPP_messages.load(std::memory_order_acquire);
461         if (xappMessages > 0) {
462             if (mdclog_level_get() >=  MDCLOG_INFO) {
463                 mdclog_write(MDCLOG_INFO, "Got a message from some application, stop sending E2_TERM_INIT");
464             }
465             return;
466         }
467         usleep(100000);
468         count++;
469         if (count % 1000 == 0) {
470             mdclog_write(MDCLOG_ERR, "Got no messages from any xApp");
471             sendTermInit(sctpParams);
472         }
473     }
474 }
475
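/**
 * Builds an E2_TERM_INIT RMR message carrying the keep-alive JSON and retries until
 * rmr_send_msg() reports success (state == 0), logging every 100th failed attempt.
 * @param sctpParams runtime parameters holding the RMR context and the keep-alive message
 */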
476 void sendTermInit(sctp_params_t &sctpParams) {
477     rmr_mbuf_t *msg = rmr_alloc_msg(sctpParams.rmrCtx, sctpParams.ka_message_length);
478     auto count = 0;
479     while (true) {
480         msg->mtype = E2_TERM_INIT;
481         msg->state = 0;
482         rmr_bytes2payload(msg, (unsigned char *)sctpParams.ka_message, sctpParams.ka_message_length);
483         static unsigned char tx[32];
484         auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
485         rmr_bytes2xact(msg, tx, txLen);
486         msg = rmr_send_msg(sctpParams.rmrCtx, msg);
487         if (msg == nullptr) {
488             msg = rmr_alloc_msg(sctpParams.rmrCtx, sctpParams.ka_message_length);
489         } else if (msg->state == 0) {
490             rmr_free_msg(msg);
491             if (mdclog_level_get() >=  MDCLOG_INFO) {
492                 mdclog_write(MDCLOG_INFO, "E2_TERM_INIT successfully sent ");
493             }
494             return;
495         } else {
496             if (count % 100 == 0) {
497                 mdclog_write(MDCLOG_ERR, "Error sending E2_TERM_INIT cause : %s ", translateRmrErrorMessages(msg->state).c_str());
498             }
499             sleep(1);
500         }
501         count++;
502     }
503 }
504
505 /**
506  * Parses the command line options (configuration file path and name).
507  * @param argc argument count
508  * @param argv argument vector
509  * @param sctpParams runtime parameters filled with the parsed values
510  * @return the cxxopts parse result
511  */
512 cxxopts::ParseResult parse(int argc, char *argv[], sctp_params_t &sctpParams) {
513     cxxopts::Options options(argv[0], "e2 term help");
514     options.positional_help("[optional args]").show_positional_help();
515     options.allow_unrecognised_options().add_options()
516             ("p,path", "config file path", cxxopts::value<std::string>(sctpParams.configFilePath)->default_value("config"))
517             ("f,file", "config file name", cxxopts::value<std::string>(sctpParams.configFileName)->default_value("config.conf"))
518             ("h,help", "Print help");
519
520     auto result = options.parse(argc, argv);
521
522     if (result.count("help")) {
523         std::cout << options.help({""}) << std::endl;
524         exit(0);
525     }
526     return result;
527 }
528
529 /**
530  * Creates an inotify instance watching the configuration directory and adds it to epoll.
531  * @param sctpParams runtime parameters (config path, epoll fd; the inotify fds are filled in)
532  * @return -1 failed 0 success
533  */
534 int buildInotify(sctp_params_t &sctpParams) {
535     sctpParams.inotifyFD = inotify_init1(IN_NONBLOCK);
536     if (sctpParams.inotifyFD == -1) {
537         mdclog_write(MDCLOG_ERR, "Failed to init inotify (inotify_init1) %s", strerror(errno));
538         close(sctpParams.rmrListenFd);
539         rmr_close(sctpParams.rmrCtx);
540         close(sctpParams.epoll_fd);
541         return -1;
542     }
543
544     sctpParams.inotifyWD = inotify_add_watch(sctpParams.inotifyFD,
545                                              (const char *)sctpParams.configFilePath.c_str(),
546                                              (unsigned)IN_OPEN | (unsigned)IN_CLOSE_WRITE | (unsigned)IN_CLOSE_NOWRITE); //IN_CLOSE = (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE)
547     if (sctpParams.inotifyWD == -1) {
548         mdclog_write(MDCLOG_ERR, "Failed to add directory : %s to  inotify (inotify_add_watch) %s",
549                      sctpParams.configFilePath.c_str(),
550                      strerror(errno));
551         close(sctpParams.inotifyFD);
552         return -1;
553     }
554
555     struct epoll_event event{};
556     event.events = (EPOLLIN);
557     event.data.fd = sctpParams.inotifyFD;
558     // add listening RMR FD to epoll
559     if (epoll_ctl(sctpParams.epoll_fd, EPOLL_CTL_ADD, sctpParams.inotifyFD, &event)) {
560         mdclog_write(MDCLOG_ERR, "Failed to add inotify FD to epoll");
561         close(sctpParams.inotifyFD);
562         return -1;
563     }
564     return 0;
565 }
566
567 /**
568  * Worker thread: runs the epoll event loop that multiplexes the SCTP listening socket,
569  * established RAN connections, the RMR descriptor and the inotify descriptor.
570  * @param params runtime parameters shared with the main thread
571  */
572 void listener(sctp_params_t *params) {
573     int num_of_SCTP_messages = 0;
574     auto totalTime = 0.0;
575     mdclog_mdc_clean();
576     mdclog_level_set(params->logLevel);
577
578     std::thread::id this_id = std::this_thread::get_id();
579     //save cout
580     streambuf *oldCout = cout.rdbuf();
581     ostringstream memCout;
582     // create new cout
583     cout.rdbuf(memCout.rdbuf());
584     cout << this_id;
585     //return to the normal cout
586     cout.rdbuf(oldCout);
587
588     char tid[32];
589     memcpy(tid, memCout.str().c_str(), memCout.str().length() < 32 ? memCout.str().length() : 31);
590     tid[memCout.str().length() < 32 ? memCout.str().length() : 31] = 0;
591     mdclog_mdc_add("thread id", tid);
592
593     if (mdclog_level_get() >= MDCLOG_DEBUG) {
594         mdclog_write(MDCLOG_DEBUG, "started thread number %s", tid);
595     }
596
597     RmrMessagesBuffer_t rmrMessageBuffer{};
598     //create and init RMR
599     rmrMessageBuffer.rmrCtx = params->rmrCtx;
600
601     auto *events = (struct epoll_event *) calloc(MAXEVENTS, sizeof(struct epoll_event));
602     struct timespec end{0, 0};
603     struct timespec start{0, 0};
604
605     rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
606     rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
607
608     memcpy(rmrMessageBuffer.ka_message, params->ka_message, params->ka_message_length);
609     rmrMessageBuffer.ka_message_len = params->ka_message_length;
610     rmrMessageBuffer.ka_message[rmrMessageBuffer.ka_message_len] = 0;
611
612     if (mdclog_level_get() >= MDCLOG_DEBUG) {
613         mdclog_write(MDCLOG_DEBUG, "keep alive message is : %s", rmrMessageBuffer.ka_message);
614     }
615
616     ReportingMessages_t message {};
617
618 //    for (int i = 0; i < MAX_RMR_BUFF_ARRY; i++) {
619 //        rmrMessageBuffer.rcvBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
620 //        rmrMessageBuffer.sendBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
621 //    }
622
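    // Main event loop: epoll_wait() multiplexes the SCTP listening socket, the established
    // RAN connections, the RMR receive descriptor and the inotify descriptor. In Prometheus
    // push mode a wait timeout triggers an asynchronous push of the metrics registry.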
623     bool gatewayflag = false;
624     while (true) {
625         future<int> gateWay;
626
627         if (mdclog_level_get() >= MDCLOG_DEBUG) {
628             mdclog_write(MDCLOG_DEBUG, "Start EPOLL Wait. Timeout = %d", params->epollTimeOut);
629         }
630         auto numOfEvents = epoll_wait(params->epoll_fd, events, MAXEVENTS, params->epollTimeOut);
631         if (numOfEvents == 0) {
632             if (params->prometheusGateway != nullptr) {
633                 gateWay = params->prometheusGateway->AsyncPush();
634                 gatewayflag = true;
635             }
636             continue;
637         } else if (numOfEvents < 0) {
638             if (errno == EINTR) {
639                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
640                     mdclog_write(MDCLOG_DEBUG, "got EINTR : %s", strerror(errno));
641                 }
642                 continue;
643             }
644             mdclog_write(MDCLOG_ERR, "Epoll wait failed, errno = %s", strerror(errno));
645             return;
646         }
647         if (gatewayflag) {
648             gatewayflag = false;
649             auto rc = gateWay.get();
650             if (rc != 200) {
651                 mdclog_write(MDCLOG_ERR, "Async send to Prometheus failed with return code %d", rc);
652             } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
653                 mdclog_write(MDCLOG_DEBUG, "Stats sent to Prometheus");
654             }
655         }
656         for (auto i = 0; i < numOfEvents; i++) {
657             if (mdclog_level_get() >= MDCLOG_DEBUG) {
658                 mdclog_write(MDCLOG_DEBUG, "handling epoll event %d out of %d", i + 1, numOfEvents);
659             }
660             clock_gettime(CLOCK_MONOTONIC, &message.message.time);
661             start.tv_sec = message.message.time.tv_sec;
662             start.tv_nsec = message.message.time.tv_nsec;
663
664
665             if ((events[i].events & EPOLLERR) || (events[i].events & EPOLLHUP)) {
666                 handlepoll_error(events[i], message, rmrMessageBuffer, params);
667             } else if (events[i].events & EPOLLOUT) {
668                 handleEinprogressMessages(events[i], message, rmrMessageBuffer, params);
669             } else if (params->listenFD == events[i].data.fd) {
670                 if (mdclog_level_get() >= MDCLOG_INFO) {
671                     mdclog_write(MDCLOG_INFO, "New connection request from sctp network\n");
672                 }
673                 // a new connection was requested by the RAN; start building the connection
674                 while (true) {
675                     struct sockaddr in_addr {};
676                     socklen_t in_len;
677                     char hostBuff[NI_MAXHOST];
678                     char portBuff[NI_MAXSERV];
679
680                     in_len = sizeof(in_addr);
681                     auto *peerInfo = (ConnectedCU_t *)calloc(1, sizeof(ConnectedCU_t));
682                     peerInfo->sctpParams = params;
683                     peerInfo->fileDescriptor = accept(params->listenFD, &in_addr, &in_len);
684                     if (peerInfo->fileDescriptor == -1) {
685                         if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) {
686                             /* We have processed all incoming connections. */
687                             free(peerInfo); break; // nothing was registered for this peer yet
688                         } else {
689                             mdclog_write(MDCLOG_ERR, "Accept error, errno = %s", strerror(errno));
690                             free(peerInfo); break;
691                         }
692                     }
693                     if (setSocketNoBlocking(peerInfo->fileDescriptor) == -1) {
694                         mdclog_write(MDCLOG_ERR, "setSocketNoBlocking failed for the new connection on fd %d\n", peerInfo->fileDescriptor);
695                         close(peerInfo->fileDescriptor);
696                         free(peerInfo); break;
697                     }
698                     auto  ans = getnameinfo(&in_addr, in_len,
699                                             peerInfo->hostName, NI_MAXHOST,
700                                             peerInfo->portNumber, NI_MAXSERV, (unsigned )((unsigned int)NI_NUMERICHOST | (unsigned int)NI_NUMERICSERV));
701                     if (ans < 0) {
702                         mdclog_write(MDCLOG_ERR, "Failed to get info on connection request. %s\n", strerror(errno));
703                         close(peerInfo->fileDescriptor);
704                         free(peerInfo); break;
705                     }
706                     if (mdclog_level_get() >= MDCLOG_DEBUG) {
707                         mdclog_write(MDCLOG_DEBUG, "Accepted connection on descriptor %d (host=%s, port=%s)\n", peerInfo->fileDescriptor, peerInfo->hostName, peerInfo->portNumber);
708                     }
709                     peerInfo->isConnected = false;
710                     peerInfo->gotSetup = false;
711                     if (addToEpoll(params->epoll_fd,
712                                    peerInfo,
713                                    (EPOLLIN | EPOLLET),
714                                    params->sctpMap, nullptr,
715                                    0) != 0) {
716                         break;
717                     }
718                     break;
719                 }
720             } else if (params->rmrListenFd == events[i].data.fd) {
721                 // got message from XAPP
722                 num_of_XAPP_messages.fetch_add(1, std::memory_order_release);
723                 num_of_messages.fetch_add(1, std::memory_order_release);
724                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
725                     mdclog_write(MDCLOG_DEBUG, "new message from RMR");
726                 }
727                 if (receiveXappMessages(params->sctpMap,
728                                         rmrMessageBuffer,
729                                         message.message.time) != 0) {
730                     mdclog_write(MDCLOG_ERR, "Error handling Xapp message");
731                 }
732             } else if (params->inotifyFD == events[i].data.fd) {
733                 mdclog_write(MDCLOG_INFO, "Got event from inotify (configuration update)");
734                 handleConfigChange(params);
735             } else {
736                 /* We have data on the fd waiting to be read. Read and display it.
737                  * We must read whatever data is available completely, as we are running
738                  *  in edge-triggered mode and won't get a notification again for the same data. */
739                 num_of_messages.fetch_add(1, std::memory_order_release);
740                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
741                     mdclog_write(MDCLOG_DEBUG, "new message from SCTP, epoll flags are : %0x", events[i].events);
742                 }
743                 receiveDataFromSctp(&events[i],
744                                     params->sctpMap,
745                                     num_of_SCTP_messages,
746                                     rmrMessageBuffer,
747                                     message.message.time);
748             }
749
750             clock_gettime(CLOCK_MONOTONIC, &end);
751             if (mdclog_level_get() >= MDCLOG_INFO) {
752                 totalTime += ((end.tv_sec + 1.0e-9 * end.tv_nsec) -
753                               ((double) start.tv_sec + 1.0e-9 * start.tv_nsec));
754             }
755             if (mdclog_level_get() >= MDCLOG_DEBUG) {
756                 mdclog_write(MDCLOG_DEBUG, "message handling is %ld seconds %ld nanoseconds",
757                              end.tv_sec - start.tv_sec,
758                              end.tv_nsec - start.tv_nsec);
759             }
760         }
761     }
762 }
763
764 /**
765  * Re-reads the configuration file after an inotify IN_CLOSE_WRITE event and applies the new loglevel, trace and prometheusPushTimeOut values.
766  * @param sctpParams runtime parameters updated in place
767  */
768 void handleConfigChange(sctp_params_t *sctpParams) {
769     char buf[4096] __attribute__ ((aligned(__alignof__(struct inotify_event))));
770     const struct inotify_event *event;
771     char *ptr;
772
773     path p = (sctpParams->configFilePath + "/" + sctpParams->configFileName).c_str();
774     auto endlessLoop = true;
775     while (endlessLoop) {
776         auto len = read(sctpParams->inotifyFD, buf, sizeof buf);
777         if (len == -1) {
778             if (errno != EAGAIN) {
779                 mdclog_write(MDCLOG_ERR, "read %s ", strerror(errno));
780                 endlessLoop = false;
781                 continue;
782             }
783             else {
784                 endlessLoop = false;
785                 continue;
786             }
787         }
788
789         for (ptr = buf; ptr < buf + len; ptr += sizeof(struct inotify_event) + event->len) {
790             event = (const struct inotify_event *)ptr;
791             if (event->mask & (uint32_t)IN_ISDIR) {
792                 continue;
793             }
794
795             // the directory name
796             if (sctpParams->inotifyWD == event->wd) {
797                 // not the directory
798             }
799             if (event->len) {
800                 auto  retVal = strcmp(sctpParams->configFileName.c_str(), event->name);
801                 if (retVal != 0) {
802                     continue;
803                 }
804             }
805             // only the file we want
806             if (event->mask & (uint32_t)IN_CLOSE_WRITE) {
807                 if (mdclog_level_get() >= MDCLOG_INFO) {
808                     mdclog_write(MDCLOG_INFO, "Configuration file changed");
809                 }
810                 if (exists(p)) {
811                     const int size = 2048;
812                     auto fileSize = file_size(p);
813                     if (fileSize > size) {
814                         mdclog_write(MDCLOG_ERR, "File %s larger than %d", p.string().c_str(), size);
815                         return;
816                     }
817                 } else {
818                     mdclog_write(MDCLOG_ERR, "Configuration file %s does not exist", p.string().c_str());
819                     return;
820                 }
821
822                 ReadConfigFile conf;
823                 if (conf.openConfigFile(p.string()) == -1) {
824                     mdclog_write(MDCLOG_ERR, "Failed to open config file %s, %s",
825                                  p.string().c_str(), strerror(errno));
826                     return;
827                 }
828
829                 auto tmpStr = conf.getStringValue("loglevel");
830                 if (tmpStr.length() == 0) {
831                     mdclog_write(MDCLOG_ERR, "illegal loglevel. Set loglevel to MDCLOG_INFO");
832                     tmpStr = "info";
833                 }
834                 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
835
836                 if ((tmpStr.compare("debug")) == 0) {
837                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_DEBUG");
838                     sctpParams->logLevel = MDCLOG_DEBUG;
839                 } else if ((tmpStr.compare("info")) == 0) {
840                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_INFO");
841                     sctpParams->logLevel = MDCLOG_INFO;
842                 } else if ((tmpStr.compare("warning")) == 0) {
843                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_WARN");
844                     sctpParams->logLevel = MDCLOG_WARN;
845                 } else if ((tmpStr.compare("error")) == 0) {
846                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_ERR");
847                     sctpParams->logLevel = MDCLOG_ERR;
848                 } else {
849                     mdclog_write(MDCLOG_ERR, "illegal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
850                     sctpParams->logLevel = MDCLOG_INFO;
851                 }
852                 mdclog_level_set(sctpParams->logLevel);
853
854
855                 tmpStr = conf.getStringValue("trace");
856                 if (tmpStr.length() == 0) {
857                     mdclog_write(MDCLOG_ERR, "illegal trace. Set trace to stop");
858                     tmpStr = "stop";
859                 }
860
861                 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
862                 if ((tmpStr.compare("start")) == 0) {
863                     mdclog_write(MDCLOG_INFO, "Trace set to: start");
864                     sctpParams->trace = true;
865                 } else if ((tmpStr.compare("stop")) == 0) {
866                     mdclog_write(MDCLOG_INFO, "Trace set to: stop");
867                     sctpParams->trace = false;
868                 } else {
869                     mdclog_write(MDCLOG_ERR, "Trace was set to wrong value %s, set to stop", tmpStr.c_str());
870                     sctpParams->trace = false;
871                 }
872                 jsonTrace = sctpParams->trace;
873
874                 if (sctpParams->prometheusMode.compare("push") == 0) {
875                     auto timeout = conf.getIntValue("prometheusPushTimeOut");
876                     if (timeout >= 5 && timeout <= 300) {
877                         sctpParams->epollTimeOut = timeout * 1000;
878                     } else {
879                         mdclog_write(MDCLOG_ERR, "prometheusPushTimeOut set to wrong value %d, valid values are [5..300]",
880                                      timeout);
881                     }
882                 }
883
884                 endlessLoop = false;
885             }
886         }
887     }
888 }
889
890 /**
891  * Completes a non-blocking SCTP connect after EINPROGRESS: checks SO_ERROR and either reports the failure to the xApps or marks the peer connected and sends the delayed SETUP.
892  * @param event epoll event carrying the peer pointer
893  * @param message reporting message filled with the result
894  * @param rmrMessageBuffer RMR buffers used to notify the xApps
895  * @param params runtime parameters (epoll fd, sctp map)
896  */
897 void handleEinprogressMessages(struct epoll_event &event,
898                                ReportingMessages_t &message,
899                                RmrMessagesBuffer_t &rmrMessageBuffer,
900                                sctp_params_t *params) {
901     auto *peerInfo = (ConnectedCU_t *)event.data.ptr;
902     memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
903
904     mdclog_write(MDCLOG_INFO, "file descriptor %d got EPOLLOUT", peerInfo->fileDescriptor);
905     auto retVal = 0;
906     socklen_t retValLen = sizeof(retVal);
907     auto rc = getsockopt(peerInfo->fileDescriptor, SOL_SOCKET, SO_ERROR, &retVal, &retValLen);
908     if (rc != 0 || retVal != 0) {
909         if (rc != 0) {
910             rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
911                                                          "%s|Failed SCTP Connection, getsockopt after EINPROGRESS failed: %s",
912                                                          peerInfo->enodbName, strerror(errno));
913         } else if (retVal != 0) {
914             rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
915                                                          "%s|Failed SCTP Connection after EINPROGRESS, SO_ERROR",
916                                                          peerInfo->enodbName);
917         }
918
919         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
920         message.message.asnLength = rmrMessageBuffer.sendMessage->len;
921         mdclog_write(MDCLOG_ERR, "%s", rmrMessageBuffer.sendMessage->payload);
922         message.message.direction = 'N';
923         if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
924             mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
925         }
926         memset(peerInfo->asnData, 0, peerInfo->asnLength);
927         peerInfo->asnLength = 0;
928         peerInfo->mtype = 0;
929         return;
930     }
931
932     peerInfo->isConnected = true;
933
934     if (modifyToEpoll(params->epoll_fd, peerInfo, (EPOLLIN | EPOLLET), params->sctpMap, peerInfo->enodbName,
935                       peerInfo->mtype) != 0) {
936         mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_MOD");
937         return;
938     }
939
940     message.message.asndata = (unsigned char *)peerInfo->asnData;
941     message.message.asnLength = peerInfo->asnLength;
942     message.message.messageType = peerInfo->mtype;
943     memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
944     num_of_messages.fetch_add(1, std::memory_order_release);
945     if (mdclog_level_get() >= MDCLOG_DEBUG) {
946         mdclog_write(MDCLOG_DEBUG, "send the delayed SETUP/ENDC SETUP to sctp for %s",
947                      message.message.enodbName);
948     }
949     if (sendSctpMsg(peerInfo, message, params->sctpMap) != 0) {
950         if (mdclog_level_get() >= MDCLOG_DEBUG) {
951             mdclog_write(MDCLOG_DEBUG, "Error write to SCTP  %s %d", __func__, __LINE__);
952         }
953         return;
954     }
955
956     memset(peerInfo->asnData, 0, peerInfo->asnLength);
957     peerInfo->asnLength = 0;
958     peerInfo->mtype = 0;
959 }
960
961
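/**
 * Handles EPOLLERR/EPOLLHUP: for a RAN connection it notifies the xApps with
 * RIC_SCTP_CONNECTION_FAILURE, closes the socket and removes the peer from the map;
 * an error on the RMR descriptor is only logged.
 * @param event epoll event carrying either the peer pointer or the RMR descriptor
 * @param message reporting message filled with the failure notification
 * @param rmrMessageBuffer RMR buffers used to notify the xApps
 * @param params runtime parameters (sctp map, RMR listen fd)
 */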
962 void handlepoll_error(struct epoll_event &event,
963                       ReportingMessages_t &message,
964                       RmrMessagesBuffer_t &rmrMessageBuffer,
965                       sctp_params_t *params) {
966     if (event.data.fd != params->rmrListenFd) {
967         auto *peerInfo = (ConnectedCU_t *)event.data.ptr;
968         mdclog_write(MDCLOG_ERR, "epoll error, events %0x on fd %d, RAN NAME : %s",
969                      event.events, peerInfo->fileDescriptor, peerInfo->enodbName);
970
971         rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
972                                                      "%s|Failed SCTP Connection",
973                                                      peerInfo->enodbName);
974         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
975         message.message.asnLength = rmrMessageBuffer.sendMessage->len;
976
977         memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
978         message.message.direction = 'N';
979         if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
980             mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
981         }
982
983         close(peerInfo->fileDescriptor);
984         params->sctpMap->erase(peerInfo->enodbName);
985         cleanHashEntry((ConnectedCU_t *) event.data.ptr, params->sctpMap);
986     } else {
987         mdclog_write(MDCLOG_ERR, "epoll error, events %0x on RMR FD", event.events);
988     }
989 }
990 /**
991  * Adds O_NONBLOCK to the socket's file status flags.
992  * @param socket the file descriptor to modify
993  * @return 0 on success, -1 on a fcntl error
994  */
995 int setSocketNoBlocking(int socket) {
996     auto flags = fcntl(socket, F_GETFL, 0);
997
998     if (flags == -1) {
999         mdclog_mdc_add("func", "fcntl");
1000         mdclog_write(MDCLOG_ERR, "%s, %s", __FUNCTION__, strerror(errno));
1001         mdclog_mdc_clean();
1002         return -1;
1003     }
1004
1005     flags = (unsigned) flags | (unsigned) O_NONBLOCK;
1006     if (fcntl(socket, F_SETFL, flags) == -1) {
1007         mdclog_mdc_add("func", "fcntl");
1008         mdclog_write(MDCLOG_ERR, "%s, %s", __FUNCTION__, strerror(errno));
1009         mdclog_mdc_clean();
1010         return -1;
1011     }
1012
1013     return 0;
1014 }
1015
1016 /**
1017  * Removes the peer's "host:<name>:<port>" and enodbName entries from the map and frees the peer.
1018  * @param val connected RAN peer to remove
1019  * @param m map of host information
1020  */
1021 void cleanHashEntry(ConnectedCU_t *val, Sctp_Map_t *m) {
1022     char *dummy;
1023     auto port = (uint16_t) strtol(val->portNumber, &dummy, 10);
1024     char searchBuff[2048]{};
1025
1026     snprintf(searchBuff, sizeof searchBuff, "host:%s:%d", val->hostName, port);
1027     m->erase(searchBuff);
1028
1029     m->erase(val->enodbName);
1030     free(val);
1031 }
1032
1033 /**
1034  * Sends the ASN.1 payload carried in message over the peer's SCTP socket, retrying on EINTR.
1035  * On a send failure for an established connection the peer entry (and any buffered
1036  * "msg:<enodbName>|<mtype>" entry) is removed from the map and the socket is closed.
1037  * On success the message direction is set to 'D' and a JSON trace record is written.
1038  * @param peerInfo connected RAN peer holding the file descriptor
1039  * @param message the asn data to send, its length and the enodbName for printing purposes
1040  * @param m map of host information, cleaned on failure
1041  * @return 0 on success, a negative number on failure
1042  */
1043 int sendSctpMsg(ConnectedCU_t *peerInfo, ReportingMessages_t &message, Sctp_Map_t *m) {
1044     auto loglevel = mdclog_level_get();
1045     int fd = peerInfo->fileDescriptor;
1046     if (loglevel >= MDCLOG_DEBUG) {
1047         mdclog_write(MDCLOG_DEBUG, "Send SCTP message for CU %s, %s",
1048                      message.message.enodbName, __FUNCTION__);
1049     }
1050
1051     while (true) {
1052         if (send(fd,message.message.asndata, message.message.asnLength,MSG_NOSIGNAL) < 0) {
1053             if (errno == EINTR) {
1054                 continue;
1055             }
1056             mdclog_write(MDCLOG_ERR, "error writing a message to the CU, %s ", strerror(errno));
1057             if (!peerInfo->isConnected) {
1058                 mdclog_write(MDCLOG_ERR, "connection to CU %s is still in progress.", message.message.enodbName);
1059                 return -1;
1060             }
1061             cleanHashEntry(peerInfo, m);
1062             close(fd);
1063             char key[MAX_ENODB_NAME_SIZE * 2];
1064             snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", message.message.enodbName,
1065                      message.message.messageType);
1066             if (loglevel >= MDCLOG_DEBUG) {
1067                 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
1068             }
1069             auto tmp = m->find(key);
1070             if (tmp) {
1071                 free(tmp);
1072             }
1073             m->erase(key);
1074             return -1;
1075         }
1076         message.message.direction = 'D';
1077         // send report.buffer of size
1078         buildJsonMessage(message);
1079
1080         if (loglevel >= MDCLOG_DEBUG) {
1081             mdclog_write(MDCLOG_DEBUG,
1082                          "SCTP message for CU %s sent from %s",
1083                          message.message.enodbName,
1084                          __FUNCTION__);
1085         }
1086         return 0;
1087     }
1088 }
1089
1090 /**
1091  * Points the reporting message at the payload and length of the received RMR message.
1092  * @param message reporting message updated with asndata and asnLength
1093  * @param rmrMessageBuffer holds the received RMR message
1094  */
1095 void getRequestMetaData(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1096     message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
1097     message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
1098
1099     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1100         mdclog_write(MDCLOG_DEBUG, "Message from Xapp RAN name = %s message length = %ld",
1101                      message.message.enodbName, (unsigned long) message.message.asnLength);
1102     }
1103 }
1104
1105
1106
1107 /**
1108  * Drains an SCTP socket (edge-triggered), decodes each E2AP PDU and dispatches it by type; on EOF the connection is reported to the xApps and cleaned up.
1109  * @param events epoll event carrying the peer pointer
1110  * @param sctpMap map of connected RAN peers
1111  * @param numOfMessages counter of processed SCTP messages
1112  * @param rmrMessageBuffer RMR buffers used to forward the decoded messages
1113  * @param ts receive timestamp copied into the reporting message
1114  * @return 0
1115  */
1116 int receiveDataFromSctp(struct epoll_event *events,
1117                         Sctp_Map_t *sctpMap,
1118                         int &numOfMessages,
1119                         RmrMessagesBuffer_t &rmrMessageBuffer,
1120                         struct timespec &ts) {
1121     /* We have data on the fd waiting to be read. Read and display it.
1122  * We must read whatever data is available completely, as we are running
1123  *  in edge-triggered mode and won't get a notification again for the same data. */
1124     ReportingMessages_t message {};
1125     auto done = 0;
1126     auto loglevel = mdclog_level_get();
1127
1128     // get the identity of the interface
1129     message.peerInfo = (ConnectedCU_t *)events->data.ptr;
1130
1131     struct timespec start{0, 0};
1132     struct timespec decodestart{0, 0};
1133     struct timespec end{0, 0};
1134
1135     E2AP_PDU_t *pdu = nullptr;
1136
1137     while (true) {
1138         if (loglevel >= MDCLOG_DEBUG) {
1139             mdclog_write(MDCLOG_DEBUG, "Start Read from SCTP %d fd", message.peerInfo->fileDescriptor);
1140             clock_gettime(CLOCK_MONOTONIC, &start);
1141         }
1142         // read the buffer directly to rmr payload
1143         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1144         message.message.asnLength = rmrMessageBuffer.sendMessage->len =
1145                 read(message.peerInfo->fileDescriptor, rmrMessageBuffer.sendMessage->payload, RECEIVE_SCTP_BUFFER_SIZE);
1146
1147         if (loglevel >= MDCLOG_DEBUG) {
1148             mdclog_write(MDCLOG_DEBUG, "Finish Read from SCTP %d fd message length = %ld",
1149                          message.peerInfo->fileDescriptor, message.message.asnLength);
1150         }
1151
1152         memcpy(message.message.enodbName, message.peerInfo->enodbName, sizeof(message.peerInfo->enodbName));
1153         message.message.direction = 'U';
1154         message.message.time.tv_nsec = ts.tv_nsec;
1155         message.message.time.tv_sec = ts.tv_sec;
1156
1157         if (message.message.asnLength < 0) {
1158             if (errno == EINTR) {
1159                 continue;
1160             }
1161             /* If errno == EAGAIN, that means we have read all
1162                data. So go back to the main loop. */
1163             if (errno != EAGAIN) {
1164                 mdclog_write(MDCLOG_ERR, "Read error, %s ", strerror(errno));
1165                 done = 1;
1166             } else if (loglevel >= MDCLOG_DEBUG) {
1167                 mdclog_write(MDCLOG_DEBUG, "EAGAIN - descriptor = %d", message.peerInfo->fileDescriptor);
1168             }
1169             break;
1170         } else if (message.message.asnLength == 0) {
1171             /* End of file. The remote has closed the connection. */
1172             if (loglevel >= MDCLOG_INFO) {
1173                 mdclog_write(MDCLOG_INFO, "END of File Closed connection - descriptor = %d",
1174                              message.peerInfo->fileDescriptor);
1175             }
1176             done = 1;
1177             break;
1178         }
1179
1180         if (loglevel >= MDCLOG_DEBUG) {
1181             char printBuffer[4096]{};
1182             char *tmp = printBuffer;
1183             for (size_t i = 0; i < (size_t)message.message.asnLength; ++i) {
1184                 snprintf(tmp, 3, "%02x", message.message.asndata[i]);
1185                 tmp += 2;
1186             }
1187             printBuffer[2 * message.message.asnLength] = 0;
1188             clock_gettime(CLOCK_MONOTONIC, &end);
1189             mdclog_write(MDCLOG_DEBUG, "Before decoding E2AP PDU for : %s, Read time is : %ld seconds, %ld nanoseconds",
1190                          message.peerInfo->enodbName, end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
1191             mdclog_write(MDCLOG_DEBUG, "PDU buffer length = %ld, data =  : %s", message.message.asnLength,
1192                          printBuffer);
1193             clock_gettime(CLOCK_MONOTONIC, &decodestart);
1194         }
1195
1196         auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1197                           message.message.asndata, message.message.asnLength);
1198         if (rval.code != RC_OK) {
1199             mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2AP PDU from RAN : %s", rval.code,
1200                          message.peerInfo->enodbName);
1201             break;
1202         }
1203
1204         if (loglevel >= MDCLOG_DEBUG) {
1205             clock_gettime(CLOCK_MONOTONIC, &end);
1206             mdclog_write(MDCLOG_DEBUG, "After decoding E2AP PDU for : %s, decode time is : %ld seconds, %ld nanoseconds",
1207                          message.peerInfo->enodbName, end.tv_sec - decodestart.tv_sec, end.tv_nsec - decodestart.tv_nsec);
1208             char *printBuffer;
1209             size_t size;
1210             FILE *stream = open_memstream(&printBuffer, &size);
1211             asn_fprint(stream, &asn_DEF_E2AP_PDU, pdu);
1212             mdclog_write(MDCLOG_DEBUG, "Decoded E2AP PDU : %s", printBuffer);
1213             clock_gettime(CLOCK_MONOTONIC, &decodestart);
1214         }
1215
1216         switch (pdu->present) {
1217             case E2AP_PDU_PR_initiatingMessage: {//initiating message
1218                 asnInitiatingRequest(pdu, sctpMap,message, rmrMessageBuffer);
1219                 break;
1220             }
1221             case E2AP_PDU_PR_successfulOutcome: { //successful outcome
1222                 asnSuccsesfulMsg(pdu, sctpMap, message,  rmrMessageBuffer);
1223                 break;
1224             }
1225             case E2AP_PDU_PR_unsuccessfulOutcome: { //Unsuccessful Outcome
1226                 asnUnSuccsesfulMsg(pdu, sctpMap, message, rmrMessageBuffer);
1227                 break;
1228             }
1229             default:
1230                 mdclog_write(MDCLOG_ERR, "Unknown index %d in E2AP PDU", pdu->present);
1231                 break;
1232         }
1233         if (loglevel >= MDCLOG_DEBUG) {
1234             clock_gettime(CLOCK_MONOTONIC, &end);
1235             mdclog_write(MDCLOG_DEBUG,
1236                          "After processing message and sent to rmr for : %s, Read time is : %ld seconds, %ld nanoseconds",
1237                          message.peerInfo->enodbName, end.tv_sec - decodestart.tv_sec, end.tv_nsec - decodestart.tv_nsec);
1238         }
1239         numOfMessages++;
1240         if (pdu != nullptr) {
1241             ASN_STRUCT_RESET(asn_DEF_E2AP_PDU, pdu);
1242             //ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);
1243             //pdu = nullptr;
1244         }
1245     }
1246
1247     if (done) {
1248         if (loglevel >= MDCLOG_INFO) {
1249             mdclog_write(MDCLOG_INFO, "Closed connection - descriptor = %d", message.peerInfo->fileDescriptor);
1250         }
1251         message.message.asnLength = rmrMessageBuffer.sendMessage->len =
1252                 snprintf((char *)rmrMessageBuffer.sendMessage->payload,
1253                          256,
1254                          "%s|CU disconnected unexpectedly",
1255                          message.peerInfo->enodbName);
1256         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1257
1258         if (sendRequestToXapp(message,
1259                               RIC_SCTP_CONNECTION_FAILURE,
1260                               rmrMessageBuffer) != 0) {
1261             mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
1262         }
1263
1264         /* Closing descriptor make epoll remove it from the set of descriptors which are monitored. */
1265         close(message.peerInfo->fileDescriptor);
1266         cleanHashEntry((ConnectedCU_t *) events->data.ptr, sctpMap);
1267     }
1268     if (loglevel >= MDCLOG_DEBUG) {
1269         clock_gettime(CLOCK_MONOTONIC, &end);
1270         mdclog_write(MDCLOG_DEBUG, "from receive SCTP to send RMR time is %ld seconds and %ld nanoseconds",
1271                      end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
1272
1273     }
1274     return 0;
1275 }
1276
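/**
 * Encodes the setup-related E2AP PDU to XER, optionally embeds the RAN function lists
 * via buildXmlData(), prefixes the result with this E2T's "<ip>:<rmr-port>|" and forwards
 * it to the xApps / E2 Manager over RMR, with one retry on RMR_ERR_RETRY.
 */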
1277 static void buildAndsendSetupRequest(ReportingMessages_t &message,
1278                                      RmrMessagesBuffer_t &rmrMessageBuffer,
1279                                      E2AP_PDU_t *pdu,
1280                                      string const &messageName,
1281                                      string const &ieName,
1282                                      vector<string> &functionsToAdd_v,
1283                                      vector<string> &functionsToModified_v) {
1284     auto logLevel = mdclog_level_get();
1285     // now we can send the data to e2Mgr
1286
1287     asn_enc_rval_t er;
1288     auto buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
1289     unsigned char *buffer;
1290     while (true) {
1291         buffer = (unsigned char *)malloc(buffer_size);
1292         er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu, buffer, buffer_size);
1293         if (er.encoded == -1) {
1294             mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1295             return;
1296         } else if (er.encoded > (ssize_t) buffer_size) {
1297             auto oldSize = buffer_size;
1298             buffer_size = er.encoded + 128;
1299             mdclog_write(MDCLOG_WARN, "Buffer of size %d is too small for %s. Reallocating buffer of size %d",
1300                          (int) oldSize,
1301                          asn_DEF_E2AP_PDU.name, (int) buffer_size);
1302             free(buffer);
1303             continue;
1304         }
1305         buffer[er.encoded] = '\0';
1306         break;
1307     }
1308     // encode to xml
1309
1310     string res {};
1311     if (!functionsToAdd_v.empty() || !functionsToModified_v.empty()) {
1312         res = buildXmlData(messageName, ieName, functionsToAdd_v, functionsToModified_v, buffer, (size_t) er.encoded);
1313     }
1314     rmr_mbuf_t *rmrMsg;
1315     if (res.length() == 0) {
1316         rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, buffer_size + 256);
1317         rmrMsg->len = snprintf((char *) rmrMsg->payload, buffer_size + 256, "%s:%d|%s",
1318                                message.peerInfo->sctpParams->myIP.c_str(),
1319                                message.peerInfo->sctpParams->rmrPort,
1320                                buffer);
1321     } else {
1322         rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, (int)res.length() + 256);
1323         rmrMsg->len = snprintf((char *) rmrMsg->payload, res.length() + 256, "%s:%d|%s",
1324                                message.peerInfo->sctpParams->myIP.c_str(),
1325                                message.peerInfo->sctpParams->rmrPort,
1326                                res.c_str());
1327     }
1328
1329     if (logLevel >= MDCLOG_DEBUG) {
1330         mdclog_write(MDCLOG_DEBUG, "Setup request of size %d :\n %s\n", rmrMsg->len, rmrMsg->payload);
1331     }
1332     // send to RMR
1333     rmrMsg->mtype = message.message.messageType;
1334     rmrMsg->state = 0;
1335     rmr_bytes2meid(rmrMsg, (unsigned char *) message.message.enodbName, strlen(message.message.enodbName));
1336
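    // stamp a fresh transaction id (monotonically increasing counter) used to correlate the RMR transaction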
1337     static unsigned char tx[32];
1338     snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1339     rmr_bytes2xact(rmrMsg, tx, strlen((const char *) tx));
1340
1341     rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
1342     if (rmrMsg == nullptr) {
1343         mdclog_write(MDCLOG_ERR, "RMR send failed, rmr_send_msg returned nullptr");
1344     } else if (rmrMsg->state != 0) {
1345         char meid[RMR_MAX_MEID]{};
1346         if (rmrMsg->state == RMR_ERR_RETRY) {
1347             usleep(5);
1348             rmrMsg->state = 0;
1349             mdclog_write(MDCLOG_INFO, "RETRY sending Message %d to Xapp from %s",
1350                          rmrMsg->mtype, rmr_get_meid(rmrMsg, (unsigned char *) meid));
1351             rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
1352             if (rmrMsg == nullptr) {
1353                 mdclog_write(MDCLOG_ERR, "RMR retry send failed, rmr_send_msg returned nullptr");
1354             } else if (rmrMsg->state != 0) {
1355                 mdclog_write(MDCLOG_ERR,
1356                              "RMR Retry failed %s sending request %d to Xapp from %s",
1357                              translateRmrErrorMessages(rmrMsg->state).c_str(),
1358                              rmrMsg->mtype,
1359                              rmr_get_meid(rmrMsg, (unsigned char *) meid));
1360             }
1361         } else {
1362             mdclog_write(MDCLOG_ERR, "RMR failed: %s. sending request %d to Xapp from %s",
1363                          translateRmrErrorMessages(rmrMsg->state).c_str(),
1364                          rmrMsg->mtype,
1365                          rmr_get_meid(rmrMsg, (unsigned char *) meid));
1366         }
1367     }
1368     message.peerInfo->gotSetup = true;
1369     buildJsonMessage(message);
1370     if (rmrMsg != nullptr) {
1371         rmr_free_msg(rmrMsg);
1372     }
1373     free(buffer);
1374 }
1375
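/**
 * Convert each RANfunction-Item in the list to XML: the PER-encoded E2SM-gNB-NRT RAN function
 * definition is decoded and re-encoded as XER, and the resulting strings are collected in
 * runFunXML_v (which is cleared first). Returns 0 on success, -1 on a decode error.
 */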
1376 int RAN_Function_list_To_Vector(RANfunctions_List_t& list, vector <string> &runFunXML_v) {
1377     auto index = 0;
1378     runFunXML_v.clear();
1379     for (auto j = 0; j < list.list.count; j++) {
1380         auto *raNfunctionItemIEs = (RANfunction_ItemIEs_t *)list.list.array[j];
1381         if (raNfunctionItemIEs->id == ProtocolIE_ID_id_RANfunction_Item &&
1382             (raNfunctionItemIEs->value.present == RANfunction_ItemIEs__value_PR_RANfunction_Item)) {
1383             // encode to xml
1384             E2SM_gNB_NRT_RANfunction_Definition_t *ranFunDef = nullptr;
1385             auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER,
1386                                    &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
1387                                    (void **)&ranFunDef,
1388                                    raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.buf,
1389                                    raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.size);
1390             if (rval.code != RC_OK) {
1391                 mdclog_write(MDCLOG_ERR, "Error %d decoding (unpack) E2SM message from: %s",
1392                              rval.code,
1393                              asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name);
1394                 return -1;
1395             }
1396
1397             auto xml_buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
1398             unsigned char xml_buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
1399             memset(xml_buffer, 0, RECEIVE_SCTP_BUFFER_SIZE * 2);
1400             // encode to xml
1401             auto er = asn_encode_to_buffer(nullptr,
1402                                            ATS_BASIC_XER,
1403                                            &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
1404                                            ranFunDef,
1405                                            xml_buffer,
1406                                            xml_buffer_size);
1407             if (er.encoded == -1) {
1408                 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s",
1409                              asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
1410                              strerror(errno));
1411             } else if (er.encoded > (ssize_t)xml_buffer_size) {
1412                 mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, at %s line %d",
1413                              (int) xml_buffer_size,
1414                              asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name, __func__, __LINE__);
1415             } else {
1416                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1417                     mdclog_write(MDCLOG_DEBUG, "Encoding E2SM %s PDU number %d : %s",
1418                                  asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
1419                                  index++,
1420                                  xml_buffer);
1421                 }
1422
1423                 string runFuncs = (char *)(xml_buffer);
1424                 runFunXML_v.emplace_back(runFuncs);
1425             }
1426         }
1427     }
1428     return 0;
1429 }
1430
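/**
 * Walk the RICserviceUpdate IEs and collect the added and modified RAN function definitions
 * (as XML strings) into RANfunctionsAdded_v / RANfunctionsModified_v.
 * Returns 0 on success, -1 if a RAN function list could not be converted.
 */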
1431 int collectServiceUpdate_RequestData(E2AP_PDU_t *pdu,
1432                                      Sctp_Map_t *sctpMap,
1433                                      ReportingMessages_t &message,
1434                                      vector <string> &RANfunctionsAdded_v,
1435                                      vector <string> &RANfunctionsModified_v) {
1436     memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
1437     for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.count; i++) {
1438         auto *ie = pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.array[i];
1439         if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
1440             if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctions_List) {
1441                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1442                     mdclog_write(MDCLOG_DEBUG, "RAN function list has %d entries",
1443                                  ie->value.choice.RANfunctions_List.list.count);
1444                 }
1445                 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
1446                     return -1;
1447                 }
1448             }
1449         } else if (ie->id == ProtocolIE_ID_id_RANfunctionsModified) {
1450             if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctions_List) {
1451                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1452                     mdclog_write(MDCLOG_DEBUG, "RAN function list has %d entries",
1453                                  ie->value.choice.RANfunctions_List.list.count);
1454                 }
1455                 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsModified_v) != 0 ) {
1456                     return -1;
1457                 }
1458             }
1459         }
1460     }
1461     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1462         mdclog_write(MDCLOG_DEBUG, "RAN function vector has %ld entries",
1463                      RANfunctionsAdded_v.size());
1464     }
1465     return 0;
1466 }
1467
1468
1469
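/**
 * Create the per-peer Prometheus counters. The counters array is indexed by direction/outcome
 * (IN_INITI, IN_SUCC, IN_UN_SUCC, OUT_INITI, OUT_SUCC, OUT_UN_SUCC), by counter type
 * (MSG_COUNTER, BYTES_COUNTER) and by E2AP procedure code - 1.
 */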
1470 void buildPrometheuslist(ConnectedCU_t *peerInfo, Family<Counter> *prometheusFamily) {
1471     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Messages"}});
1472     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Bytes"}});
1473
1474     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Messages"}});
1475     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Bytes"}});
1476
1477     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICindication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Messages"}});
1478     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICindication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Bytes"}});
1479
1480     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Messages"}});
1481     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Bytes"}});
1482
1483     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Messages"}});
1484     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Bytes"}});
1485     // --------------- incoming successful outcomes ---------------
1486     peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Messages"}});
1487     peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Bytes"}});
1488
1489     peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Messages"}});
1490     peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Bytes"}});
1491
1492     peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Messages"}});
1493     peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Bytes"}});
1494
1495     peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Messages"}});
1496     peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Bytes"}});
1497     //------------------- incoming unsuccessful outcomes -------------------
1498
1499     peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Messages"}});
1500     peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Bytes"}});
1501
1502     peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Messages"}});
1503     peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Bytes"}});
1504
1505     peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Messages"}});
1506     peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Bytes"}});
1507
1508     //=============================== outgoing initiating messages ===============================
1509     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Messages"}});
1510     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Bytes"}});
1511
1512     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Messages"}});
1513     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Bytes"}});
1514
1515     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Messages"}});
1516     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Bytes"}});
1517
1518     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceQuery - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Messages"}});
1519     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceQuery - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Bytes"}});
1520
1521     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Messages"}});
1522     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Bytes"}});
1523
1524     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Messages"}});
1525     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Bytes"}});
1526     //--------------------------------- outgoing successful outcomes ---------------------------------
1527     peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Messages"}});
1528     peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Bytes"}});
1529
1530     peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Messages"}});
1531     peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Bytes"}});
1532
1533     peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Messages"}});
1534     peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Bytes"}});
1535     //--------------------------------- outgoing unsuccessful outcomes ---------------------------------
1536     peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Messages"}});
1537     peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Bytes"}});
1538
1539     peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Messages"}});
1540     peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Bytes"}});
1541 }
1542 /**
1543  * Extract data from an incoming E2 Setup Request: the E2 node name (used as the RMR MEID) and the added RAN function definitions, converted to XML.
1544  * @param pdu decoded E2AP PDU holding the E2setupRequest
1545  * @param sctpMap map of connected E2 nodes, keyed by node name
1546  * @param message reporting structure; the peer's enodbName is filled in here
1547  * @param RANfunctionsAdded_v output vector of RAN function definitions encoded as XML
1548  * @return 0 on success, -1 on failure
1549  */
1550 int collectSetupRequestData(E2AP_PDU_t *pdu,
1551                                      Sctp_Map_t *sctpMap,
1552                                      ReportingMessages_t &message,
1553                                      vector <string> &RANfunctionsAdded_v) {
1554     memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
1555     for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.count; i++) {
1556         auto *ie = pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.array[i];
1557         if (ie->id == ProtocolIE_ID_id_GlobalE2node_ID) {
1558             // get the ran name for meid
1559             if (ie->value.present == E2setupRequestIEs__value_PR_GlobalE2node_ID) {
1560                 if (buildRanName(message.peerInfo->enodbName, ie) < 0) {
1561                     mdclog_write(MDCLOG_ERR, "Bad param in E2setupRequestIEs GlobalE2node_ID.\n");
1562                     // no message will be sent
1563                     return -1;
1564                 }
1565
1566                 memcpy(message.message.enodbName, message.peerInfo->enodbName, strlen(message.peerInfo->enodbName));
1567                 sctpMap->setkey(message.message.enodbName, message.peerInfo);
1568             }
1569         } else if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
1570             if (ie->value.present == E2setupRequestIEs__value_PR_RANfunctions_List) {
1571                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1572                     mdclog_write(MDCLOG_DEBUG, "RAN function list has %d entries",
1573                                  ie->value.choice.RANfunctions_List.list.count);
1574                 }
1575                 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
1576                     return -1;
1577                 }
1578             }
1579         }
1580     }
1581     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1582         mdclog_write(MDCLOG_DEBUG, "RAN function vector has %ld entries",
1583                      RANfunctionsAdded_v.size());
1584     }
1585     return 0;
1586 }
1587
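/**
 * Re-encode the message in rmrMessageBuffer.sendMessage from aligned PER to XER (XML) in place,
 * updating the payload length. Returns 0 on success, -1 on decode/encode failure.
 */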
1588 int XML_From_PER(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1589     E2AP_PDU_t *pdu = nullptr;
1590
1591     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1592         mdclog_write(MDCLOG_DEBUG, "got PER message of size %d: %s",
1593                      rmrMessageBuffer.sendMessage->len, rmrMessageBuffer.sendMessage->payload);
1594     }
1595     auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1596                            rmrMessageBuffer.sendMessage->payload, rmrMessageBuffer.sendMessage->len);
1597     if (rval.code != RC_OK) {
1598         mdclog_write(MDCLOG_ERR, "Error %d decoding (unpack) setup response from E2MGR: %s",
1599                      rval.code,
1600                      message.message.enodbName);
1601         return -1;
1602     }
1603
1604     int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
1605     auto er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu,
1606                                    rmrMessageBuffer.sendMessage->payload, buff_size);
1607     if (er.encoded == -1) {
1608         mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1609         return -1;
1610     } else if (er.encoded > (ssize_t)buff_size) {
1611         mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, at %s line %d",
1612                      buff_size,
1613                      asn_DEF_E2AP_PDU.name,
1614                      __func__,
1615                      __LINE__);
1616         return -1;
1617     }
1618     rmrMessageBuffer.sendMessage->len = er.encoded;
1619     return 0;
1620
1621 }
1622
1623 /**
1624  * Handle an E2AP initiating message received from the E2 node and forward it to the xApp / E2 Manager over RMR.
1625  * @param pdu decoded E2AP PDU
1626  * @param message reporting structure for the received message
1627  * @param rmrMessageBuffer RMR send/receive buffers
1628  */
1629 void asnInitiatingRequest(E2AP_PDU_t *pdu,
1630                           Sctp_Map_t *sctpMap,
1631                           ReportingMessages_t &message,
1632                           RmrMessagesBuffer_t &rmrMessageBuffer) {
1633     auto logLevel = mdclog_level_get();
1634     auto procedureCode = ((InitiatingMessage_t *) pdu->choice.initiatingMessage)->procedureCode;
1635     if (logLevel >= MDCLOG_DEBUG) {
1636         mdclog_write(MDCLOG_DEBUG, "Initiating message %ld\n", procedureCode);
1637     }
1638     switch (procedureCode) {
1639         case ProcedureCode_id_E2setup: {
1640             if (logLevel >= MDCLOG_DEBUG) {
1641                 mdclog_write(MDCLOG_DEBUG, "Got E2setup");
1642             }
1643
1644             vector <string> RANfunctionsAdded_v;
1645             vector <string> RANfunctionsModified_v;
1646             RANfunctionsAdded_v.clear();
1647             RANfunctionsModified_v.clear();
1648             if (collectSetupRequestData(pdu, sctpMap, message, RANfunctionsAdded_v) != 0) {
1649                 break;
1650             }
1651
1652             buildPrometheuslist(message.peerInfo, message.peerInfo->sctpParams->prometheusFamily);
1653
1654             string messageName("E2setupRequest");
1655             string ieName("E2setupRequestIEs");
1656             message.message.messageType = RIC_E2_SETUP_REQ;
1657             message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_E2setup - 1]->Increment();
1658             message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_E2setup - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1659             buildAndsendSetupRequest(message, rmrMessageBuffer, pdu, messageName, ieName, RANfunctionsAdded_v, RANfunctionsModified_v);
1660             break;
1661         }
1662         case ProcedureCode_id_RICserviceUpdate: {
1663             if (logLevel >= MDCLOG_DEBUG) {
1664                 mdclog_write(MDCLOG_DEBUG, "Got RICserviceUpdate %s", message.message.enodbName);
1665             }
1666             vector <string> RANfunctionsAdded_v;
1667             vector <string> RANfunctionsModified_v;
1668             RANfunctionsAdded_v.clear();
1669             RANfunctionsModified_v.clear();
1670             if (collectServiceUpdate_RequestData(pdu, sctpMap, message,
1671                                                  RANfunctionsAdded_v, RANfunctionsModified_v) != 0) {
1672                 break;
1673             }
1674
1675             string messageName("RICserviceUpdate");
1676             string ieName("RICserviceUpdateIEs");
1677             message.message.messageType = RIC_SERVICE_UPDATE;
1678             message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate - 1]->Increment();
1679             message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1680
1681             buildAndsendSetupRequest(message, rmrMessageBuffer, pdu, messageName, ieName, RANfunctionsAdded_v, RANfunctionsModified_v);
1682             break;
1683         }
1684         case ProcedureCode_id_ErrorIndication: {
1685             if (logLevel >= MDCLOG_DEBUG) {
1686                 mdclog_write(MDCLOG_DEBUG, "Got ErrorIndication %s", message.message.enodbName);
1687             }
1688             message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication - 1]->Increment();
1689             message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1690             if (sendRequestToXapp(message, RIC_ERROR_INDICATION, rmrMessageBuffer) != 0) {
1691                 mdclog_write(MDCLOG_ERR, "RIC_ERROR_INDICATION failed to send to xAPP");
1692             }
1693             break;
1694         }
1695         case ProcedureCode_id_Reset: {
1696             if (logLevel >= MDCLOG_DEBUG) {
1697                 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
1698             }
1699
1700             message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_Reset - 1]->Increment();
1701             message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_Reset - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1702             if (XML_From_PER(message, rmrMessageBuffer) < 0) {
1703                 break;
1704             }
1705
1706             if (sendRequestToXapp(message, RIC_E2_RESET_REQ, rmrMessageBuffer) != 0) {
1707                 mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_REQ message failed to send to xAPP");
1708             }
1709             break;
1710         }
1711         case ProcedureCode_id_RICindication: {
1712             if (logLevel >= MDCLOG_DEBUG) {
1713                 mdclog_write(MDCLOG_DEBUG, "Got RICindication %s", message.message.enodbName);
1714             }
1715             for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.count; i++) {
1716                 auto messageSent = false;
1717                 RICindication_IEs_t *ie = pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.array[i];
1718                 if (logLevel >= MDCLOG_DEBUG) {
1719                     mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1720                 }
1721                 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1722                     if (logLevel >= MDCLOG_DEBUG) {
1723                         mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1724                     }
1725                     if (ie->value.present == RICindication_IEs__value_PR_RICrequestID) {
1726                         static unsigned char tx[32];
1727                         message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_INDICATION;
1728                         snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1729                         rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1730                         rmr_bytes2meid(rmrMessageBuffer.sendMessage,
1731                                        (unsigned char *)message.message.enodbName,
1732                                        strlen(message.message.enodbName));
1733                         rmrMessageBuffer.sendMessage->state = 0;
1734                         rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1735
1736                         //ie->value.choice.RICrequestID.ricInstanceID;
1737                         if (mdclog_level_get() >= MDCLOG_DEBUG) {
1738                             mdclog_write(MDCLOG_DEBUG, "sub id = %d, mtype = %d, ric instance id %ld, requestor id = %ld",
1739                                          rmrMessageBuffer.sendMessage->sub_id,
1740                                          rmrMessageBuffer.sendMessage->mtype,
1741                                          ie->value.choice.RICrequestID.ricInstanceID,
1742                                          ie->value.choice.RICrequestID.ricRequestorID);
1743                         }
1744                         message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICindication - 1]->Increment();
1745                         message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICindication - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1746                         sendRmrMessage(rmrMessageBuffer, message);
1747                         messageSent = true;
1748                     } else {
1749                         mdclog_write(MDCLOG_ERR, "RIC request id missing, illegal request");
1750                     }
1751                 }
1752                 if (messageSent) {
1753                     break;
1754                 }
1755             }
1756             break;
1757         }
1758         default: {
1759             mdclog_write(MDCLOG_ERR, "Undefined or not supported message = %ld", procedureCode);
1760             message.message.messageType = 0; // no RMR message type yet
1761
1762             buildJsonMessage(message);
1763
1764             break;
1765         }
1766     }
1767 }
1768
1769 /**
1770  * Handle an E2AP successful-outcome message received from the E2 node and forward it to the xApp over RMR.
1771  * @param pdu decoded E2AP PDU
1772  * @param message reporting structure for the received message
1773  * @param rmrMessageBuffer RMR send/receive buffers
1774  */
1775 void asnSuccsesfulMsg(E2AP_PDU_t *pdu,
1776                       Sctp_Map_t *sctpMap,
1777                       ReportingMessages_t &message,
1778                       RmrMessagesBuffer_t &rmrMessageBuffer) {
1779     auto procedureCode = pdu->choice.successfulOutcome->procedureCode;
1780     auto logLevel = mdclog_level_get();
1781     if (logLevel >= MDCLOG_INFO) {
1782         mdclog_write(MDCLOG_INFO, "Successful Outcome %ld", procedureCode);
1783     }
1784     switch (procedureCode) {
1785         case ProcedureCode_id_Reset: {
1786             if (logLevel >= MDCLOG_DEBUG) {
1787                 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
1788             }
1789             message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_Reset - 1]->Increment();
1790             message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1791             if (XML_From_PER(message, rmrMessageBuffer) < 0) {
1792                 break;
1793             }
1794             if (sendRequestToXapp(message, RIC_E2_RESET_RESP, rmrMessageBuffer) != 0) {
1795                 mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_RESP message failed to send to xAPP");
1796             }
1797             break;
1798         }
1799         case ProcedureCode_id_RICcontrol: {
1800             if (logLevel >= MDCLOG_DEBUG) {
1801                 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
1802             }
1803             for (auto i = 0;
1804                  i < pdu->choice.successfulOutcome->value.choice.RICcontrolAcknowledge.protocolIEs.list.count; i++) {
1805                 auto messageSent = false;
1806                 RICcontrolAcknowledge_IEs_t *ie = pdu->choice.successfulOutcome->value.choice.RICcontrolAcknowledge.protocolIEs.list.array[i];
1807                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1808                     mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1809                 }
1810                 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1811                     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1812                         mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1813                     }
1814                     if (ie->value.present == RICcontrolAcknowledge_IEs__value_PR_RICrequestID) {
1815                         message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_ACK;
1816                         rmrMessageBuffer.sendMessage->state = 0;
1817 //                        rmrMessageBuffer.sendMessage->sub_id = (int) ie->value.choice.RICrequestID.ricRequestorID;
1818                         rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1819
1820                         static unsigned char tx[32];
1821                         snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1822                         rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1823                         rmr_bytes2meid(rmrMessageBuffer.sendMessage,
1824                                        (unsigned char *)message.message.enodbName,
1825                                        strlen(message.message.enodbName));
1826
1827                         message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment();
1828                         message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1829                         sendRmrMessage(rmrMessageBuffer, message);
1830                         messageSent = true;
1831                     } else {
1832                         mdclog_write(MDCLOG_ERR, "RIC request id missing, illegal request");
1833                     }
1834                 }
1835                 if (messageSent) {
1836                     break;
1837                 }
1838             }
1839
1840             break;
1841         }
1842         case ProcedureCode_id_RICsubscription: {
1843             if (logLevel >= MDCLOG_DEBUG) {
1844                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
1845             }
1846             message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment();
1847             message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1848             if (sendRequestToXapp(message, RIC_SUB_RESP, rmrMessageBuffer) != 0) {
1849                 mdclog_write(MDCLOG_ERR, "Subscription successful message failed to send to xAPP");
1850             }
1851             break;
1852         }
1853         case ProcedureCode_id_RICsubscriptionDelete: {
1854             if (logLevel >= MDCLOG_DEBUG) {
1855                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
1856             }
1857             message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment();
1858             message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1859             if (sendRequestToXapp(message, RIC_SUB_DEL_RESP, rmrMessageBuffer) != 0) {
1860                 mdclog_write(MDCLOG_ERR, "Subscription delete successful message failed to send to xAPP");
1861             }
1862             break;
1863         }
1864         default: {
1865             mdclog_write(MDCLOG_WARN, "Undefined or not supported message = %ld", procedureCode);
1866             message.message.messageType = 0; // no RMR message type yet
1867             buildJsonMessage(message);
1868
1869             break;
1870         }
1871     }
1872 }
1873
1874 /**
1875  * Handle an E2AP unsuccessful-outcome message received from the E2 node and forward it to the xApp over RMR.
1876  * @param pdu decoded E2AP PDU
1877  * @param message reporting structure for the received message
1878  * @param rmrMessageBuffer RMR send/receive buffers
1879  */
1880 void asnUnSuccsesfulMsg(E2AP_PDU_t *pdu,
1881                         Sctp_Map_t *sctpMap,
1882                         ReportingMessages_t &message,
1883                         RmrMessagesBuffer_t &rmrMessageBuffer) {
1884     auto procedureCode = pdu->choice.unsuccessfulOutcome->procedureCode;
1885     auto logLevel = mdclog_level_get();
1886     if (logLevel >= MDCLOG_INFO) {
1887         mdclog_write(MDCLOG_INFO, "Unsuccessful Outcome %ld", procedureCode);
1888     }
1889     switch (procedureCode) {
1890         case ProcedureCode_id_RICcontrol: {
1891             if (logLevel >= MDCLOG_DEBUG) {
1892                 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
1893             }
1894             for (int i = 0;
1895                  i < pdu->choice.unsuccessfulOutcome->value.choice.RICcontrolFailure.protocolIEs.list.count; i++) {
1896                 auto messageSent = false;
1897                 RICcontrolFailure_IEs_t *ie = pdu->choice.unsuccessfulOutcome->value.choice.RICcontrolFailure.protocolIEs.list.array[i];
1898                 if (logLevel >= MDCLOG_DEBUG) {
1899                     mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1900                 }
1901                 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1902                     if (logLevel >= MDCLOG_DEBUG) {
1903                         mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1904                     }
1905                     if (ie->value.present == RICcontrolFailure_IEs__value_PR_RICrequestID) {
1906                         message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_FAILURE;
1907                         rmrMessageBuffer.sendMessage->state = 0;
1908 //                        rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricRequestorID;
1909                         rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1910                         static unsigned char tx[32];
1911                         snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1912                         rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1913                         rmr_bytes2meid(rmrMessageBuffer.sendMessage, (unsigned char *) message.message.enodbName,
1914                                        strlen(message.message.enodbName));
1915                         message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment();
1916                         message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1917                         sendRmrMessage(rmrMessageBuffer, message);
1918                         messageSent = true;
1919                     } else {
1920                         mdclog_write(MDCLOG_ERR, "RIC request id missing, illegal request");
1921                     }
1922                 }
1923                 if (messageSent) {
1924                     break;
1925                 }
1926             }
1927             break;
1928         }
1929         case ProcedureCode_id_RICsubscription: {
1930             if (logLevel >= MDCLOG_DEBUG) {
1931                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
1932             }
1933             message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment();
1934             message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1935             if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
1936                 mdclog_write(MDCLOG_ERR, "Subscription unsuccessful message failed to send to xAPP");
1937             }
1938             break;
1939         }
1940         case ProcedureCode_id_RICsubscriptionDelete: {
1941             if (logLevel >= MDCLOG_DEBUG) {
1942                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
1943             }
1944             message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment();
1945             message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1946             if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
1947                 mdclog_write(MDCLOG_ERR, "Subscription Delete unsuccessful message failed to send to xAPP");
1948             }
1949             break;
1950         }
1951         default: {
1952             mdclog_write(MDCLOG_WARN, "Undefined or not supported message = %ld", procedureCode);
1953             message.message.messageType = 0; // no RMR message type yet
1954
1955             buildJsonMessage(message);
1956
1957             break;
1958         }
1959     }
1960 }
1961
1962 /**
1963  * Set the MEID, message type and a fresh transaction id on the prepared RMR message and send it to the xApp.
1964  * @param message reporting structure holding the E2 node name and payload
1965  * @param requestId RMR message type to send
1966  * @param rmrMmessageBuffer RMR send/receive buffers
1967  * @return result of sendRmrMessage (0 on success)
1968  */
1969 int sendRequestToXapp(ReportingMessages_t &message,
1970                       int requestId,
1971                       RmrMessagesBuffer_t &rmrMmessageBuffer) {
1972     rmr_bytes2meid(rmrMmessageBuffer.sendMessage,
1973                    (unsigned char *)message.message.enodbName,
1974                    strlen(message.message.enodbName));
1975     message.message.messageType = rmrMmessageBuffer.sendMessage->mtype = requestId;
1976     rmrMmessageBuffer.sendMessage->state = 0;
1977     static unsigned char tx[32];
1978     snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1979     rmr_bytes2xact(rmrMmessageBuffer.sendMessage, tx, strlen((const char *) tx));
1980
1981     auto rc = sendRmrMessage(rmrMmessageBuffer, message);
1982     return rc;
1983 }
1984
1985 /**
1986  * Initialize the RMR context, wait until RMR reports ready, and register its receive descriptor with epoll.
1987  * @param pSctpParams global SCTP/RMR parameters; rmrCtx is left as nullptr on failure
1988  */
1989 void getRmrContext(sctp_params_t &pSctpParams) {
1990     pSctpParams.rmrCtx = nullptr;
1991     pSctpParams.rmrCtx = rmr_init(pSctpParams.rmrAddress, RECEIVE_XAPP_BUFFER_SIZE, RMRFL_NONE);
1992     if (pSctpParams.rmrCtx == nullptr) {
1993         mdclog_write(MDCLOG_ERR, "Failed to initialize RMR");
1994         return;
1995     }
1996
1997     rmr_set_stimeout(pSctpParams.rmrCtx, 0);    // disable retries for any send operation
1998     // wait until the routing table exists and RMR is ready to run
1999     if (mdclog_level_get() >= MDCLOG_INFO) {
2000         mdclog_write(MDCLOG_INFO, "We are after RMR INIT wait for RMR_Ready");
2001     }
2002     int rmrReady = 0;
2003     int count = 0;
2004     while (!rmrReady) {
2005         if ((rmrReady = rmr_ready(pSctpParams.rmrCtx)) == 0) {
2006             sleep(1);
2007         }
2008         count++;
2009         if (count % 60 == 0) {
2010             mdclog_write(MDCLOG_INFO, "waiting for RMR ready state for %d seconds", count);
2011         }
2012     }
2013     if (mdclog_level_get() >= MDCLOG_INFO) {
2014         mdclog_write(MDCLOG_INFO, "RMR running");
2015     }
2016     rmr_init_trace(pSctpParams.rmrCtx, 200);
2017     // get the RMR fd for the epoll
2018     pSctpParams.rmrListenFd = rmr_get_rcvfd(pSctpParams.rmrCtx);
2019     struct epoll_event event{};
2020     // add RMR fd to epoll
2021     event.events = (EPOLLIN);
2022     event.data.fd = pSctpParams.rmrListenFd;
2023     // add listening RMR FD to epoll
2024     if (epoll_ctl(pSctpParams.epoll_fd, EPOLL_CTL_ADD, pSctpParams.rmrListenFd, &event)) {
2025         mdclog_write(MDCLOG_ERR, "Failed to add RMR descriptor to epoll");
2026         close(pSctpParams.rmrListenFd);
2027         rmr_close(pSctpParams.rmrCtx);
2028         pSctpParams.rmrCtx = nullptr;
2029     }
2030 }
2031
2032 /**
2033  * Re-encode the XML (XER) payload received from the xApp into aligned PER, in place in the receive buffer.
2034  * @param message reporting structure holding the E2 node name
2035  * @param rmrMessageBuffer RMR buffers; rcvMessage payload and length are updated
2036  * @return 0 on success, -1 on decode/encode failure
2037  */
2038 int PER_FromXML(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
2039     E2AP_PDU_t *pdu = nullptr;
2040
2041     if (mdclog_level_get() >= MDCLOG_DEBUG) {
2042         mdclog_write(MDCLOG_DEBUG, "got XML format data from xApp of size %d: %s",
2043                 rmrMessageBuffer.rcvMessage->len, rmrMessageBuffer.rcvMessage->payload);
2044     }
2045     auto rval = asn_decode(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, (void **) &pdu,
2046                            rmrMessageBuffer.rcvMessage->payload, rmrMessageBuffer.rcvMessage->len);
2047     if (rval.code != RC_OK) {
2048         mdclog_write(MDCLOG_ERR, "Error %d decoding (unpack) setup response from E2MGR: %s",
2049                      rval.code,
2050                      message.message.enodbName);
2051         return -1;
2052     }
2053
2054     int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
2055     auto er = asn_encode_to_buffer(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, pdu,
2056                                    rmrMessageBuffer.rcvMessage->payload, buff_size);
2057     if (er.encoded == -1) {
2058         mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
2059         return -1;
2060     } else if (er.encoded > (ssize_t)buff_size) {
2061         mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, at %s line %d",
2062                      buff_size,
2063                      asn_DEF_E2AP_PDU.name,
2064                      __func__,
2065                      __LINE__);
2066         return -1;
2067     }
2068     rmrMessageBuffer.rcvMessage->len = er.encoded;
2069     return 0;
2070 }
2071
2072 /**
2073  * Receive one RMR message from an xApp / the E2 Manager and handle it according to its message type.
2074  * @param sctpMap map of connected E2 nodes, keyed by node name
2075  * @param rmrMessageBuffer RMR send/receive buffers
2076  * @param ts receive timestamp used for the reporting record
2077  * @return 0 on success, negative on error
2078  */
2079 int receiveXappMessages(Sctp_Map_t *sctpMap,
2080                         RmrMessagesBuffer_t &rmrMessageBuffer,
2081                         struct timespec &ts) {
2082     if (rmrMessageBuffer.rcvMessage == nullptr) {
2083         //we have error
2084         mdclog_write(MDCLOG_ERR, "RMR message allocation failed, %s", strerror(errno));
2085         return -1;
2086     }
2087
2088     if (mdclog_level_get() >= MDCLOG_DEBUG) {
2089         mdclog_write(MDCLOG_DEBUG, "Call to rmr_rcv_msg");
2090     }
2091     rmrMessageBuffer.rcvMessage = rmr_rcv_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.rcvMessage);
2092     if (rmrMessageBuffer.rcvMessage == nullptr) {
2093         mdclog_write(MDCLOG_ERR, "RMR received a null message pointer, reallocating RMR message buffer");
2094         rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2095         return -2;
2096     }
2097     ReportingMessages_t message;
2098     message.message.direction = 'D';
2099     message.message.time.tv_nsec = ts.tv_nsec;
2100     message.message.time.tv_sec = ts.tv_sec;
2101
2102     // get message payload
2103     //auto msgData = msg->payload;
2104     if (rmrMessageBuffer.rcvMessage->state != 0) {
2105         mdclog_write(MDCLOG_ERR, "RMR received message with state = %d", rmrMessageBuffer.rcvMessage->state);
2106         return -1;
2107     }
2108     rmr_get_meid(rmrMessageBuffer.rcvMessage, (unsigned char *)message.message.enodbName);
2109     message.peerInfo = (ConnectedCU_t *) sctpMap->find(message.message.enodbName);
2110     if (message.peerInfo == nullptr) {
2111         auto type = rmrMessageBuffer.rcvMessage->mtype;
2112         switch (type) {
2113             case RIC_SCTP_CLEAR_ALL:
2114             case E2_TERM_KEEP_ALIVE_REQ:
2115             case RIC_HEALTH_CHECK_REQ:
2116                 break;
2117             default:
2118                 mdclog_write(MDCLOG_ERR, "Failed to send message no CU entry %s", message.message.enodbName);
2119                 return -1;
2120         }
2121     }
2122
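    // Dispatch on the RMR message type: setup/reset/service-update family messages arrive as XML and are
    // converted to PER before being sent to the E2 node; the remaining E2AP types are forwarded as-is, and
    // the administrative types (clear-all, keep-alive, health-check) are handled locally.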
2123     switch (rmrMessageBuffer.rcvMessage->mtype) {
2124         case RIC_E2_SETUP_RESP : {
2125             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2126                 break;
2127             }
2128             message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup - 1]->Increment();
2129             message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2130             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2131                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_RESP");
2132                 return -6;
2133             }
2134             break;
2135         }
2136         case RIC_E2_SETUP_FAILURE : {
2137             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2138                 break;
2139             }
2140             message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup - 1]->Increment();
2141             message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2142             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2143                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_FAILURE");
2144                 return -6;
2145             }
2146             break;
2147         }
2148         case RIC_ERROR_INDICATION: {
2149             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication - 1]->Increment();
2150             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2151             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2152                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_ERROR_INDICATION");
2153                 return -6;
2154             }
2155             break;
2156         }
2157         case RIC_SUB_REQ: {
2158             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment();
2159             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2160             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2161                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_REQ");
2162                 return -6;
2163             }
2164             break;
2165         }
2166         case RIC_SUB_DEL_REQ: {
2167             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment();
2168             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2169             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2170                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_DEL_REQ");
2171                 return -6;
2172             }
2173             break;
2174         }
2175         case RIC_CONTROL_REQ: {
2176             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment();
2177             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2178             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2179                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_CONTROL_REQ");
2180                 return -6;
2181             }
2182             break;
2183         }
2184         case RIC_SERVICE_QUERY: {
2185             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2186                 break;
2187             }
2188             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceQuery - 1]->Increment();
2189             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2190             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2191                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_QUERY");
2192                 return -6;
2193             }
2194             break;
2195         }
2196         case RIC_SERVICE_UPDATE_ACK: {
2197             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2198                 break;
2199             }
2200             message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate - 1]->Increment();
2201             message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2202             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2203                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_ACK");
2204                 return -6;
2205             }
2206             break;
2207         }
2208         case RIC_SERVICE_UPDATE_FAILURE: {
2209             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2210                 break;
2211             }
2212             message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate - 1]->Increment();
2213             message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2214             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2215                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_FAILURE");
2216                 return -6;
2217             }
2218             break;
2219         }
2220         case RIC_E2_RESET_REQ: {
2221             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2222                 break;
2223             }
2224             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_Reset - 1]->Increment();
2225             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_Reset - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2226             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2227                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET");
2228                 return -6;
2229             }
2230             break;
2231         }
2232         case RIC_E2_RESET_RESP: {
2233             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2234                 break;
2235             }
2236             message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_Reset - 1]->Increment();
2237             message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2238             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2239                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET_RESP");
2240                 return -6;
2241             }
2242             break;
2243         }
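        // E2 Manager asked to drop all E2 connections: close every peer socket, notify the xApps with
        // RIC_SCTP_CONNECTION_FAILURE for each peer, and clear the whole map.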
2244         case RIC_SCTP_CLEAR_ALL: {
2245             mdclog_write(MDCLOG_INFO, "RIC_SCTP_CLEAR_ALL");
2246             // loop over all peer keys, close each socket, then erase the whole map.
2247             vector<char *> v;
2248             sctpMap->getKeys(v);
2249             for (auto const &iter : v) {
2250                 if (!boost::starts_with((string) (iter), "host:") && !boost::starts_with((string) (iter), "msg:")) {
2251                     auto *peerInfo = (ConnectedCU_t *) sctpMap->find(iter);
2252                     if (peerInfo == nullptr) {
2253                         continue;
2254                     }
2255                     close(peerInfo->fileDescriptor);
2256                     memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
2257                     message.message.direction = 'D';
2258                     message.message.time.tv_nsec = ts.tv_nsec;
2259                     message.message.time.tv_sec = ts.tv_sec;
2260
2261                     message.message.asnLength = rmrMessageBuffer.sendMessage->len =
2262                             snprintf((char *)rmrMessageBuffer.sendMessage->payload,
2263                                      256,
2264                                      "%s|RIC_SCTP_CLEAR_ALL",
2265                                      peerInfo->enodbName);
2266                     message.message.asndata = rmrMessageBuffer.sendMessage->payload;
2267                     mdclog_write(MDCLOG_INFO, "%s", message.message.asndata);
2268                     if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
2269                         mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
2270                     }
2271                     free(peerInfo);
2272                 }
2273             }
2274
2275             sleep(1);
2276             sctpMap->clear();
2277             break;
2278         }
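        // Keep-alive: echo the preconfigured keep-alive payload back as an
        // E2_TERM_KEEP_ALIVE_RESP so the requester can see this E2 terminator is alive.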
2279         case E2_TERM_KEEP_ALIVE_REQ: {
2280             // send message back
2281             rmr_bytes2payload(rmrMessageBuffer.sendMessage,
2282                               (unsigned char *)rmrMessageBuffer.ka_message,
2283                               rmrMessageBuffer.ka_message_len);
2284             rmrMessageBuffer.sendMessage->mtype = E2_TERM_KEEP_ALIVE_RESP;
2285             rmrMessageBuffer.sendMessage->state = 0;
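            // stamp a fresh transaction id on the response so the receiver can correlate it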
2286             static unsigned char tx[32];
2287             auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2288             rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, txLen);
2289             rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2290             if (rmrMessageBuffer.sendMessage == nullptr) {
2291                 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2292                 mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP, RMR returned a NULL message");
2293             } else if (rmrMessageBuffer.sendMessage->state != 0)  {
2294                 mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP, on RMR state = %d ( %s)",
2295                              rmrMessageBuffer.sendMessage->state, translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str());
2296             } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
2297                 mdclog_write(MDCLOG_DEBUG, "Got Keep Alive Request, sent response: %s", rmrMessageBuffer.ka_message);
2298             }
2299
2300             break;
2301         }
2302         case RIC_HEALTH_CHECK_REQ: {
2303             // send message back
2304             rmr_bytes2payload(rmrMessageBuffer.sendMessage,
2305                               (unsigned char *)"OK",
2306                               2);
2307             rmrMessageBuffer.sendMessage->mtype = RIC_HEALTH_CHECK_RESP;
2308             rmrMessageBuffer.sendMessage->state = 0;
2309             static unsigned char tx[32];
2310             auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2311             rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, txLen);
2312             rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2313             if (rmrMessageBuffer.sendMessage == nullptr) {
2314                 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2315                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP, RMR returned a NULL message");
2316             } else if (rmrMessageBuffer.sendMessage->state != 0)  {
2317                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP, on RMR state = %d ( %s)",
2318                              rmrMessageBuffer.sendMessage->state, translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str());
2319             } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
2320                 mdclog_write(MDCLOG_DEBUG, "Got RIC_HEALTH_CHECK_REQ, sent response: OK");
2321             }
2322
2323             break;
2324         }
2325
2326         default:
2327             mdclog_write(MDCLOG_WARN, "Message Type : %d is not supported", rmrMessageBuffer.rcvMessage->mtype);
2328             message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
2329             message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
2330             message.message.time.tv_nsec = ts.tv_nsec;
2331             message.message.time.tv_sec = ts.tv_sec;
2332             message.message.messageType = rmrMessageBuffer.rcvMessage->mtype;
2333
2334             buildJsonMessage(message);
2335
2336
2337             return -7;
2338     }
2339     if (mdclog_level_get() >= MDCLOG_DEBUG) {
2340         mdclog_write(MDCLOG_DEBUG, "EXIT OK from %s", __FUNCTION__);
2341     }
2342     return 0;
2343 }
2344
2345 /**
2346  * Send a message to the CU without waiting for a successful or unsuccessful outcome
2347  * @param messageBuffer RMR buffers holding the received message and its payload
2348  * @param message reporting metadata for the message being forwarded
2349  * @param failedMsgId failure message type supplied by the caller (the handlers above pass 0)
2350  * @param sctpMap map of active SCTP connections
2351  * @return 0 on success, non-zero on failure
2352  */
2353 int sendDirectionalSctpMsg(RmrMessagesBuffer_t &messageBuffer,
2354                            ReportingMessages_t &message,
2355                            int failedMsgId,
2356                            Sctp_Map_t *sctpMap) {
2357
2358     getRequestMetaData(message, messageBuffer);
2359     if (mdclog_level_get() >= MDCLOG_INFO) {
2360         mdclog_write(MDCLOG_INFO, "send message to %s address", message.message.enodbName);
2361     }
2362
2363     auto rc = sendMessagetoCu(sctpMap, messageBuffer, message, failedMsgId);
2364     return rc;
2365 }
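// Illustrative call site (mirroring the RMR switch above), where the handlers pass 0 for
// failedMsgId:
//
//     if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
//         mdclog_write(MDCLOG_ERR, "Failed to send ...");
//     }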
2366
2367 /**
2368  * Forward a message received from an xApp to the CU over its SCTP connection
2369  * @param sctpMap map of active SCTP connections
2370  * @param messageBuffer RMR buffers holding the received message
2371  * @param message reporting metadata; peerInfo identifies the target connection
2372  * @param failedMesgId failure message type supplied by the caller
2373  * @return result of sendSctpMsg (0 on success)
2374  */
2375 int sendMessagetoCu(Sctp_Map_t *sctpMap,
2376                     RmrMessagesBuffer_t &messageBuffer,
2377                     ReportingMessages_t &message,
2378                     int failedMesgId) {
2379     // record the message type and forward over the peer's SCTP socket
2380     message.message.messageType = messageBuffer.rcvMessage->mtype;
2381     auto rc = sendSctpMsg(message.peerInfo, message, sctpMap);
2382     return rc;
2383 }
2384
2385 /**
2386  * Report back to the xApp that the target gNB/eNodeB (RAN name) was not found
2387  * @param rmrMessageBuffer the RMR context and buffers used to send the reply
2388  * @param message reporting metadata for the message that could not be delivered,
2389  *                including the RAN name that was looked up
2390  * @param failedMesgId the RMR message type of the error reply
2391  */
2392 void
2393 sendFailedSendingMessagetoXapp(RmrMessagesBuffer_t &rmrMessageBuffer, ReportingMessages_t &message, int failedMesgId) {
2394     rmr_mbuf_t *msg = rmrMessageBuffer.sendMessage;
2395     msg->len = snprintf((char *) msg->payload, 200, "the gNb/eNodeB name %s was not found",
2396                         message.message.enodbName);
2397     if (mdclog_level_get() >= MDCLOG_INFO) {
2398         mdclog_write(MDCLOG_INFO, "%s", msg->payload);
2399     }
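    // reply to the xApp with the caller-supplied failure message type and a fresh transaction id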
2400     msg->mtype = failedMesgId;
2401     msg->state = 0;
2402
2403     static unsigned char tx[32];
2404     snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2405     rmr_bytes2xact(msg, tx, strlen((const char *) tx));
2406
2407     sendRmrMessage(rmrMessageBuffer, message);
2408 }
2409
2410
2411
2412 /**
2413  * Add a peer's SCTP socket to the epoll instance, cleaning up the connection on failure
2414  * @param epoll_fd the epoll file descriptor
2415  * @param peerInfo the connected CU whose socket is being registered
2416  * @param events epoll event mask (e.g. EPOLLIN)
2417  * @param sctpMap map of active SCTP connections, cleaned on failure
2418  * @param enodbName RAN name used to build the cleanup key, or nullptr
2419  * @param msgType message type used to build the cleanup key
2420  * @return 0 on success, -1 on failure
2421  */
2422 int addToEpoll(int epoll_fd,
2423                ConnectedCU_t *peerInfo,
2424                uint32_t events,
2425                Sctp_Map_t *sctpMap,
2426                char *enodbName,
2427                int msgType) {
2428     // Register the peer's socket with the epoll instance; event.data.ptr carries the peer so the event loop can recover it
2429     struct epoll_event event{};
2430     event.data.ptr = peerInfo;
2431     event.events = events;
2432     if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, peerInfo->fileDescriptor, &event) < 0) {
2433         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2434             mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_ADD failed (may not need to quit here), %s, %s %d",
2435                          strerror(errno), __func__, __LINE__);
2436         }
2437         close(peerInfo->fileDescriptor);
2438         if (enodbName != nullptr) {
2439             cleanHashEntry(peerInfo, sctpMap);
2440             char key[MAX_ENODB_NAME_SIZE * 2];
2441             snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", enodbName, msgType);
2442             if (mdclog_level_get() >= MDCLOG_DEBUG) {
2443                 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
2444             }
2445             auto tmp = sctpMap->find(key);
2446             if (tmp) {
2447                 free(tmp);
2448                 sctpMap->erase(key);
2449             }
2450         } else {
2451             peerInfo->enodbName[0] = 0;
2452         }
2453         mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_ADD failed (may not need to quit here)");
2454         return -1;
2455     }
2456     return 0;
2457 }
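// Illustrative (hypothetical) registration of a newly connected peer for read events; the
// actual flags and call sites are defined elsewhere in this file:
//
//     if (addToEpoll(epoll_fd, peerInfo, (EPOLLIN | EPOLLET), sctpMap, nullptr, 0) < 0) {
//         // the peer's socket has already been closed inside addToEpoll
//     }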
2458
2459 /**
2460  * Modify an existing epoll registration for a peer's SCTP socket, cleaning up the connection on failure
2461  * @param epoll_fd the epoll file descriptor
2462  * @param peerInfo the connected CU whose registration is being modified
2463  * @param events new epoll event mask
2464  * @param sctpMap map of active SCTP connections, cleaned on failure
2465  * @param enodbName RAN name used to build the cleanup key
2466  * @param msgType message type used to build the cleanup key
2467  * @return 0 on success, -1 on failure
2468  */
2469 int modifyToEpoll(int epoll_fd,
2470                   ConnectedCU_t *peerInfo,
2471                   uint32_t events,
2472                   Sctp_Map_t *sctpMap,
2473                   char *enodbName,
2474                   int msgType) {
2475     // Modify the peer's existing registration in the epoll instance
2476     struct epoll_event event{};
2477     event.data.ptr = peerInfo;
2478     event.events = events;
2479     if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, peerInfo->fileDescriptor, &event) < 0) {
2480         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2481             mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_MOD failed (may not need to quit here), %s, %s %d",
2482                          strerror(errno), __func__, __LINE__);
2483         }
2484         close(peerInfo->fileDescriptor);
2485         cleanHashEntry(peerInfo, sctpMap);
2486         char key[MAX_ENODB_NAME_SIZE * 2];
2487         snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", enodbName, msgType);
2488         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2489             mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
2490         }
2491         auto tmp = sctpMap->find(key);
2492         if (tmp) {
2493             free(tmp);
2494         }
2495         sctpMap->erase(key);
2496         mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_MOD failed (may not need to quit here)");
2497         return -1;
2498     }
2499     return 0;
2500 }
2501
2502
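/**
 * Trace the message as JSON (when enabled) and send it to the xApp over RMR,
 * retrying once after a short delay when RMR reports RMR_ERR_RETRY.
 * @param rmrMessageBuffer holds the RMR context and the message buffer to send
 * @param message reporting metadata used for the JSON trace
 * @return 0 on success, -1 when RMR returns a NULL buffer, otherwise the RMR state code
 */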
2503 int sendRmrMessage(RmrMessagesBuffer_t &rmrMessageBuffer, ReportingMessages_t &message) {
2504     buildJsonMessage(message);
2505
2506     rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2507
2508     if (rmrMessageBuffer.sendMessage == nullptr) {
2509         rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2510         mdclog_write(MDCLOG_ERR, "RMR failed to send message, returned a NULL pointer");
2511         return -1;
2512     }
2513
2514     if (rmrMessageBuffer.sendMessage->state != 0) {
2515         char meid[RMR_MAX_MEID]{};
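        // RMR_ERR_RETRY is transient (see translateRmrErrorMessages below):
        // back off briefly and resend the same buffer once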
2516         if (rmrMessageBuffer.sendMessage->state == RMR_ERR_RETRY) {
2517             usleep(5);
2518             rmrMessageBuffer.sendMessage->state = 0;
2519             mdclog_write(MDCLOG_INFO, "RETRY sending Message type %d to Xapp from %s",
2520                          rmrMessageBuffer.sendMessage->mtype,
2521                          rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2522             rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2523             if (rmrMessageBuffer.sendMessage == nullptr) {
2524                 mdclog_write(MDCLOG_ERR, "RMR failed to send message, returned a NULL pointer");
2525                 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2526                 return -1;
2527             } else if (rmrMessageBuffer.sendMessage->state != 0) {
2528                 mdclog_write(MDCLOG_ERR,
2529                              "Message state %s while sending request %d to Xapp from %s after a short retry delay",
2530                              translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str(),
2531                              rmrMessageBuffer.sendMessage->mtype,
2532                              rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2533                 auto rc = rmrMessageBuffer.sendMessage->state;
2534                 return rc;
2535             }
2536         } else {
2537             mdclog_write(MDCLOG_ERR, "Message state %s while sending request %d to Xapp from %s",
2538                          translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str(),
2539                          rmrMessageBuffer.sendMessage->mtype,
2540                          rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2541             return rmrMessageBuffer.sendMessage->state;
2542         }
2543     }
2544     return 0;
2545 }
2546
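/**
 * When JSON tracing is enabled, base64-encode the ASN.1 payload and emit a
 * single-line JSON trace record through the Boost log sink.
 * @param message reporting metadata and payload to trace
 */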
2547 void buildJsonMessage(ReportingMessages_t &message) {
2548     if (jsonTrace) {
2549         message.outLen = sizeof(message.base64Data);
2550         base64::encode((const unsigned char *) message.message.asndata,
2551                        (const int) message.message.asnLength,
2552                        message.base64Data,
2553                        message.outLen);
2554         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2555             mdclog_write(MDCLOG_DEBUG, "Tracing: ASN length = %d, base64 message length = %d ",
2556                          (int) message.message.asnLength,
2557                          (int) message.outLen);
2558         }
2559
2560         snprintf(message.buffer, sizeof(message.buffer),
2561                  "{\"header\": {\"ts\": \"%ld.%09ld\","
2562                  "\"ranName\": \"%s\","
2563                  "\"messageType\": %d,"
2564                  "\"direction\": \"%c\"},"
2565                  "\"base64Length\": %d,"
2566                  "\"asnBase64\": \"%s\"}",
2567                  message.message.time.tv_sec,
2568                  message.message.time.tv_nsec,
2569                  message.message.enodbName,
2570                  message.message.messageType,
2571                  message.message.direction,
2572                  (int) message.outLen,
2573                  message.base64Data);
2574         static src::logger_mt &lg = my_logger::get();
2575
2576         BOOST_LOG(lg) << message.buffer;
2577     }
2578 }
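// Illustrative trace record emitted by buildJsonMessage (all field values are hypothetical):
//
//   {"header": {"ts": "1616072837.000452103","ranName": "gnb_208_092_001100",
//    "messageType": 12002,"direction": "D"},"base64Length": 44,"asnBase64": "ACRAJQ..."}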
2579
2580
2581 /**
2582  * Translate an RMR error code to a human-readable string
2583  * @param state
2584  * @return
2585  */
2586 string translateRmrErrorMessages(int state) {
2587     string str = {};
2588     switch (state) {
2589         case RMR_OK:
2590             str = "RMR_OK - state is good";
2591             break;
2592         case RMR_ERR_BADARG:
2593             str = "RMR_ERR_BADARG - argument passed to function was unusable";
2594             break;
2595         case RMR_ERR_NOENDPT:
2596             str = "RMR_ERR_NOENDPT - send/call could not find an endpoint based on msg type";
2597             break;
2598         case RMR_ERR_EMPTY:
2599             str = "RMR_ERR_EMPTY - msg received had no payload; attempt to send an empty message";
2600             break;
2601         case RMR_ERR_NOHDR:
2602             str = "RMR_ERR_NOHDR - message didn't contain a valid header";
2603             break;
2604         case RMR_ERR_SENDFAILED:
2605             str = "RMR_ERR_SENDFAILED - send failed; errno has nano reason";
2606             break;
2607         case RMR_ERR_CALLFAILED:
2608             str = "RMR_ERR_CALLFAILED - unable to send call() message";
2609             break;
2610         case RMR_ERR_NOWHOPEN:
2611             str = "RMR_ERR_NOWHOPEN - no wormholes are open";
2612             break;
2613         case RMR_ERR_WHID:
2614             str = "RMR_ERR_WHID - wormhole id was invalid";
2615             break;
2616         case RMR_ERR_OVERFLOW:
2617             str = "RMR_ERR_OVERFLOW - operation would have busted through a buffer/field size";
2618             break;
2619         case RMR_ERR_RETRY:
2620             str = "RMR_ERR_RETRY - request (send/call/rts) failed, but caller should retry (EAGAIN for wrappers)";
2621             break;
2622         case RMR_ERR_RCVFAILED:
2623             str = "RMR_ERR_RCVFAILED - receive failed (hard error)";
2624             break;
2625         case RMR_ERR_TIMEOUT:
2626             str = "RMR_ERR_TIMEOUT - message processing call timed out";
2627             break;
2628         case RMR_ERR_UNSET:
2629             str = "RMR_ERR_UNSET - the message hasn't been populated with a transport buffer";
2630             break;
2631         case RMR_ERR_TRUNC:
2632             str = "RMR_ERR_TRUNC - received message likely truncated";
2633             break;
2634         case RMR_ERR_INITFAILED:
2635             str = "RMR_ERR_INITFAILED - initialisation of something (probably message) failed";
2636             break;
2637         case RMR_ERR_NOTSUPP:
2638             str = "RMR_ERR_NOTSUPP - the request is not supported, or RMr was not initialised for the request";
2639             break;
2640         default:
2641             char buf[128]{};
2642             snprintf(buf, sizeof buf, "UNDOCUMENTED RMR_ERR : %d", state);
2643             str = buf;
2644             break;
2645     }
2646     return str;
2647 }
2648
2649