Merge "5.0.5 Fix Prometheus bug on listening address in K8S (both IPv6 and Ipv4)...
[ric-plt/e2.git] / RIC-E2-TERMINATION / sctpThread.cpp
1 // Copyright 2019 AT&T Intellectual Property
2 // Copyright 2019 Nokia
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //      http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15
16 //  This source code is part of the near-RT RIC (RAN Intelligent Controller)
17 //  platform project (RICP).
18
19 // TODO: High-level file comment.
20
21
22
23 #include <3rdparty/oranE2/RANfunctions-List.h>
24 #include "sctpThread.h"
25 #include "BuildRunName.h"
26
27 //#include "3rdparty/oranE2SM/E2SM-gNB-NRT-RANfunction-Definition.h"
28 //#include "BuildXml.h"
29 //#include "pugixml/src/pugixml.hpp"
30
31 using namespace std;
32 //using namespace std::placeholders;
33 using namespace boost::filesystem;
34 using namespace prometheus;
35
36
37 //#ifdef __cplusplus
38 //extern "C"
39 //{
40 //#endif
41
// need to expose without the include of gcov
extern "C" void __gcov_flush(void);

// Termination-signal handler: flush the gcov coverage counters to disk before
// the process exits, so coverage from an interrupted run is not lost.
static void catch_function(int sig) {
    __gcov_flush();
    exit(sig);
}
49
50
BOOST_LOG_INLINE_GLOBAL_LOGGER_DEFAULT(my_logger, src::logger_mt)

// Boost.Log file sink for the message-trace log; configured in buildConfiguration().
boost::shared_ptr<sinks::synchronous_sink<sinks::text_file_backend>> boostLogger;
// CPU clock estimate in cycles per second, measured once at startup by approx_CPU_MHz().
double cpuClock = 0.0;
// Whether JSON message tracing is enabled; mirrors the "trace" config entry.
bool jsonTrace = true;
56
57 void init_log() {
58     mdclog_attr_t *attr;
59     mdclog_attr_init(&attr);
60     mdclog_attr_set_ident(attr, "E2Terminator");
61     mdclog_init(attr);
62     mdclog_attr_destroy(attr);
63 }
// Timestamp captured at static-initialisation time; the zero point for age().
auto start_time = std::chrono::high_resolution_clock::now();
typedef std::chrono::duration<double, std::ratio<1,1>> seconds_t;

// Returns the fractional number of seconds elapsed since process start.
double age() {
    auto elapsed = std::chrono::high_resolution_clock::now() - start_time;
    return seconds_t(elapsed).count();
}
70
// Estimates the CPU clock by sampling the TSC before and after sleeping for
// `sleeptime` milliseconds and dividing cycle delta by wall-clock delta.
// NOTE(review): despite the name, the returned value is cycles per *second*
// (Hz), not MHz — elapsed_time is measured in seconds; confirm with callers.
double approx_CPU_MHz(unsigned sleeptime) {
    using namespace std::chrono_literals;
    uint32_t aux = 0;  // receives the IA32_TSC_AUX value from rdtscp; unused here
    uint64_t cycles_start = rdtscp(aux);
    double time_start = age();
    std::this_thread::sleep_for(sleeptime * 1ms);
    uint64_t elapsed_cycles = rdtscp(aux) - cycles_start;
    double elapsed_time = age() - time_start;
    return elapsed_cycles / elapsed_time;
}
81
//std::atomic<int64_t> rmrCounter{0};
// Total number of events handled by the listener loop (SCTP + RMR).
std::atomic<int64_t> num_of_messages{0};
// Messages received from xApps over RMR; handleTermInit() polls this to know
// when it can stop re-sending E2_TERM_INIT.
std::atomic<int64_t> num_of_XAPP_messages{0};
// Monotonically increasing RMR transaction id; randomly seeded in main().
static long transactionCounter = 0;
86
87 int buildListeningPort(sctp_params_t &sctpParams) {
88     sctpParams.listenFD = socket(AF_INET6, SOCK_STREAM, IPPROTO_SCTP);
89     if (sctpParams.listenFD <= 0) {
90         mdclog_write(MDCLOG_ERR, "Error Opening socket, %s", strerror(errno));
91         return -1;
92     }
93
94     struct sockaddr_in6 servaddr {};
95     servaddr.sin6_family = AF_INET6;
96     servaddr.sin6_addr   = in6addr_any;
97     servaddr.sin6_port = htons(sctpParams.sctpPort);
98     if (bind(sctpParams.listenFD, (SA *)&servaddr, sizeof(servaddr)) < 0 ) {
99         mdclog_write(MDCLOG_ERR, "Error binding port %d. %s", sctpParams.sctpPort, strerror(errno));
100         return -1;
101     }
102     if (setSocketNoBlocking(sctpParams.listenFD) == -1) {
103         //mdclog_write(MDCLOG_ERR, "Error binding. %s", strerror(errno));
104         return -1;
105     }
106     if (mdclog_level_get() >= MDCLOG_DEBUG) {
107         struct sockaddr_in6 cliaddr {};
108         socklen_t len = sizeof(cliaddr);
109         getsockname(sctpParams.listenFD, (SA *)&cliaddr, &len);
110         char buff[1024] {};
111         inet_ntop(AF_INET6, &cliaddr.sin6_addr, buff, sizeof(buff));
112         mdclog_write(MDCLOG_DEBUG, "My address: %s, port %d\n", buff, htons(cliaddr.sin6_port));
113     }
114
115     if (listen(sctpParams.listenFD, SOMAXCONN) < 0) {
116         mdclog_write(MDCLOG_ERR, "Error listening. %s\n", strerror(errno));
117         return -1;
118     }
119     struct epoll_event event {};
120     event.events = EPOLLIN | EPOLLET;
121     event.data.fd = sctpParams.listenFD;
122
123     // add listening port to epoll
124     if (epoll_ctl(sctpParams.epoll_fd, EPOLL_CTL_ADD, sctpParams.listenFD, &event)) {
125         printf("Failed to add descriptor to epoll\n");
126         mdclog_write(MDCLOG_ERR, "Failed to add descriptor to epoll. %s\n", strerror(errno));
127         return -1;
128     }
129
130     return 0;
131 }
132
/**
 * Reads the E2Term configuration file and populates sctpParams.
 *
 * Validates that the file exists and is no larger than 2 KB, then reads:
 *   "nano"           - RMR listening port (rmrPort / rmrAddress)
 *   "loglevel"       - debug/info/warning/error (defaults to info)
 *   "volume"         - directory receiving rotated (closed) log files
 *   "local-ip"       - this instance's address, embedded in the keep-alive message
 *   "sctp-port"      - SCTP listening port
 *   "external-fqdn"  - FQDN reported to the E2 Manager
 *   "pod_name"       - name of the environment variable holding the POD name
 *   "trace"          - "start"/"stop" toggles JSON message tracing
 *   "prometheusPort" - optional override of the Prometheus scrape port
 * Finally it builds the keep-alive (E2_TERM_INIT) JSON payload and sets up the
 * Boost.Log rotating file sink.
 *
 * @param sctpParams parameter block to populate
 * @return 0 on success, -1 on any validation/read error
 */
int buildConfiguration(sctp_params_t &sctpParams) {
    path p = (sctpParams.configFilePath + "/" + sctpParams.configFileName).c_str();
    if (exists(p)) {
        // Reject oversized files up front — the parser expects a small config.
        const int size = 2048;
        auto fileSize = file_size(p);
        if (fileSize > size) {
            mdclog_write(MDCLOG_ERR, "File %s larger than %d", p.string().c_str(), size);
            return -1;
        }
    } else {
        mdclog_write(MDCLOG_ERR, "Configuration File %s not exists", p.string().c_str());
        return -1;
    }

    ReadConfigFile conf;
    if (conf.openConfigFile(p.string()) == -1) {
        mdclog_write(MDCLOG_ERR, "Filed to open config file %s, %s",
                     p.string().c_str(), strerror(errno));
        return -1;
    }
    // "nano" is the configuration key for the RMR listening port.
    int rmrPort = conf.getIntValue("nano");
    if (rmrPort == -1) {
        mdclog_write(MDCLOG_ERR, "illigal RMR port ");
        return -1;
    }
    sctpParams.rmrPort = (uint16_t)rmrPort;
    snprintf(sctpParams.rmrAddress, sizeof(sctpParams.rmrAddress), "%d", (int) (sctpParams.rmrPort));

    // Log level: case-insensitive; missing or unrecognised values fall back to info.
    auto tmpStr = conf.getStringValue("loglevel");
    if (tmpStr.length() == 0) {
        mdclog_write(MDCLOG_ERR, "illigal loglevel. Set loglevel to MDCLOG_INFO");
        tmpStr = "info";
    }
    transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);

    if ((tmpStr.compare("debug")) == 0) {
        sctpParams.logLevel = MDCLOG_DEBUG;
    } else if ((tmpStr.compare("info")) == 0) {
        sctpParams.logLevel = MDCLOG_INFO;
    } else if ((tmpStr.compare("warning")) == 0) {
        sctpParams.logLevel = MDCLOG_WARN;
    } else if ((tmpStr.compare("error")) == 0) {
        sctpParams.logLevel = MDCLOG_ERR;
    } else {
        mdclog_write(MDCLOG_ERR, "illigal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
        sctpParams.logLevel = MDCLOG_INFO;
    }
    mdclog_level_set(sctpParams.logLevel);

    // Target directory for rotated log files; mandatory.
    tmpStr = conf.getStringValue("volume");
    if (tmpStr.length() == 0) {
        mdclog_write(MDCLOG_ERR, "illigal volume.");
        return -1;
    }

    char tmpLogFilespec[VOLUME_URL_SIZE];
    tmpLogFilespec[0] = 0;
    sctpParams.volume[0] = 0;
    snprintf(sctpParams.volume, VOLUME_URL_SIZE, "%s", tmpStr.c_str());
    // copy the name to temp file as well
    snprintf(tmpLogFilespec, VOLUME_URL_SIZE, "%s", tmpStr.c_str());


    // define the file name in the tmp directory under the volume
    // NOTE(review): strcat can overflow tmpLogFilespec when the configured
    // volume path approaches VOLUME_URL_SIZE — consider a bounded append.
    strcat(tmpLogFilespec,"/tmp/E2Term_%Y-%m-%d_%H-%M-%S.%N.tmpStr");

    sctpParams.myIP = conf.getStringValue("local-ip");
    if (sctpParams.myIP.length() == 0) {
        mdclog_write(MDCLOG_ERR, "illigal local-ip.");
        return -1;
    }

    int sctpPort = conf.getIntValue("sctp-port");
    if (sctpPort == -1) {
        mdclog_write(MDCLOG_ERR, "illigal SCTP port ");
        return -1;
    }
    sctpParams.sctpPort = (uint16_t)sctpPort;

    sctpParams.fqdn = conf.getStringValue("external-fqdn");
    if (sctpParams.fqdn.length() == 0) {
        mdclog_write(MDCLOG_ERR, "illigal external-fqdn");
        return -1;
    }

    // "pod_name" holds the *name* of an environment variable; the POD name
    // itself is read from that variable (set by Kubernetes).
    std::string pod = conf.getStringValue("pod_name");
    if (pod.length() == 0) {
        mdclog_write(MDCLOG_ERR, "illigal pod_name in config file");
        return -1;
    }
    auto *podName = getenv(pod.c_str());
    if (podName == nullptr) {
        mdclog_write(MDCLOG_ERR, "illigal pod_name or environment varible not exists : %s", pod.c_str());
        return -1;

    } else {
        sctpParams.podName.assign(podName);
        if (sctpParams.podName.length() == 0) {
            mdclog_write(MDCLOG_ERR, "illigal pod_name");
            return -1;
        }
    }

    // Message tracing: only "start"/"stop" change sctpParams.trace;
    // any other value leaves the existing setting untouched.
    tmpStr = conf.getStringValue("trace");
    transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
    if ((tmpStr.compare("start")) == 0) {
        mdclog_write(MDCLOG_INFO, "Trace set to: start");
        sctpParams.trace = true;
    } else if ((tmpStr.compare("stop")) == 0) {
        mdclog_write(MDCLOG_INFO, "Trace set to: stop");
        sctpParams.trace = false;
    }
    jsonTrace = sctpParams.trace;

    // -1 means epoll_wait() blocks indefinitely.
    sctpParams.epollTimeOut = -1;

    // Optional: override the default Prometheus port only when present.
    tmpStr = conf.getStringValue("prometheusPort");
    if (tmpStr.length() != 0) {
        sctpParams.prometheusPort = tmpStr;
    }

    // Pre-build the keep-alive (E2_TERM_INIT) JSON payload once.
    sctpParams.ka_message_length = snprintf(sctpParams.ka_message, KA_MESSAGE_SIZE, "{\"address\": \"%s:%d\","
                                                                                    "\"fqdn\": \"%s\","
                                                                                    "\"pod_name\": \"%s\"}",
                                            (const char *)sctpParams.myIP.c_str(),
                                            sctpParams.rmrPort,
                                            sctpParams.fqdn.c_str(),
                                            sctpParams.podName.c_str());

    if (mdclog_level_get() >= MDCLOG_INFO) {
        mdclog_mdc_add("RMR Port", to_string(sctpParams.rmrPort).c_str());
        mdclog_mdc_add("LogLevel", to_string(sctpParams.logLevel).c_str());
        mdclog_mdc_add("volume", sctpParams.volume);
        mdclog_mdc_add("tmpLogFilespec", tmpLogFilespec);
        mdclog_mdc_add("my ip", sctpParams.myIP.c_str());
        mdclog_mdc_add("pod name", sctpParams.podName.c_str());

        mdclog_write(MDCLOG_INFO, "running parameters for instance : %s", sctpParams.ka_message);
    }
    mdclog_mdc_clean();

    // Files written to the current working directory
    boostLogger = logging::add_file_log(
            keywords::file_name = tmpLogFilespec, // to temp directory
            keywords::rotation_size = 10 * 1024 * 1024,
            keywords::time_based_rotation = sinks::file::rotation_at_time_interval(posix_time::hours(1)),
            keywords::format = "%Message%"
            //keywords::format = "[%TimeStamp%]: %Message%" // use each tmpStr with time stamp
    );

    // Setup a destination folder for collecting rotated (closed) files --since the same volumn can use rename()
    boostLogger->locked_backend()->set_file_collector(sinks::file::make_collector(
            keywords::target = sctpParams.volume
    ));

    // Upon restart, scan the directory for files matching the file_name pattern
    boostLogger->locked_backend()->scan_for_files();

    // Enable auto-flushing after each tmpStr record written
    if (mdclog_level_get() >= MDCLOG_DEBUG) {
        boostLogger->locked_backend()->auto_flush(true);
    }

    return 0;
}
298
299 void startPrometheus(sctp_params_t &sctpParams) {
300     sctpParams.prometheusFamily = &BuildCounter()
301             .Name("E2T")
302             .Help("E2T message counter")
303             .Labels({{"POD_NAME", sctpParams.podName}})
304             .Register(*sctpParams.prometheusRegistry);
305
306     string promethusPath = sctpParams.prometheusPort + "," + "[::]:" + sctpParams.prometheusPort;
307     if (mdclog_level_get() >= MDCLOG_DEBUG) {
308         mdclog_write(MDCLOG_DEBUG, "Start Prometheus Pull mode on %s", promethusPath.c_str());
309     }
310     sctpParams.prometheusExposer = new Exposer(promethusPath, 1);
311     sctpParams.prometheusExposer->RegisterCollectable(sctpParams.prometheusRegistry);
312 }
313
314 int main(const int argc, char **argv) {
315     sctp_params_t sctpParams;
316
317     {
318         std::random_device device{};
319         std::mt19937 generator(device());
320         std::uniform_int_distribution<long> distribution(1, (long) 1e12);
321         transactionCounter = distribution(generator);
322     }
323
324 //    uint64_t st = 0;
325 //    uint32_t aux1 = 0;
326 //   st = rdtscp(aux1);
327
328     unsigned num_cpus = std::thread::hardware_concurrency();
329     init_log();
330     mdclog_level_set(MDCLOG_INFO);
331
332     if (std::signal(SIGINT, catch_function) == SIG_ERR) {
333         mdclog_write(MDCLOG_ERR, "Error initializing SIGINT");
334         exit(1);
335     }
336     if (std::signal(SIGABRT, catch_function)== SIG_ERR) {
337         mdclog_write(MDCLOG_ERR, "Error initializing SIGABRT");
338         exit(1);
339     }
340     if (std::signal(SIGTERM, catch_function)== SIG_ERR) {
341         mdclog_write(MDCLOG_ERR, "Error initializing SIGTERM");
342         exit(1);
343     }
344
345     cpuClock = approx_CPU_MHz(100);
346
347     mdclog_write(MDCLOG_DEBUG, "CPU speed %11.11f", cpuClock);
348
349     auto result = parse(argc, argv, sctpParams);
350
351     if (buildConfiguration(sctpParams) != 0) {
352         exit(-1);
353     }
354
355     //auto registry = std::make_shared<Registry>();
356     sctpParams.prometheusRegistry = std::make_shared<Registry>();
357
358     //sctpParams.promtheusFamily = new Family<Counter>("E2T", "E2T message counter", {{"E", sctpParams.podName}});
359
360     startPrometheus(sctpParams);
361
362     // start epoll
363     sctpParams.epoll_fd = epoll_create1(0);
364     if (sctpParams.epoll_fd == -1) {
365         mdclog_write(MDCLOG_ERR, "failed to open epoll descriptor");
366         exit(-1);
367     }
368
369     getRmrContext(sctpParams);
370     if (sctpParams.rmrCtx == nullptr) {
371         close(sctpParams.epoll_fd);
372         exit(-1);
373     }
374
375     if (buildInotify(sctpParams) == -1) {
376         close(sctpParams.rmrListenFd);
377         rmr_close(sctpParams.rmrCtx);
378         close(sctpParams.epoll_fd);
379         exit(-1);
380     }
381
382     if (buildListeningPort(sctpParams) != 0) {
383         close(sctpParams.rmrListenFd);
384         rmr_close(sctpParams.rmrCtx);
385         close(sctpParams.epoll_fd);
386         exit(-1);
387     }
388
389     sctpParams.sctpMap = new mapWrapper();
390
391     std::vector<std::thread> threads(num_cpus);
392 //    std::vector<std::thread> threads;
393
394     num_cpus = 1;
395     for (unsigned int i = 0; i < num_cpus; i++) {
396         threads[i] = std::thread(listener, &sctpParams);
397
398         cpu_set_t cpuset;
399         CPU_ZERO(&cpuset);
400         CPU_SET(i, &cpuset);
401         int rc = pthread_setaffinity_np(threads[i].native_handle(), sizeof(cpu_set_t), &cpuset);
402         if (rc != 0) {
403             mdclog_write(MDCLOG_ERR, "Error calling pthread_setaffinity_np: %d", rc);
404         }
405     }
406
407
408     //loop over term_init until first message from xApp
409     handleTermInit(sctpParams);
410
411     for (auto &t : threads) {
412         t.join();
413     }
414
415     return 0;
416 }
417
418 void handleTermInit(sctp_params_t &sctpParams) {
419     sendTermInit(sctpParams);
420     //send to e2 manager init of e2 term
421     //E2_TERM_INIT
422
423     int count = 0;
424     while (true) {
425         auto xappMessages = num_of_XAPP_messages.load(std::memory_order_acquire);
426         if (xappMessages > 0) {
427             if (mdclog_level_get() >=  MDCLOG_INFO) {
428                 mdclog_write(MDCLOG_INFO, "Got a message from some appliction, stop sending E2_TERM_INIT");
429             }
430             return;
431         }
432         usleep(100000);
433         count++;
434         if (count % 1000 == 0) {
435             mdclog_write(MDCLOG_ERR, "GOT No messages from any xApp");
436             sendTermInit(sctpParams);
437         }
438     }
439 }
440
441 void sendTermInit(sctp_params_t &sctpParams) {
442     rmr_mbuf_t *msg = rmr_alloc_msg(sctpParams.rmrCtx, sctpParams.ka_message_length);
443     auto count = 0;
444     while (true) {
445         msg->mtype = E2_TERM_INIT;
446         msg->state = 0;
447         rmr_bytes2payload(msg, (unsigned char *)sctpParams.ka_message, sctpParams.ka_message_length);
448         static unsigned char tx[32];
449         auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
450         rmr_bytes2xact(msg, tx, txLen);
451         msg = rmr_send_msg(sctpParams.rmrCtx, msg);
452         if (msg == nullptr) {
453             msg = rmr_alloc_msg(sctpParams.rmrCtx, sctpParams.ka_message_length);
454         } else if (msg->state == 0) {
455             rmr_free_msg(msg);
456             if (mdclog_level_get() >=  MDCLOG_INFO) {
457                 mdclog_write(MDCLOG_INFO, "E2_TERM_INIT succsesfuly sent ");
458             }
459             return;
460         } else {
461             if (count % 100 == 0) {
462                 mdclog_write(MDCLOG_ERR, "Error sending E2_TERM_INIT cause : %s ", translateRmrErrorMessages(msg->state).c_str());
463             }
464             sleep(1);
465         }
466         count++;
467     }
468 }
469
470 /**
471  *
472  * @param argc
473  * @param argv
474  * @param sctpParams
475  * @return
476  */
477 cxxopts::ParseResult parse(int argc, char *argv[], sctp_params_t &sctpParams) {
478     cxxopts::Options options(argv[0], "e2 term help");
479     options.positional_help("[optional args]").show_positional_help();
480     options.allow_unrecognised_options().add_options()
481             ("p,path", "config file path", cxxopts::value<std::string>(sctpParams.configFilePath)->default_value("config"))
482             ("f,file", "config file name", cxxopts::value<std::string>(sctpParams.configFileName)->default_value("config.conf"))
483             ("h,help", "Print help");
484
485     auto result = options.parse(argc, argv);
486
487     if (result.count("help")) {
488         std::cout << options.help({""}) << std::endl;
489         exit(0);
490     }
491     return result;
492 }
493
494 /**
495  *
496  * @param sctpParams
497  * @return -1 failed 0 success
498  */
499 int buildInotify(sctp_params_t &sctpParams) {
500     sctpParams.inotifyFD = inotify_init1(IN_NONBLOCK);
501     if (sctpParams.inotifyFD == -1) {
502         mdclog_write(MDCLOG_ERR, "Failed to init inotify (inotify_init1) %s", strerror(errno));
503         close(sctpParams.rmrListenFd);
504         rmr_close(sctpParams.rmrCtx);
505         close(sctpParams.epoll_fd);
506         return -1;
507     }
508
509     sctpParams.inotifyWD = inotify_add_watch(sctpParams.inotifyFD,
510                                              (const char *)sctpParams.configFilePath.c_str(),
511                                              (unsigned)IN_OPEN | (unsigned)IN_CLOSE_WRITE | (unsigned)IN_CLOSE_NOWRITE); //IN_CLOSE = (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE)
512     if (sctpParams.inotifyWD == -1) {
513         mdclog_write(MDCLOG_ERR, "Failed to add directory : %s to  inotify (inotify_add_watch) %s",
514                      sctpParams.configFilePath.c_str(),
515                      strerror(errno));
516         close(sctpParams.inotifyFD);
517         return -1;
518     }
519
520     struct epoll_event event{};
521     event.events = (EPOLLIN);
522     event.data.fd = sctpParams.inotifyFD;
523     // add listening RMR FD to epoll
524     if (epoll_ctl(sctpParams.epoll_fd, EPOLL_CTL_ADD, sctpParams.inotifyFD, &event)) {
525         mdclog_write(MDCLOG_ERR, "Failed to add inotify FD to epoll");
526         close(sctpParams.inotifyFD);
527         return -1;
528     }
529     return 0;
530 }
531
532 /**
533  *
534  * @param args
535  * @return
536  */
/**
 * Worker-thread main loop. Waits on the shared epoll descriptor and
 * dispatches each event: socket errors, EINPROGRESS connect completions, new
 * SCTP connections from RAN nodes (listenFD), RMR messages from xApps
 * (rmrListenFd), inotify configuration changes (inotifyFD), and data on
 * established SCTP links (everything else). Never returns except on a fatal
 * epoll_wait error.
 *
 * @param params shared run-time parameters (epoll fd, RMR context, sctpMap, ...)
 */
void listener(sctp_params_t *params) {
    int num_of_SCTP_messages = 0;
    auto totalTime = 0.0;
    mdclog_mdc_clean();
    mdclog_level_set(params->logLevel);

    // Render this thread's id into a printable string for the logging MDC
    // by temporarily redirecting cout into a string stream.
    std::thread::id this_id = std::this_thread::get_id();
    //save cout
    streambuf *oldCout = cout.rdbuf();
    ostringstream memCout;
    // create new cout
    cout.rdbuf(memCout.rdbuf());
    cout << this_id;
    //return to the normal cout
    cout.rdbuf(oldCout);

    char tid[32];
    memcpy(tid, memCout.str().c_str(), memCout.str().length() < 32 ? memCout.str().length() : 31);
    // NOTE(review): the terminator index uses the *uncapped* length — if the
    // printed thread id were ever >= 32 chars this writes past tid; verify.
    tid[memCout.str().length()] = 0;
    mdclog_mdc_add("thread id", tid);

    if (mdclog_level_get() >= MDCLOG_DEBUG) {
        mdclog_write(MDCLOG_DEBUG, "started thread number %s", tid);
    }

    RmrMessagesBuffer_t rmrMessageBuffer{};
    //create and init RMR
    rmrMessageBuffer.rmrCtx = params->rmrCtx;

    // Event array and RMR buffers live for the whole thread lifetime
    // (the loop below never exits normally, so they are not freed).
    auto *events = (struct epoll_event *) calloc(MAXEVENTS, sizeof(struct epoll_event));
    struct timespec end{0, 0};
    struct timespec start{0, 0};

    rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
    rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);

    // Local copy of the keep-alive payload built in buildConfiguration().
    memcpy(rmrMessageBuffer.ka_message, params->ka_message, params->ka_message_length);
    rmrMessageBuffer.ka_message_len = params->ka_message_length;
    rmrMessageBuffer.ka_message[rmrMessageBuffer.ka_message_len] = 0;

    if (mdclog_level_get() >= MDCLOG_DEBUG) {
        mdclog_write(MDCLOG_DEBUG, "keep alive message is : %s", rmrMessageBuffer.ka_message);
    }

    ReportingMessages_t message {};

//    for (int i = 0; i < MAX_RMR_BUFF_ARRY; i++) {
//        rmrMessageBuffer.rcvBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
//        rmrMessageBuffer.sendBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
//    }

    while (true) {
        if (mdclog_level_get() >= MDCLOG_DEBUG) {
            mdclog_write(MDCLOG_DEBUG, "Start EPOLL Wait. Timeout = %d", params->epollTimeOut);
        }
        auto numOfEvents = epoll_wait(params->epoll_fd, events, MAXEVENTS, params->epollTimeOut);
        if (numOfEvents == 0) { // time out
            if (mdclog_level_get() >= MDCLOG_DEBUG) {
                mdclog_write(MDCLOG_DEBUG, "got epoll timeout");
            }
            continue;
        } else if (numOfEvents < 0) {
            // EINTR (signal during wait) is benign; anything else ends the thread.
            if (errno == EINTR) {
                if (mdclog_level_get() >= MDCLOG_DEBUG) {
                    mdclog_write(MDCLOG_DEBUG, "got EINTR : %s", strerror(errno));
                }
                continue;
            }
            mdclog_write(MDCLOG_ERR, "Epoll wait failed, errno = %s", strerror(errno));
            return;
        }
        for (auto i = 0; i < numOfEvents; i++) {
            if (mdclog_level_get() >= MDCLOG_DEBUG) {
                mdclog_write(MDCLOG_DEBUG, "handling epoll event %d out of %d", i + 1, numOfEvents);
            }
            // Timestamp the event once; handlers and the latency report reuse it.
            clock_gettime(CLOCK_MONOTONIC, &message.message.time);
            start.tv_sec = message.message.time.tv_sec;
            start.tv_nsec = message.message.time.tv_nsec;


            if ((events[i].events & EPOLLERR) || (events[i].events & EPOLLHUP)) {
                handlepoll_error(events[i], message, rmrMessageBuffer, params);
            } else if (events[i].events & EPOLLOUT) {
                // A non-blocking connect() finished (socket became writable).
                handleEinprogressMessages(events[i], message, rmrMessageBuffer, params);
            } else if (params->listenFD == events[i].data.fd) {
                if (mdclog_level_get() >= MDCLOG_INFO) {
                    mdclog_write(MDCLOG_INFO, "New connection request from sctp network\n");
                }
                // new connection is requested from RAN  start build connection
                // (edge-triggered listen socket: loop until accept() drains it;
                // the unconditional break at the bottom exits after one accept).
                while (true) {
                    struct sockaddr in_addr {};
                    socklen_t in_len;
                    char hostBuff[NI_MAXHOST];
                    char portBuff[NI_MAXSERV];

                    in_len = sizeof(in_addr);
                    // NOTE(review): peerInfo is calloc'ed but not freed on the
                    // error paths below (only the fd is closed) — possible leak.
                    auto *peerInfo = (ConnectedCU_t *)calloc(1, sizeof(ConnectedCU_t));
                    peerInfo->sctpParams = params;
                    peerInfo->fileDescriptor = accept(params->listenFD, &in_addr, &in_len);
                    if (peerInfo->fileDescriptor == -1) {
                        if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) {
                            /* We have processed all incoming connections. */
                            break;
                        } else {
                            mdclog_write(MDCLOG_ERR, "Accept error, errno = %s", strerror(errno));
                            break;
                        }
                    }
                    if (setSocketNoBlocking(peerInfo->fileDescriptor) == -1) {
                        // NOTE(review): hostBuff/portBuff are still uninitialized
                        // here (filled only by getnameinfo below) — the log may
                        // print garbage; verify intent.
                        mdclog_write(MDCLOG_ERR, "setSocketNoBlocking failed to set new connection %s on port %s\n", hostBuff, portBuff);
                        close(peerInfo->fileDescriptor);
                        break;
                    }
                    auto  ans = getnameinfo(&in_addr, in_len,
                                            peerInfo->hostName, NI_MAXHOST,
                                            peerInfo->portNumber, NI_MAXSERV, (unsigned )((unsigned int)NI_NUMERICHOST | (unsigned int)NI_NUMERICSERV));
                    if (ans < 0) {
                        mdclog_write(MDCLOG_ERR, "Failed to get info on connection request. %s\n", strerror(errno));
                        close(peerInfo->fileDescriptor);
                        break;
                    }
                    if (mdclog_level_get() >= MDCLOG_DEBUG) {
                        mdclog_write(MDCLOG_DEBUG, "Accepted connection on descriptor %d (host=%s, port=%s)\n", peerInfo->fileDescriptor, peerInfo->hostName, peerInfo->portNumber);
                    }
                    peerInfo->isConnected = false;
                    peerInfo->gotSetup = false;
                    if (addToEpoll(params->epoll_fd,
                                   peerInfo,
                                   (EPOLLIN | EPOLLET),
                                   params->sctpMap, nullptr,
                                   0) != 0) {
                        break;
                    }
                    break;
                }
            } else if (params->rmrListenFd == events[i].data.fd) {
                // got message from XAPP
                num_of_XAPP_messages.fetch_add(1, std::memory_order_release);
                num_of_messages.fetch_add(1, std::memory_order_release);
                if (mdclog_level_get() >= MDCLOG_DEBUG) {
                    mdclog_write(MDCLOG_DEBUG, "new message from RMR");
                }
                if (receiveXappMessages(params->sctpMap,
                                        rmrMessageBuffer,
                                        message.message.time) != 0) {
                    mdclog_write(MDCLOG_ERR, "Error handling Xapp message");
                }
            } else if (params->inotifyFD == events[i].data.fd) {
                mdclog_write(MDCLOG_INFO, "Got event from inotify (configuration update)");
                handleConfigChange(params);
            } else {
                /* We RMR_ERR_RETRY have data on the fd waiting to be read. Read and display it.
                 * We must read whatever data is available completely, as we are running
                 *  in edge-triggered mode and won't get a notification again for the same data. */
                num_of_messages.fetch_add(1, std::memory_order_release);
                if (mdclog_level_get() >= MDCLOG_DEBUG) {
                    mdclog_write(MDCLOG_DEBUG, "new message from SCTP, epoll flags are : %0x", events[i].events);
                }
                receiveDataFromSctp(&events[i],
                                    params->sctpMap,
                                    num_of_SCTP_messages,
                                    rmrMessageBuffer,
                                    message.message.time);
            }

            // Per-event handling latency, for debug logging / accumulated totals.
            clock_gettime(CLOCK_MONOTONIC, &end);
            if (mdclog_level_get() >= MDCLOG_INFO) {
                totalTime += ((end.tv_sec + 1.0e-9 * end.tv_nsec) -
                              ((double) start.tv_sec + 1.0e-9 * start.tv_nsec));
            }
            if (mdclog_level_get() >= MDCLOG_DEBUG) {
                mdclog_write(MDCLOG_DEBUG, "message handling is %ld seconds %ld nanoseconds",
                             end.tv_sec - start.tv_sec,
                             end.tv_nsec - start.tv_nsec);
            }
        }
    }
}
715
716 /**
717  *
718  * @param sctpParams
719  */
720 void handleConfigChange(sctp_params_t *sctpParams) {
721     char buf[4096] __attribute__ ((aligned(__alignof__(struct inotify_event))));
722     const struct inotify_event *event;
723     char *ptr;
724
725     path p = (sctpParams->configFilePath + "/" + sctpParams->configFileName).c_str();
726     auto endlessLoop = true;
727     while (endlessLoop) {
728         auto len = read(sctpParams->inotifyFD, buf, sizeof buf);
729         if (len == -1) {
730             if (errno != EAGAIN) {
731                 mdclog_write(MDCLOG_ERR, "read %s ", strerror(errno));
732                 endlessLoop = false;
733                 continue;
734             }
735             else {
736                 endlessLoop = false;
737                 continue;
738             }
739         }
740
741         for (ptr = buf; ptr < buf + len; ptr += sizeof(struct inotify_event) + event->len) {
742             event = (const struct inotify_event *)ptr;
743             if (event->mask & (uint32_t)IN_ISDIR) {
744                 continue;
745             }
746
747             // the directory name
748             if (sctpParams->inotifyWD == event->wd) {
749                 // not the directory
750             }
751             if (event->len) {
752                 auto  retVal = strcmp(sctpParams->configFileName.c_str(), event->name);
753                 if (retVal != 0) {
754                     continue;
755                 }
756             }
757             // only the file we want
758             if (event->mask & (uint32_t)IN_CLOSE_WRITE) {
759                 if (mdclog_level_get() >= MDCLOG_INFO) {
760                     mdclog_write(MDCLOG_INFO, "Configuration file changed");
761                 }
762                 if (exists(p)) {
763                     const int size = 2048;
764                     auto fileSize = file_size(p);
765                     if (fileSize > size) {
766                         mdclog_write(MDCLOG_ERR, "File %s larger than %d", p.string().c_str(), size);
767                         return;
768                     }
769                 } else {
770                     mdclog_write(MDCLOG_ERR, "Configuration File %s not exists", p.string().c_str());
771                     return;
772                 }
773
774                 ReadConfigFile conf;
775                 if (conf.openConfigFile(p.string()) == -1) {
776                     mdclog_write(MDCLOG_ERR, "Filed to open config file %s, %s",
777                                  p.string().c_str(), strerror(errno));
778                     return;
779                 }
780
781                 auto tmpStr = conf.getStringValue("loglevel");
782                 if (tmpStr.length() == 0) {
783                     mdclog_write(MDCLOG_ERR, "illigal loglevel. Set loglevel to MDCLOG_INFO");
784                     tmpStr = "info";
785                 }
786                 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
787
788                 if ((tmpStr.compare("debug")) == 0) {
789                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_DEBUG");
790                     sctpParams->logLevel = MDCLOG_DEBUG;
791                 } else if ((tmpStr.compare("info")) == 0) {
792                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_INFO");
793                     sctpParams->logLevel = MDCLOG_INFO;
794                 } else if ((tmpStr.compare("warning")) == 0) {
795                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_WARN");
796                     sctpParams->logLevel = MDCLOG_WARN;
797                 } else if ((tmpStr.compare("error")) == 0) {
798                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_ERR");
799                     sctpParams->logLevel = MDCLOG_ERR;
800                 } else {
801                     mdclog_write(MDCLOG_ERR, "illigal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
802                     sctpParams->logLevel = MDCLOG_INFO;
803                 }
804                 mdclog_level_set(sctpParams->logLevel);
805
806
807                 tmpStr = conf.getStringValue("trace");
808                 if (tmpStr.length() == 0) {
809                     mdclog_write(MDCLOG_ERR, "illigal trace. Set trace to stop");
810                     tmpStr = "stop";
811                 }
812
813                 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
814                 if ((tmpStr.compare("start")) == 0) {
815                     mdclog_write(MDCLOG_INFO, "Trace set to: start");
816                     sctpParams->trace = true;
817                 } else if ((tmpStr.compare("stop")) == 0) {
818                     mdclog_write(MDCLOG_INFO, "Trace set to: stop");
819                     sctpParams->trace = false;
820                 } else {
821                     mdclog_write(MDCLOG_ERR, "Trace was set to wrong value %s, set to stop", tmpStr.c_str());
822                     sctpParams->trace = false;
823                 }
824                 jsonTrace = sctpParams->trace;
825
826
827                 endlessLoop = false;
828             }
829         }
830     }
831 }
832
833 /**
834  *
835  * @param event
836  * @param message
837  * @param rmrMessageBuffer
838  * @param params
839  */
840 void handleEinprogressMessages(struct epoll_event &event,
841                                ReportingMessages_t &message,
842                                RmrMessagesBuffer_t &rmrMessageBuffer,
843                                sctp_params_t *params) {
844     auto *peerInfo = (ConnectedCU_t *)event.data.ptr;
845     memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
846
847     mdclog_write(MDCLOG_INFO, "file descriptor %d got EPOLLOUT", peerInfo->fileDescriptor);
848     auto retVal = 0;
849     socklen_t retValLen = 0;
850     auto rc = getsockopt(peerInfo->fileDescriptor, SOL_SOCKET, SO_ERROR, &retVal, &retValLen);
851     if (rc != 0 || retVal != 0) {
852         if (rc != 0) {
853             rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
854                                                          "%s|Failed SCTP Connection, after EINPROGRESS the getsockopt%s",
855                                                          peerInfo->enodbName, strerror(errno));
856         } else if (retVal != 0) {
857             rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
858                                                          "%s|Failed SCTP Connection after EINPROGRESS, SO_ERROR",
859                                                          peerInfo->enodbName);
860         }
861
862         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
863         message.message.asnLength = rmrMessageBuffer.sendMessage->len;
864         mdclog_write(MDCLOG_ERR, "%s", rmrMessageBuffer.sendMessage->payload);
865         message.message.direction = 'N';
866         if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
867             mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
868         }
869         memset(peerInfo->asnData, 0, peerInfo->asnLength);
870         peerInfo->asnLength = 0;
871         peerInfo->mtype = 0;
872         return;
873     }
874
875     peerInfo->isConnected = true;
876
877     if (modifyToEpoll(params->epoll_fd, peerInfo, (EPOLLIN | EPOLLET), params->sctpMap, peerInfo->enodbName,
878                       peerInfo->mtype) != 0) {
879         mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_MOD");
880         return;
881     }
882
883     message.message.asndata = (unsigned char *)peerInfo->asnData;
884     message.message.asnLength = peerInfo->asnLength;
885     message.message.messageType = peerInfo->mtype;
886     memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
887     num_of_messages.fetch_add(1, std::memory_order_release);
888     if (mdclog_level_get() >= MDCLOG_DEBUG) {
889         mdclog_write(MDCLOG_DEBUG, "send the delayed SETUP/ENDC SETUP to sctp for %s",
890                      message.message.enodbName);
891     }
892     if (sendSctpMsg(peerInfo, message, params->sctpMap) != 0) {
893         if (mdclog_level_get() >= MDCLOG_DEBUG) {
894             mdclog_write(MDCLOG_DEBUG, "Error write to SCTP  %s %d", __func__, __LINE__);
895         }
896         return;
897     }
898
899     memset(peerInfo->asnData, 0, peerInfo->asnLength);
900     peerInfo->asnLength = 0;
901     peerInfo->mtype = 0;
902 }
903
904
905 void handlepoll_error(struct epoll_event &event,
906                       ReportingMessages_t &message,
907                       RmrMessagesBuffer_t &rmrMessageBuffer,
908                       sctp_params_t *params) {
909     if (event.data.fd != params->rmrListenFd) {
910         auto *peerInfo = (ConnectedCU_t *)event.data.ptr;
911         mdclog_write(MDCLOG_ERR, "epoll error, events %0x on fd %d, RAN NAME : %s",
912                      event.events, peerInfo->fileDescriptor, peerInfo->enodbName);
913
914         rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
915                                                      "%s|Failed SCTP Connection",
916                                                      peerInfo->enodbName);
917         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
918         message.message.asnLength = rmrMessageBuffer.sendMessage->len;
919
920         memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
921         message.message.direction = 'N';
922         if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
923             mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
924         }
925
926         close(peerInfo->fileDescriptor);
927         params->sctpMap->erase(peerInfo->enodbName);
928         cleanHashEntry((ConnectedCU_t *) event.data.ptr, params->sctpMap);
929     } else {
930         mdclog_write(MDCLOG_ERR, "epoll error, events %0x on RMR FD", event.events);
931     }
932 }
933 /**
934  *
935  * @param socket
936  * @return
937  */
938 int setSocketNoBlocking(int socket) {
939     auto flags = fcntl(socket, F_GETFL, 0);
940
941     if (flags == -1) {
942         mdclog_mdc_add("func", "fcntl");
943         mdclog_write(MDCLOG_ERR, "%s, %s", __FUNCTION__, strerror(errno));
944         mdclog_mdc_clean();
945         return -1;
946     }
947
948     flags = (unsigned) flags | (unsigned) O_NONBLOCK;
949     if (fcntl(socket, F_SETFL, flags) == -1) {
950         mdclog_mdc_add("func", "fcntl");
951         mdclog_write(MDCLOG_ERR, "%s, %s", __FUNCTION__, strerror(errno));
952         mdclog_mdc_clean();
953         return -1;
954     }
955
956     return 0;
957 }
958
959 /**
960  *
961  * @param val
962  * @param m
963  */
964 void cleanHashEntry(ConnectedCU_t *val, Sctp_Map_t *m) {
965     char *dummy;
966     auto port = (uint16_t) strtol(val->portNumber, &dummy, 10);
967     char searchBuff[2048]{};
968
969     snprintf(searchBuff, sizeof searchBuff, "host:%s:%d", val->hostName, port);
970     m->erase(searchBuff);
971
972     m->erase(val->enodbName);
973     free(val);
974 }
975
976 /**
977  *
978  * @param fd file discriptor
979  * @param data the asn data to send
980  * @param len  length of the data
981  * @param enodbName the enodbName as in the map for printing purpose
982  * @param m map host information
983  * @param mtype message number
984  * @return 0 success, anegative number on fail
985  */
986 int sendSctpMsg(ConnectedCU_t *peerInfo, ReportingMessages_t &message, Sctp_Map_t *m) {
987     auto loglevel = mdclog_level_get();
988     int fd = peerInfo->fileDescriptor;
989     if (loglevel >= MDCLOG_DEBUG) {
990         mdclog_write(MDCLOG_DEBUG, "Send SCTP message for CU %s, %s",
991                      message.message.enodbName, __FUNCTION__);
992     }
993
994     while (true) {
995         if (send(fd,message.message.asndata, message.message.asnLength,MSG_NOSIGNAL) < 0) {
996             if (errno == EINTR) {
997                 continue;
998             }
999             mdclog_write(MDCLOG_ERR, "error writing to CU a message, %s ", strerror(errno));
1000             if (!peerInfo->isConnected) {
1001                 mdclog_write(MDCLOG_ERR, "connection to CU %s is still in progress.", message.message.enodbName);
1002                 return -1;
1003             }
1004             cleanHashEntry(peerInfo, m);
1005             close(fd);
1006             char key[MAX_ENODB_NAME_SIZE * 2];
1007             snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", message.message.enodbName,
1008                      message.message.messageType);
1009             if (loglevel >= MDCLOG_DEBUG) {
1010                 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
1011             }
1012             auto tmp = m->find(key);
1013             if (tmp) {
1014                 free(tmp);
1015             }
1016             m->erase(key);
1017             return -1;
1018         }
1019         message.message.direction = 'D';
1020         // send report.buffer of size
1021         buildJsonMessage(message);
1022
1023         if (loglevel >= MDCLOG_DEBUG) {
1024             mdclog_write(MDCLOG_DEBUG,
1025                          "SCTP message for CU %s sent from %s",
1026                          message.message.enodbName,
1027                          __FUNCTION__);
1028         }
1029         return 0;
1030     }
1031 }
1032
1033 /**
1034  *
1035  * @param message
1036  * @param rmrMessageBuffer
1037  */
1038 void getRequestMetaData(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1039     message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
1040     message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
1041
1042     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1043         mdclog_write(MDCLOG_DEBUG, "Message from Xapp RAN name = %s message length = %ld",
1044                      message.message.enodbName, (unsigned long) message.message.asnLength);
1045     }
1046 }
1047
1048
1049
1050 /**
1051  *
1052  * @param events
1053  * @param sctpMap
1054  * @param numOfMessages
1055  * @param rmrMessageBuffer
1056  * @param ts
1057  * @return
1058  */
1059 int receiveDataFromSctp(struct epoll_event *events,
1060                         Sctp_Map_t *sctpMap,
1061                         int &numOfMessages,
1062                         RmrMessagesBuffer_t &rmrMessageBuffer,
1063                         struct timespec &ts) {
1064     /* We have data on the fd waiting to be read. Read and display it.
1065  * We must read whatever data is available completely, as we are running
1066  *  in edge-triggered mode and won't get a notification again for the same data. */
1067     ReportingMessages_t message {};
1068     auto done = 0;
1069     auto loglevel = mdclog_level_get();
1070
1071     // get the identity of the interface
1072     message.peerInfo = (ConnectedCU_t *)events->data.ptr;
1073
1074     struct timespec start{0, 0};
1075     struct timespec decodestart{0, 0};
1076     struct timespec end{0, 0};
1077
1078     E2AP_PDU_t *pdu = nullptr;
1079
1080     while (true) {
1081         if (loglevel >= MDCLOG_DEBUG) {
1082             mdclog_write(MDCLOG_DEBUG, "Start Read from SCTP %d fd", message.peerInfo->fileDescriptor);
1083             clock_gettime(CLOCK_MONOTONIC, &start);
1084         }
1085         // read the buffer directly to rmr payload
1086         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1087         message.message.asnLength = rmrMessageBuffer.sendMessage->len =
1088                 read(message.peerInfo->fileDescriptor, rmrMessageBuffer.sendMessage->payload, RECEIVE_SCTP_BUFFER_SIZE);
1089
1090         if (loglevel >= MDCLOG_DEBUG) {
1091             mdclog_write(MDCLOG_DEBUG, "Finish Read from SCTP %d fd message length = %ld",
1092                          message.peerInfo->fileDescriptor, message.message.asnLength);
1093         }
1094
1095         memcpy(message.message.enodbName, message.peerInfo->enodbName, sizeof(message.peerInfo->enodbName));
1096         message.message.direction = 'U';
1097         message.message.time.tv_nsec = ts.tv_nsec;
1098         message.message.time.tv_sec = ts.tv_sec;
1099
1100         if (message.message.asnLength < 0) {
1101             if (errno == EINTR) {
1102                 continue;
1103             }
1104             /* If errno == EAGAIN, that means we have read all
1105                data. So goReportingMessages_t back to the main loop. */
1106             if (errno != EAGAIN) {
1107                 mdclog_write(MDCLOG_ERR, "Read error, %s ", strerror(errno));
1108                 done = 1;
1109             } else if (loglevel >= MDCLOG_DEBUG) {
1110                 mdclog_write(MDCLOG_DEBUG, "EAGAIN - descriptor = %d", message.peerInfo->fileDescriptor);
1111             }
1112             break;
1113         } else if (message.message.asnLength == 0) {
1114             /* End of file. The remote has closed the connection. */
1115             if (loglevel >= MDCLOG_INFO) {
1116                 mdclog_write(MDCLOG_INFO, "END of File Closed connection - descriptor = %d",
1117                              message.peerInfo->fileDescriptor);
1118             }
1119             done = 1;
1120             break;
1121         }
1122
1123         if (loglevel >= MDCLOG_DEBUG) {
1124             char printBuffer[4096]{};
1125             char *tmp = printBuffer;
1126             for (size_t i = 0; i < (size_t)message.message.asnLength; ++i) {
1127                 snprintf(tmp, 3, "%02x", message.message.asndata[i]);
1128                 tmp += 2;
1129             }
1130             printBuffer[message.message.asnLength] = 0;
1131             clock_gettime(CLOCK_MONOTONIC, &end);
1132             mdclog_write(MDCLOG_DEBUG, "Before Encoding E2AP PDU for : %s, Read time is : %ld seconds, %ld nanoseconds",
1133                          message.peerInfo->enodbName, end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
1134             mdclog_write(MDCLOG_DEBUG, "PDU buffer length = %ld, data =  : %s", message.message.asnLength,
1135                          printBuffer);
1136             clock_gettime(CLOCK_MONOTONIC, &decodestart);
1137         }
1138
1139         auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1140                           message.message.asndata, message.message.asnLength);
1141         if (rval.code != RC_OK) {
1142             mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2AP PDU from RAN : %s", rval.code,
1143                          message.peerInfo->enodbName);
1144             break;
1145         }
1146
1147         if (loglevel >= MDCLOG_DEBUG) {
1148             clock_gettime(CLOCK_MONOTONIC, &end);
1149             mdclog_write(MDCLOG_DEBUG, "After Encoding E2AP PDU for : %s, Read time is : %ld seconds, %ld nanoseconds",
1150                          message.peerInfo->enodbName, end.tv_sec - decodestart.tv_sec, end.tv_nsec - decodestart.tv_nsec);
1151             char *printBuffer;
1152             size_t size;
1153             FILE *stream = open_memstream(&printBuffer, &size);
1154             asn_fprint(stream, &asn_DEF_E2AP_PDU, pdu);
1155             mdclog_write(MDCLOG_DEBUG, "Encoding E2AP PDU past : %s", printBuffer);
1156             clock_gettime(CLOCK_MONOTONIC, &decodestart);
1157         }
1158
1159         switch (pdu->present) {
1160             case E2AP_PDU_PR_initiatingMessage: {//initiating message
1161                 asnInitiatingRequest(pdu, sctpMap,message, rmrMessageBuffer);
1162                 break;
1163             }
1164             case E2AP_PDU_PR_successfulOutcome: { //successful outcome
1165                 asnSuccsesfulMsg(pdu, sctpMap, message,  rmrMessageBuffer);
1166                 break;
1167             }
1168             case E2AP_PDU_PR_unsuccessfulOutcome: { //Unsuccessful Outcome
1169                 asnUnSuccsesfulMsg(pdu, sctpMap, message, rmrMessageBuffer);
1170                 break;
1171             }
1172             default:
1173                 mdclog_write(MDCLOG_ERR, "Unknown index %d in E2AP PDU", pdu->present);
1174                 break;
1175         }
1176         if (loglevel >= MDCLOG_DEBUG) {
1177             clock_gettime(CLOCK_MONOTONIC, &end);
1178             mdclog_write(MDCLOG_DEBUG,
1179                          "After processing message and sent to rmr for : %s, Read time is : %ld seconds, %ld nanoseconds",
1180                          message.peerInfo->enodbName, end.tv_sec - decodestart.tv_sec, end.tv_nsec - decodestart.tv_nsec);
1181         }
1182         numOfMessages++;
1183         if (pdu != nullptr) {
1184             ASN_STRUCT_RESET(asn_DEF_E2AP_PDU, pdu);
1185             //ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);
1186             //pdu = nullptr;
1187         }
1188     }
1189
1190     if (done) {
1191         if (loglevel >= MDCLOG_INFO) {
1192             mdclog_write(MDCLOG_INFO, "Closed connection - descriptor = %d", message.peerInfo->fileDescriptor);
1193         }
1194         message.message.asnLength = rmrMessageBuffer.sendMessage->len =
1195                 snprintf((char *)rmrMessageBuffer.sendMessage->payload,
1196                          256,
1197                          "%s|CU disconnected unexpectedly",
1198                          message.peerInfo->enodbName);
1199         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1200
1201         if (sendRequestToXapp(message,
1202                               RIC_SCTP_CONNECTION_FAILURE,
1203                               rmrMessageBuffer) != 0) {
1204             mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
1205         }
1206
1207         /* Closing descriptor make epoll remove it from the set of descriptors which are monitored. */
1208         close(message.peerInfo->fileDescriptor);
1209         cleanHashEntry((ConnectedCU_t *) events->data.ptr, sctpMap);
1210     }
1211     if (loglevel >= MDCLOG_DEBUG) {
1212         clock_gettime(CLOCK_MONOTONIC, &end);
1213         mdclog_write(MDCLOG_DEBUG, "from receive SCTP to send RMR time is %ld seconds and %ld nanoseconds",
1214                      end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
1215
1216     }
1217     return 0;
1218 }
1219
1220 static void buildAndsendSetupRequest(ReportingMessages_t &message,
1221                                      RmrMessagesBuffer_t &rmrMessageBuffer,
1222                                      E2AP_PDU_t *pdu/*,
1223                                      string const &messageName,
1224                                      string const &ieName,
1225                                      vector<string> &functionsToAdd_v,
1226                                      vector<string> &functionsToModified_v*/) {
1227     auto logLevel = mdclog_level_get();
1228     // now we can send the data to e2Mgr
1229
1230     asn_enc_rval_t er;
1231     auto buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
1232     unsigned char buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
1233     while (true) {
1234         er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu, buffer, buffer_size);
1235         if (er.encoded == -1) {
1236             mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1237             return;
1238         } else if (er.encoded > (ssize_t) buffer_size) {
1239             buffer_size = er.encoded + 128;
1240             mdclog_write(MDCLOG_WARN, "Buffer of size %d is to small for %s. Reallocate buffer of size %d",
1241                          (int) buffer_size,
1242                          asn_DEF_E2AP_PDU.name, buffer_size);
1243             buffer_size = er.encoded + 128;
1244 //            free(buffer);
1245             continue;
1246         }
1247         buffer[er.encoded] = '\0';
1248         break;
1249     }
1250     // encode to xml
1251
1252     string res((char *)buffer);
1253     res.erase(std::remove(res.begin(), res.end(), '\n'), res.end());
1254     res.erase(std::remove(res.begin(), res.end(), '\t'), res.end());
1255     res.erase(std::remove(res.begin(), res.end(), ' '), res.end());
1256
1257 //    string res {};
1258 //    if (!functionsToAdd_v.empty() || !functionsToModified_v.empty()) {
1259 //        res = buildXmlData(messageName, ieName, functionsToAdd_v, functionsToModified_v, buffer, (size_t) er.encoded);
1260 //    }
1261     rmr_mbuf_t *rmrMsg;
1262 //    if (res.length() == 0) {
1263 //        rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, buffer_size + 256);
1264 //        rmrMsg->len = snprintf((char *) rmrMsg->payload, RECEIVE_SCTP_BUFFER_SIZE * 2, "%s:%d|%s",
1265 //                               message.peerInfo->sctpParams->myIP.c_str(),
1266 //                               message.peerInfo->sctpParams->rmrPort,
1267 //                               buffer);
1268 //    } else {
1269         rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, (int)res.length() + 256);
1270         rmrMsg->len = snprintf((char *) rmrMsg->payload, res.length() + 256, "%s:%d|%s",
1271                                message.peerInfo->sctpParams->myIP.c_str(),
1272                                message.peerInfo->sctpParams->rmrPort,
1273                                res.c_str());
1274 //    }
1275
1276     if (logLevel >= MDCLOG_DEBUG) {
1277         mdclog_write(MDCLOG_DEBUG, "Setup request of size %d :\n %s\n", rmrMsg->len, rmrMsg->payload);
1278     }
1279     // send to RMR
1280     rmrMsg->mtype = message.message.messageType;
1281     rmrMsg->state = 0;
1282     rmr_bytes2meid(rmrMsg, (unsigned char *) message.message.enodbName, strlen(message.message.enodbName));
1283
1284     static unsigned char tx[32];
1285     snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1286     rmr_bytes2xact(rmrMsg, tx, strlen((const char *) tx));
1287
1288     rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
1289     if (rmrMsg == nullptr) {
1290         mdclog_write(MDCLOG_ERR, "RMR failed to send returned nullptr");
1291     } else if (rmrMsg->state != 0) {
1292         char meid[RMR_MAX_MEID]{};
1293         if (rmrMsg->state == RMR_ERR_RETRY) {
1294             usleep(5);
1295             rmrMsg->state = 0;
1296             mdclog_write(MDCLOG_INFO, "RETRY sending Message %d to Xapp from %s",
1297                          rmrMsg->mtype, rmr_get_meid(rmrMsg, (unsigned char *) meid));
1298             rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
1299             if (rmrMsg == nullptr) {
1300                 mdclog_write(MDCLOG_ERR, "RMR failed send returned nullptr");
1301             } else if (rmrMsg->state != 0) {
1302                 mdclog_write(MDCLOG_ERR,
1303                              "RMR Retry failed %s sending request %d to Xapp from %s",
1304                              translateRmrErrorMessages(rmrMsg->state).c_str(),
1305                              rmrMsg->mtype,
1306                              rmr_get_meid(rmrMsg, (unsigned char *) meid));
1307             }
1308         } else {
1309             mdclog_write(MDCLOG_ERR, "RMR failed: %s. sending request %d to Xapp from %s",
1310                          translateRmrErrorMessages(rmrMsg->state).c_str(),
1311                          rmrMsg->mtype,
1312                          rmr_get_meid(rmrMsg, (unsigned char *) meid));
1313         }
1314     }
1315     message.peerInfo->gotSetup = true;
1316     buildJsonMessage(message);
1317     if (rmrMsg != nullptr) {
1318         rmr_free_msg(rmrMsg);
1319     }
1320 }
1321
// NOTE: the following two helpers are compiled out (#if 0). They belong to the
// commented-out E2SM/XML path (see the disabled buildXml includes at the top of
// this file) and are kept for reference only.
#if 0
// Decodes every RANfunction definition in `list` from aligned PER, re-encodes
// it as XER (XML), and appends the XML strings to runFunXML_v.
// Returns 0 on success, -1 if any item fails to decode.
int RAN_Function_list_To_Vector(RANfunctions_List_t& list, vector <string> &runFunXML_v) {
    auto index = 0;
    runFunXML_v.clear();
    for (auto j = 0; j < list.list.count; j++) {
        auto *raNfunctionItemIEs = (RANfunction_ItemIEs_t *)list.list.array[j];
        if (raNfunctionItemIEs->id == ProtocolIE_ID_id_RANfunction_Item &&
            (raNfunctionItemIEs->value.present == RANfunction_ItemIEs__value_PR_RANfunction_Item)) {
            // encode to xml
            E2SM_gNB_NRT_RANfunction_Definition_t *ranFunDef = nullptr;
            auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER,
                                   &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
                                   (void **)&ranFunDef,
                                   raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.buf,
                                   raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.size);
            if (rval.code != RC_OK) {
                mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2SM message from : %s",
                             rval.code,
                             asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name);
                return -1;
            }

            auto xml_buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
            unsigned char xml_buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
            memset(xml_buffer, 0, RECEIVE_SCTP_BUFFER_SIZE * 2);
            // encode to xml
            auto er = asn_encode_to_buffer(nullptr,
                                           ATS_BASIC_XER,
                                           &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
                                           ranFunDef,
                                           xml_buffer,
                                           xml_buffer_size);
            if (er.encoded == -1) {
                mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s",
                             asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
                             strerror(errno));
            } else if (er.encoded > (ssize_t)xml_buffer_size) {
                mdclog_write(MDCLOG_ERR, "Buffer of size %d is to small for %s, at %s line %d",
                             (int) xml_buffer_size,
                             asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name, __func__, __LINE__);
            } else {
                if (mdclog_level_get() >= MDCLOG_DEBUG) {
                    mdclog_write(MDCLOG_DEBUG, "Encoding E2SM %s PDU number %d : %s",
                                 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
                                 index++,
                                 xml_buffer);
                }

                string runFuncs = (char *)(xml_buffer);
                runFunXML_v.emplace_back(runFuncs);
            }
        }
    }
    return 0;
}

// Walks the RICserviceUpdate IEs and converts the added/modified RANfunction
// lists to XML vectors via RAN_Function_list_To_Vector().
// Returns 0 on success, -1 if any list fails to convert.
int collectServiceUpdate_RequestData(E2AP_PDU_t *pdu,
                                     Sctp_Map_t *sctpMap,
                                     ReportingMessages_t &message,
                                     vector <string> &RANfunctionsAdded_v,
                                     vector <string> &RANfunctionsModified_v) {
    memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
    for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.count; i++) {
        auto *ie = pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.array[i];
        if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
            // NOTE(review): the "Added" branch checks for the RANfunctionsID_List
            // present value but then reads choice.RANfunctions_List — looks
            // inconsistent with the "Modified" branch below; verify before re-enabling.
            if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctionsID_List) {
                if (mdclog_level_get() >= MDCLOG_DEBUG) {
                    mdclog_write(MDCLOG_DEBUG, "Run function list have %d entries",
                                 ie->value.choice.RANfunctions_List.list.count);
                }
                if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
                    return -1;
                }
            }
        } else if (ie->id == ProtocolIE_ID_id_RANfunctionsModified) {
            if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctions_List) {
                if (mdclog_level_get() >= MDCLOG_DEBUG) {
                    mdclog_write(MDCLOG_DEBUG, "Run function list have %d entries",
                                 ie->value.choice.RANfunctions_List.list.count);
                }
                if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsModified_v) != 0 ) {
                    return -1;
                }
            }
        }
    }
    if (mdclog_level_get() >= MDCLOG_DEBUG) {
        mdclog_write(MDCLOG_DEBUG, "Run function vector have %ld entries",
                     RANfunctionsAdded_v.size());
    }
    return 0;
}

#endif
1416
1417
1418 void buildPrometheuslist(ConnectedCU_t *peerInfo, Family<Counter> *prometheusFamily) {
1419     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Messages"}});
1420     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Bytes"}});
1421
1422     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Messages"}});
1423     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Bytes"}});
1424
1425     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICindication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Messages"}});
1426     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICindication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Bytes"}});
1427
1428     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Messages"}});
1429     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Bytes"}});
1430
1431     peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Messages"}});
1432     peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Bytes"}});
1433     // ---------------------------------------------
1434     peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Messages"}});
1435     peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Bytes"}});
1436
1437     peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Messages"}});
1438     peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Bytes"}});
1439
1440     peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Messages"}});
1441     peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Bytes"}});
1442
1443     peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Messages"}});
1444     peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Bytes"}});
1445     //-------------------------------------------------------------
1446
1447     peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Messages"}});
1448     peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Bytes"}});
1449
1450     peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Messages"}});
1451     peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Bytes"}});
1452
1453     peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Messages"}});
1454     peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Bytes"}});
1455
1456     //====================================================================================
1457     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Messages"}});
1458     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Bytes"}});
1459
1460     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Messages"}});
1461     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Bytes"}});
1462
1463     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Messages"}});
1464     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Bytes"}});
1465
1466     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceQuery)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Messages"}});
1467     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceQuery)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Bytes"}});
1468
1469     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Messages"}});
1470     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Bytes"}});
1471
1472     peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Messages"}});
1473     peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Bytes"}});
1474     //---------------------------------------------------------------------------------------------------------
1475     peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Messages"}});
1476     peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Bytes"}});
1477
1478     peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Messages"}});
1479     peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Bytes"}});
1480
1481     peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Messages"}});
1482     peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Bytes"}});
1483     //----------------------------------------------------------------------------------------------------------------
1484     peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Messages"}});
1485     peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Bytes"}});
1486
1487     peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Messages"}});
1488     peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Bytes"}});
1489 }
1490 /**
1491  *
1492  * @param pdu
1493  * @param sctpMap
1494  * @param message
1495  * @param RANfunctionsAdded_v
1496  * @return
1497  */
1498 int collectSetupRequestData(E2AP_PDU_t *pdu,
1499                                      Sctp_Map_t *sctpMap,
1500                                      ReportingMessages_t &message /*, vector <string> &RANfunctionsAdded_v*/) {
1501     memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
1502     for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.count; i++) {
1503         auto *ie = pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.array[i];
1504         if (ie->id == ProtocolIE_ID_id_GlobalE2node_ID) {
1505             // get the ran name for meid
1506             if (ie->value.present == E2setupRequestIEs__value_PR_GlobalE2node_ID) {
1507                 if (buildRanName(message.peerInfo->enodbName, ie) < 0) {
1508                     mdclog_write(MDCLOG_ERR, "Bad param in E2setupRequestIEs GlobalE2node_ID.\n");
1509                     // no mesage will be sent
1510                     return -1;
1511                 }
1512
1513                 memcpy(message.message.enodbName, message.peerInfo->enodbName, strlen(message.peerInfo->enodbName));
1514                 sctpMap->setkey(message.message.enodbName, message.peerInfo);
1515             }
1516         } /*else if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
1517             if (ie->value.present == E2setupRequestIEs__value_PR_RANfunctions_List) {
1518                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1519                     mdclog_write(MDCLOG_DEBUG, "Run function list have %d entries",
1520                                  ie->value.choice.RANfunctions_List.list.count);
1521                 }
1522                 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
1523                     return -1;
1524                 }
1525             }
1526         } */
1527     }
1528 //    if (mdclog_level_get() >= MDCLOG_DEBUG) {
1529 //        mdclog_write(MDCLOG_DEBUG, "Run function vector have %ld entries",
1530 //                     RANfunctionsAdded_v.size());
1531 //    }
1532     return 0;
1533 }
1534
1535 int XML_From_PER(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1536     E2AP_PDU_t *pdu = nullptr;
1537
1538     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1539         mdclog_write(MDCLOG_DEBUG, "got PER message of size %d is:%s",
1540                      rmrMessageBuffer.sendMessage->len, rmrMessageBuffer.sendMessage->payload);
1541     }
1542     auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1543                            rmrMessageBuffer.sendMessage->payload, rmrMessageBuffer.sendMessage->len);
1544     if (rval.code != RC_OK) {
1545         mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) setup response  from E2MGR : %s",
1546                      rval.code,
1547                      message.message.enodbName);
1548         return -1;
1549     }
1550
1551     int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
1552     auto er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu,
1553                                    rmrMessageBuffer.sendMessage->payload, buff_size);
1554     if (er.encoded == -1) {
1555         mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1556         return -1;
1557     } else if (er.encoded > (ssize_t)buff_size) {
1558         mdclog_write(MDCLOG_ERR, "Buffer of size %d is to small for %s, at %s line %d",
1559                      (int)rmrMessageBuffer.sendMessage->len,
1560                      asn_DEF_E2AP_PDU.name,
1561                      __func__,
1562                      __LINE__);
1563         return -1;
1564     }
1565     rmrMessageBuffer.sendMessage->len = er.encoded;
1566     return 0;
1567
1568 }
1569
1570 /**
1571  *
1572  * @param pdu
1573  * @param message
1574  * @param rmrMessageBuffer
1575  */
1576 void asnInitiatingRequest(E2AP_PDU_t *pdu,
1577                           Sctp_Map_t *sctpMap,
1578                           ReportingMessages_t &message,
1579                           RmrMessagesBuffer_t &rmrMessageBuffer) {
1580     auto logLevel = mdclog_level_get();
1581     auto procedureCode = ((InitiatingMessage_t *) pdu->choice.initiatingMessage)->procedureCode;
1582     if (logLevel >= MDCLOG_DEBUG) {
1583         mdclog_write(MDCLOG_DEBUG, "Initiating message %ld\n", procedureCode);
1584     }
1585     switch (procedureCode) {
1586         case ProcedureCode_id_E2setup: {
1587             if (logLevel >= MDCLOG_DEBUG) {
1588                 mdclog_write(MDCLOG_DEBUG, "Got E2setup");
1589             }
1590
1591 //            vector <string> RANfunctionsAdded_v;
1592 //            vector <string> RANfunctionsModified_v;
1593 //            RANfunctionsAdded_v.clear();
1594 //            RANfunctionsModified_v.clear();
1595             if (collectSetupRequestData(pdu, sctpMap, message) != 0) {
1596                 break;
1597             }
1598
1599             buildPrometheuslist(message.peerInfo, message.peerInfo->sctpParams->prometheusFamily);
1600
1601             string messageName("E2setupRequest");
1602             string ieName("E2setupRequestIEs");
1603             message.message.messageType = RIC_E2_SETUP_REQ;
1604             message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
1605             message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment((double)message.message.asnLength);
1606             buildAndsendSetupRequest(message, rmrMessageBuffer, pdu);
1607             break;
1608         }
1609         case ProcedureCode_id_RICserviceUpdate: {
1610             if (logLevel >= MDCLOG_DEBUG) {
1611                 mdclog_write(MDCLOG_DEBUG, "Got RICserviceUpdate %s", message.message.enodbName);
1612             }
1613 //            vector <string> RANfunctionsAdded_v;
1614 //            vector <string> RANfunctionsModified_v;
1615 //            RANfunctionsAdded_v.clear();
1616 //            RANfunctionsModified_v.clear();
1617 //            if (collectServiceUpdate_RequestData(pdu, sctpMap, message,
1618 //                                                 RANfunctionsAdded_v, RANfunctionsModified_v) != 0) {
1619 //                break;
1620 //            }
1621
1622             string messageName("RICserviceUpdate");
1623             string ieName("RICserviceUpdateIEs");
1624             message.message.messageType = RIC_SERVICE_UPDATE;
1625             message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
1626             message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment((double)message.message.asnLength);
1627
1628             buildAndsendSetupRequest(message, rmrMessageBuffer, pdu);
1629             break;
1630         }
1631         case ProcedureCode_id_ErrorIndication: {
1632             if (logLevel >= MDCLOG_DEBUG) {
1633                 mdclog_write(MDCLOG_DEBUG, "Got ErrorIndication %s", message.message.enodbName);
1634             }
1635             message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication]->Increment();
1636             message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication]->Increment((double)message.message.asnLength);
1637             if (sendRequestToXapp(message, RIC_ERROR_INDICATION, rmrMessageBuffer) != 0) {
1638                 mdclog_write(MDCLOG_ERR, "RIC_ERROR_INDICATION failed to send to xAPP");
1639             }
1640             break;
1641         }
1642         case ProcedureCode_id_Reset: {
1643             if (logLevel >= MDCLOG_DEBUG) {
1644                 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
1645             }
1646
1647             message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
1648             message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment((double)message.message.asnLength);
1649             if (XML_From_PER(message, rmrMessageBuffer) < 0) {
1650                 break;
1651             }
1652
1653             if (sendRequestToXapp(message, RIC_E2_RESET_REQ, rmrMessageBuffer) != 0) {
1654                 mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_REQ message failed to send to xAPP");
1655             }
1656             break;
1657         }
1658         case ProcedureCode_id_RICindication: {
1659             if (logLevel >= MDCLOG_DEBUG) {
1660                 mdclog_write(MDCLOG_DEBUG, "Got RICindication %s", message.message.enodbName);
1661             }
1662             for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.count; i++) {
1663                 auto messageSent = false;
1664                 RICindication_IEs_t *ie = pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.array[i];
1665                 if (logLevel >= MDCLOG_DEBUG) {
1666                     mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1667                 }
1668                 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1669                     if (logLevel >= MDCLOG_DEBUG) {
1670                         mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1671                     }
1672                     if (ie->value.present == RICindication_IEs__value_PR_RICrequestID) {
1673                         static unsigned char tx[32];
1674                         message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_INDICATION;
1675                         snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1676                         rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1677                         rmr_bytes2meid(rmrMessageBuffer.sendMessage,
1678                                        (unsigned char *)message.message.enodbName,
1679                                        strlen(message.message.enodbName));
1680                         rmrMessageBuffer.sendMessage->state = 0;
1681                         rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1682
1683                         //ie->value.choice.RICrequestID.ricInstanceID;
1684                         if (mdclog_level_get() >= MDCLOG_DEBUG) {
1685                             mdclog_write(MDCLOG_DEBUG, "sub id = %d, mtype = %d, ric instance id %ld, requestor id = %ld",
1686                                          rmrMessageBuffer.sendMessage->sub_id,
1687                                          rmrMessageBuffer.sendMessage->mtype,
1688                                          ie->value.choice.RICrequestID.ricInstanceID,
1689                                          ie->value.choice.RICrequestID.ricRequestorID);
1690                         }
1691                         message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICindication]->Increment();
1692                         message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICindication]->Increment((double)message.message.asnLength);
1693                         sendRmrMessage(rmrMessageBuffer, message);
1694                         messageSent = true;
1695                     } else {
1696                         mdclog_write(MDCLOG_ERR, "RIC request id missing illigal request");
1697                     }
1698                 }
1699                 if (messageSent) {
1700                     break;
1701                 }
1702             }
1703             break;
1704         }
1705         default: {
1706             mdclog_write(MDCLOG_ERR, "Undefined or not supported message = %ld", procedureCode);
1707             message.message.messageType = 0; // no RMR message type yet
1708
1709             buildJsonMessage(message);
1710
1711             break;
1712         }
1713     }
1714 }
1715
1716 /**
1717  *
1718  * @param pdu
1719  * @param message
1720  * @param rmrMessageBuffer
1721  */
1722 void asnSuccsesfulMsg(E2AP_PDU_t *pdu,
1723                       Sctp_Map_t *sctpMap,
1724                       ReportingMessages_t &message,
1725                       RmrMessagesBuffer_t &rmrMessageBuffer) {
1726     auto procedureCode = pdu->choice.successfulOutcome->procedureCode;
1727     auto logLevel = mdclog_level_get();
1728     if (logLevel >= MDCLOG_INFO) {
1729         mdclog_write(MDCLOG_INFO, "Successful Outcome %ld", procedureCode);
1730     }
1731     switch (procedureCode) {
1732         case ProcedureCode_id_Reset: {
1733             if (logLevel >= MDCLOG_DEBUG) {
1734                 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
1735             }
1736             message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
1737             message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment((double)message.message.asnLength);
1738             if (XML_From_PER(message, rmrMessageBuffer) < 0) {
1739                 break;
1740             }
1741             if (sendRequestToXapp(message, RIC_E2_RESET_RESP, rmrMessageBuffer) != 0) {
1742                 mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_RESP message failed to send to xAPP");
1743             }
1744             break;
1745         }
1746         case ProcedureCode_id_RICcontrol: {
1747             if (logLevel >= MDCLOG_DEBUG) {
1748                 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
1749             }
1750             for (auto i = 0;
1751                  i < pdu->choice.successfulOutcome->value.choice.RICcontrolAcknowledge.protocolIEs.list.count; i++) {
1752                 auto messageSent = false;
1753                 RICcontrolAcknowledge_IEs_t *ie = pdu->choice.successfulOutcome->value.choice.RICcontrolAcknowledge.protocolIEs.list.array[i];
1754                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1755                     mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1756                 }
1757                 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1758                     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1759                         mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1760                     }
1761                     if (ie->value.present == RICcontrolAcknowledge_IEs__value_PR_RICrequestID) {
1762                         message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_ACK;
1763                         rmrMessageBuffer.sendMessage->state = 0;
1764 //                        rmrMessageBuffer.sendMessage->sub_id = (int) ie->value.choice.RICrequestID.ricRequestorID;
1765                         rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1766
1767                         static unsigned char tx[32];
1768                         snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1769                         rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1770                         rmr_bytes2meid(rmrMessageBuffer.sendMessage,
1771                                        (unsigned char *)message.message.enodbName,
1772                                        strlen(message.message.enodbName));
1773
1774                         message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
1775                         message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment((double)message.message.asnLength);
1776                         sendRmrMessage(rmrMessageBuffer, message);
1777                         messageSent = true;
1778                     } else {
1779                         mdclog_write(MDCLOG_ERR, "RIC request id missing illigal request");
1780                     }
1781                 }
1782                 if (messageSent) {
1783                     break;
1784                 }
1785             }
1786
1787             break;
1788         }
1789         case ProcedureCode_id_RICsubscription: {
1790             if (logLevel >= MDCLOG_DEBUG) {
1791                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
1792             }
1793             message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
1794             message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment((double)message.message.asnLength);
1795             if (sendRequestToXapp(message, RIC_SUB_RESP, rmrMessageBuffer) != 0) {
1796                 mdclog_write(MDCLOG_ERR, "Subscription successful message failed to send to xAPP");
1797             }
1798             break;
1799         }
1800         case ProcedureCode_id_RICsubscriptionDelete: {
1801             if (logLevel >= MDCLOG_DEBUG) {
1802                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
1803             }
1804             message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
1805             message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment((double)message.message.asnLength);
1806             if (sendRequestToXapp(message, RIC_SUB_DEL_RESP, rmrMessageBuffer) != 0) {
1807                 mdclog_write(MDCLOG_ERR, "Subscription delete successful message failed to send to xAPP");
1808             }
1809             break;
1810         }
1811         default: {
1812             mdclog_write(MDCLOG_WARN, "Undefined or not supported message = %ld", procedureCode);
1813             message.message.messageType = 0; // no RMR message type yet
1814             buildJsonMessage(message);
1815
1816             break;
1817         }
1818     }
1819 }
1820
1821 /**
1822  *
1823  * @param pdu
1824  * @param message
1825  * @param rmrMessageBuffer
1826  */
1827 void asnUnSuccsesfulMsg(E2AP_PDU_t *pdu,
1828                         Sctp_Map_t *sctpMap,
1829                         ReportingMessages_t &message,
1830                         RmrMessagesBuffer_t &rmrMessageBuffer) {
1831     auto procedureCode = pdu->choice.unsuccessfulOutcome->procedureCode;
1832     auto logLevel = mdclog_level_get();
1833     if (logLevel >= MDCLOG_INFO) {
1834         mdclog_write(MDCLOG_INFO, "Unsuccessful Outcome %ld", procedureCode);
1835     }
1836     switch (procedureCode) {
1837         case ProcedureCode_id_RICcontrol: {
1838             if (logLevel >= MDCLOG_DEBUG) {
1839                 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
1840             }
1841             for (int i = 0;
1842                  i < pdu->choice.unsuccessfulOutcome->value.choice.RICcontrolFailure.protocolIEs.list.count; i++) {
1843                 auto messageSent = false;
1844                 RICcontrolFailure_IEs_t *ie = pdu->choice.unsuccessfulOutcome->value.choice.RICcontrolFailure.protocolIEs.list.array[i];
1845                 if (logLevel >= MDCLOG_DEBUG) {
1846                     mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1847                 }
1848                 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1849                     if (logLevel >= MDCLOG_DEBUG) {
1850                         mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1851                     }
1852                     if (ie->value.present == RICcontrolFailure_IEs__value_PR_RICrequestID) {
1853                         message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_FAILURE;
1854                         rmrMessageBuffer.sendMessage->state = 0;
1855 //                        rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricRequestorID;
1856                         rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1857                         static unsigned char tx[32];
1858                         snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1859                         rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1860                         rmr_bytes2meid(rmrMessageBuffer.sendMessage, (unsigned char *) message.message.enodbName,
1861                                        strlen(message.message.enodbName));
1862                         message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
1863                         message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment((double)message.message.asnLength);
1864                         sendRmrMessage(rmrMessageBuffer, message);
1865                         messageSent = true;
1866                     } else {
1867                         mdclog_write(MDCLOG_ERR, "RIC request id missing illigal request");
1868                     }
1869                 }
1870                 if (messageSent) {
1871                     break;
1872                 }
1873             }
1874             break;
1875         }
1876         case ProcedureCode_id_RICsubscription: {
1877             if (logLevel >= MDCLOG_DEBUG) {
1878                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
1879             }
1880             message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
1881             message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment((double)message.message.asnLength);
1882             if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
1883                 mdclog_write(MDCLOG_ERR, "Subscription unsuccessful message failed to send to xAPP");
1884             }
1885             break;
1886         }
1887         case ProcedureCode_id_RICsubscriptionDelete: {
1888             if (logLevel >= MDCLOG_DEBUG) {
1889                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
1890             }
1891             message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
1892             message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment((double)message.message.asnLength);
1893             if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
1894                 mdclog_write(MDCLOG_ERR, "Subscription Delete unsuccessful message failed to send to xAPP");
1895             }
1896             break;
1897         }
1898         default: {
1899             mdclog_write(MDCLOG_WARN, "Undefined or not supported message = %ld", procedureCode);
1900             message.message.messageType = 0; // no RMR message type yet
1901
1902             buildJsonMessage(message);
1903
1904             break;
1905         }
1906     }
1907 }
1908
1909 /**
1910  *
1911  * @param message
1912  * @param requestId
1913  * @param rmrMmessageBuffer
1914  * @return
1915  */
1916 int sendRequestToXapp(ReportingMessages_t &message,
1917                       int requestId,
1918                       RmrMessagesBuffer_t &rmrMmessageBuffer) {
1919     rmr_bytes2meid(rmrMmessageBuffer.sendMessage,
1920                    (unsigned char *)message.message.enodbName,
1921                    strlen(message.message.enodbName));
1922     message.message.messageType = rmrMmessageBuffer.sendMessage->mtype = requestId;
1923     rmrMmessageBuffer.sendMessage->state = 0;
1924     static unsigned char tx[32];
1925     snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1926     rmr_bytes2xact(rmrMmessageBuffer.sendMessage, tx, strlen((const char *) tx));
1927
1928     auto rc = sendRmrMessage(rmrMmessageBuffer, message);
1929     return rc;
1930 }
1931
1932 /**
1933  *
1934  * @param pSctpParams
1935  */
1936 void getRmrContext(sctp_params_t &pSctpParams) {
1937     pSctpParams.rmrCtx = nullptr;
1938     pSctpParams.rmrCtx = rmr_init(pSctpParams.rmrAddress, RECEIVE_XAPP_BUFFER_SIZE, RMRFL_NONE);
1939     if (pSctpParams.rmrCtx == nullptr) {
1940         mdclog_write(MDCLOG_ERR, "Failed to initialize RMR");
1941         return;
1942     }
1943
1944     rmr_set_stimeout(pSctpParams.rmrCtx, 0);    // disable retries for any send operation
1945     // we need to find that routing table exist and we can run
1946     if (mdclog_level_get() >= MDCLOG_INFO) {
1947         mdclog_write(MDCLOG_INFO, "We are after RMR INIT wait for RMR_Ready");
1948     }
1949     int rmrReady = 0;
1950     int count = 0;
1951     while (!rmrReady) {
1952         if ((rmrReady = rmr_ready(pSctpParams.rmrCtx)) == 0) {
1953             sleep(1);
1954         }
1955         count++;
1956         if (count % 60 == 0) {
1957             mdclog_write(MDCLOG_INFO, "waiting to RMR ready state for %d seconds", count);
1958         }
1959     }
1960     if (mdclog_level_get() >= MDCLOG_INFO) {
1961         mdclog_write(MDCLOG_INFO, "RMR running");
1962     }
1963     rmr_init_trace(pSctpParams.rmrCtx, 200);
1964     // get the RMR fd for the epoll
1965     pSctpParams.rmrListenFd = rmr_get_rcvfd(pSctpParams.rmrCtx);
1966     struct epoll_event event{};
1967     // add RMR fd to epoll
1968     event.events = (EPOLLIN);
1969     event.data.fd = pSctpParams.rmrListenFd;
1970     // add listening RMR FD to epoll
1971     if (epoll_ctl(pSctpParams.epoll_fd, EPOLL_CTL_ADD, pSctpParams.rmrListenFd, &event)) {
1972         mdclog_write(MDCLOG_ERR, "Failed to add RMR descriptor to epoll");
1973         close(pSctpParams.rmrListenFd);
1974         rmr_close(pSctpParams.rmrCtx);
1975         pSctpParams.rmrCtx = nullptr;
1976     }
1977 }
1978
1979 /**
1980  *
1981  * @param message
1982  * @param rmrMessageBuffer
1983  * @return
1984  */
1985 int PER_FromXML(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1986     E2AP_PDU_t *pdu = nullptr;
1987
1988     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1989         mdclog_write(MDCLOG_DEBUG, "got xml Format  data from xApp of size %d is:%s",
1990                 rmrMessageBuffer.rcvMessage->len, rmrMessageBuffer.rcvMessage->payload);
1991     }
1992     auto rval = asn_decode(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1993                            rmrMessageBuffer.rcvMessage->payload, rmrMessageBuffer.rcvMessage->len);
1994     if (rval.code != RC_OK) {
1995         mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) setup response  from E2MGR : %s",
1996                      rval.code,
1997                      message.message.enodbName);
1998         return -1;
1999     }
2000
2001     int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
2002     auto er = asn_encode_to_buffer(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, pdu,
2003                                    rmrMessageBuffer.rcvMessage->payload, buff_size);
2004     if (er.encoded == -1) {
2005         mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
2006         return -1;
2007     } else if (er.encoded > (ssize_t)buff_size) {
2008         mdclog_write(MDCLOG_ERR, "Buffer of size %d is to small for %s, at %s line %d",
2009                      (int)rmrMessageBuffer.rcvMessage->len,
2010                      asn_DEF_E2AP_PDU.name,
2011                      __func__,
2012                      __LINE__);
2013         return -1;
2014     }
2015     rmrMessageBuffer.rcvMessage->len = er.encoded;
2016     return 0;
2017 }
2018
2019 /**
2020  *
2021  * @param sctpMap
2022  * @param rmrMessageBuffer
2023  * @param ts
2024  * @return
2025  */
2026 int receiveXappMessages(Sctp_Map_t *sctpMap,
2027                         RmrMessagesBuffer_t &rmrMessageBuffer,
2028                         struct timespec &ts) {
2029     if (rmrMessageBuffer.rcvMessage == nullptr) {
2030         //we have error
2031         mdclog_write(MDCLOG_ERR, "RMR Allocation message, %s", strerror(errno));
2032         return -1;
2033     }
2034
2035     if (mdclog_level_get() >= MDCLOG_DEBUG) {
2036         mdclog_write(MDCLOG_DEBUG, "Call to rmr_rcv_msg");
2037     }
2038     rmrMessageBuffer.rcvMessage = rmr_rcv_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.rcvMessage);
2039     if (rmrMessageBuffer.rcvMessage == nullptr) {
2040         mdclog_write(MDCLOG_ERR, "RMR Receving message with null pointer, Realloc rmr mesage buffer");
2041         rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2042         return -2;
2043     }
2044     ReportingMessages_t message;
2045     message.message.direction = 'D';
2046     message.message.time.tv_nsec = ts.tv_nsec;
2047     message.message.time.tv_sec = ts.tv_sec;
2048
2049     // get message payload
2050     //auto msgData = msg->payload;
2051     if (rmrMessageBuffer.rcvMessage->state != 0) {
2052         mdclog_write(MDCLOG_ERR, "RMR Receving message with stat = %d", rmrMessageBuffer.rcvMessage->state);
2053         return -1;
2054     }
2055     rmr_get_meid(rmrMessageBuffer.rcvMessage, (unsigned char *)message.message.enodbName);
2056     message.peerInfo = (ConnectedCU_t *) sctpMap->find(message.message.enodbName);
2057     if (message.peerInfo == nullptr) {
2058         auto type = rmrMessageBuffer.rcvMessage->mtype;
2059         switch (type) {
2060             case RIC_SCTP_CLEAR_ALL:
2061             case E2_TERM_KEEP_ALIVE_REQ:
2062             case RIC_HEALTH_CHECK_REQ:
2063                 break;
2064             default:
2065                 mdclog_write(MDCLOG_ERR, "Failed to send message no CU entry %s", message.message.enodbName);
2066                 return -1;
2067         }
2068     }
2069
2070     switch (rmrMessageBuffer.rcvMessage->mtype) {
2071         case RIC_E2_SETUP_RESP : {
2072             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2073                 break;
2074             }
2075             message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
2076             message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment(rmrMessageBuffer.rcvMessage->len);
2077             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2078                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_RESP");
2079                 return -6;
2080             }
2081             break;
2082         }
2083         case RIC_E2_SETUP_FAILURE : {
2084             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2085                 break;
2086             }
2087             message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
2088             message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment(rmrMessageBuffer.rcvMessage->len);
2089             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2090                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_FAILURE");
2091                 return -6;
2092             }
2093             break;
2094         }
2095         case RIC_ERROR_INDICATION: {
2096             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication]->Increment();
2097             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication]->Increment(rmrMessageBuffer.rcvMessage->len);
2098             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2099                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_ERROR_INDICATION");
2100                 return -6;
2101             }
2102             break;
2103         }
2104         case RIC_SUB_REQ: {
2105             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
2106             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment(rmrMessageBuffer.rcvMessage->len);
2107             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2108                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_REQ");
2109                 return -6;
2110             }
2111             break;
2112         }
2113         case RIC_SUB_DEL_REQ: {
2114             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
2115             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment(rmrMessageBuffer.rcvMessage->len);
2116             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2117                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_DEL_REQ");
2118                 return -6;
2119             }
2120             break;
2121         }
2122         case RIC_CONTROL_REQ: {
2123             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
2124             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment(rmrMessageBuffer.rcvMessage->len);
2125             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2126                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_CONTROL_REQ");
2127                 return -6;
2128             }
2129             break;
2130         }
2131         case RIC_SERVICE_QUERY: {
2132             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2133                 break;
2134             }
2135             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment();
2136             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment(rmrMessageBuffer.rcvMessage->len);
2137             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2138                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_QUERY");
2139                 return -6;
2140             }
2141             break;
2142         }
2143         case RIC_SERVICE_UPDATE_ACK: {
2144             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2145                 break;
2146             }
2147             message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
2148             message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment(rmrMessageBuffer.rcvMessage->len);
2149             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2150                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_ACK");
2151                 return -6;
2152             }
2153             break;
2154         }
2155         case RIC_SERVICE_UPDATE_FAILURE: {
2156             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2157                 break;
2158             }
2159             message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
2160             message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment(rmrMessageBuffer.rcvMessage->len);
2161             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2162                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_FAILURE");
2163                 return -6;
2164             }
2165             break;
2166         }
2167         case RIC_E2_RESET_REQ: {
2168             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2169                 break;
2170             }
2171             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
2172             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment(rmrMessageBuffer.rcvMessage->len);
2173             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2174                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET");
2175                 return -6;
2176             }
2177             break;
2178         }
2179         case RIC_E2_RESET_RESP: {
2180             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2181                 break;
2182             }
2183             message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
2184             message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment(rmrMessageBuffer.rcvMessage->len);
2185             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2186                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET_RESP");
2187                 return -6;
2188             }
2189             break;
2190         }
2191         case RIC_SCTP_CLEAR_ALL: {
2192             mdclog_write(MDCLOG_INFO, "RIC_SCTP_CLEAR_ALL");
2193             // loop on all keys and close socket and then erase all map.
2194             vector<char *> v;
2195             sctpMap->getKeys(v);
2196             for (auto const &iter : v) { //}; iter != sctpMap.end(); iter++) {
2197                 if (!boost::starts_with((string) (iter), "host:") && !boost::starts_with((string) (iter), "msg:")) {
2198                     auto *peerInfo = (ConnectedCU_t *) sctpMap->find(iter);
2199                     if (peerInfo == nullptr) {
2200                         continue;
2201                     }
2202                     close(peerInfo->fileDescriptor);
2203                     memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
2204                     message.message.direction = 'D';
2205                     message.message.time.tv_nsec = ts.tv_nsec;
2206                     message.message.time.tv_sec = ts.tv_sec;
2207
2208                     message.message.asnLength = rmrMessageBuffer.sendMessage->len =
2209                             snprintf((char *)rmrMessageBuffer.sendMessage->payload,
2210                                      256,
2211                                      "%s|RIC_SCTP_CLEAR_ALL",
2212                                      peerInfo->enodbName);
2213                     message.message.asndata = rmrMessageBuffer.sendMessage->payload;
2214                     mdclog_write(MDCLOG_INFO, "%s", message.message.asndata);
2215                     if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
2216                         mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
2217                     }
2218                     free(peerInfo);
2219                 }
2220             }
2221
2222             sleep(1);
2223             sctpMap->clear();
2224             break;
2225         }
2226         case E2_TERM_KEEP_ALIVE_REQ: {
2227             // send message back
2228             rmr_bytes2payload(rmrMessageBuffer.sendMessage,
2229                               (unsigned char *)rmrMessageBuffer.ka_message,
2230                               rmrMessageBuffer.ka_message_len);
2231             rmrMessageBuffer.sendMessage->mtype = E2_TERM_KEEP_ALIVE_RESP;
2232             rmrMessageBuffer.sendMessage->state = 0;
2233             static unsigned char tx[32];
2234             auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2235             rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, txLen);
2236             rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2237             if (rmrMessageBuffer.sendMessage == nullptr) {
2238                 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2239                 mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP RMR message returned NULL");
2240             } else if (rmrMessageBuffer.sendMessage->state != 0)  {
2241                 mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP, on RMR state = %d ( %s)",
2242                              rmrMessageBuffer.sendMessage->state, translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str());
2243             } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
2244                 mdclog_write(MDCLOG_DEBUG, "Got Keep Alive Request send : %s", rmrMessageBuffer.ka_message);
2245             }
2246
2247             break;
2248         }
2249         case RIC_HEALTH_CHECK_REQ: {
2250             // send message back
2251             rmr_bytes2payload(rmrMessageBuffer.sendMessage,
2252                               (unsigned char *)"OK",
2253                               2);
2254             rmrMessageBuffer.sendMessage->mtype = RIC_HEALTH_CHECK_RESP;
2255             rmrMessageBuffer.sendMessage->state = 0;
2256             static unsigned char tx[32];
2257             auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2258             rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, txLen);
2259             rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2260             if (rmrMessageBuffer.sendMessage == nullptr) {
2261                 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2262                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP RMR message returned NULL");
2263             } else if (rmrMessageBuffer.sendMessage->state != 0)  {
2264                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP, on RMR state = %d ( %s)",
2265                              rmrMessageBuffer.sendMessage->state, translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str());
2266             } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
2267                 mdclog_write(MDCLOG_DEBUG, "Got RIC_HEALTH_CHECK_REQ Request send : OK");
2268             }
2269
2270             break;
2271         }
2272
2273         default:
2274             mdclog_write(MDCLOG_WARN, "Message Type : %d is not seported", rmrMessageBuffer.rcvMessage->mtype);
2275             message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
2276             message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
2277             message.message.time.tv_nsec = ts.tv_nsec;
2278             message.message.time.tv_sec = ts.tv_sec;
2279             message.message.messageType = rmrMessageBuffer.rcvMessage->mtype;
2280
2281             buildJsonMessage(message);
2282
2283
2284             return -7;
2285     }
2286     if (mdclog_level_get() >= MDCLOG_DEBUG) {
2287         mdclog_write(MDCLOG_DEBUG, "EXIT OK from %s", __FUNCTION__);
2288     }
2289     return 0;
2290 }
2291
2292 /**
2293  * Send message to the CU that is not expecting for successful or unsuccessful results
2294  * @param messageBuffer
2295  * @param message
2296  * @param failedMsgId
2297  * @param sctpMap
2298  * @return
2299  */
2300 int sendDirectionalSctpMsg(RmrMessagesBuffer_t &messageBuffer,
2301                            ReportingMessages_t &message,
2302                            int failedMsgId,
2303                            Sctp_Map_t *sctpMap) {
2304
2305     getRequestMetaData(message, messageBuffer);
2306     if (mdclog_level_get() >= MDCLOG_INFO) {
2307         mdclog_write(MDCLOG_INFO, "send message to %s address", message.message.enodbName);
2308     }
2309
2310     auto rc = sendMessagetoCu(sctpMap, messageBuffer, message, failedMsgId);
2311     return rc;
2312 }
2313
2314 /**
2315  *
2316  * @param sctpMap
2317  * @param messageBuffer
2318  * @param message
2319  * @param failedMesgId
2320  * @return
2321  */
2322 int sendMessagetoCu(Sctp_Map_t *sctpMap,
2323                     RmrMessagesBuffer_t &messageBuffer,
2324                     ReportingMessages_t &message,
2325                     int failedMesgId) {
2326     // get the FD
2327     message.message.messageType = messageBuffer.rcvMessage->mtype;
2328     auto rc = sendSctpMsg(message.peerInfo, message, sctpMap);
2329     return rc;
2330 }
2331
2332
2333 /**
2334  *
2335  * @param epoll_fd
2336  * @param peerInfo
2337  * @param events
2338  * @param sctpMap
2339  * @param enodbName
2340  * @param msgType
2341  * @return
2342  */
2343 int addToEpoll(int epoll_fd,
2344                ConnectedCU_t *peerInfo,
2345                uint32_t events,
2346                Sctp_Map_t *sctpMap,
2347                char *enodbName,
2348                int msgType) {
2349     // Add to Epol
2350     struct epoll_event event{};
2351     event.data.ptr = peerInfo;
2352     event.events = events;
2353     if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, peerInfo->fileDescriptor, &event) < 0) {
2354         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2355             mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_ADD (may chack not to quit here), %s, %s %d",
2356                          strerror(errno), __func__, __LINE__);
2357         }
2358         close(peerInfo->fileDescriptor);
2359         if (enodbName != nullptr) {
2360             cleanHashEntry(peerInfo, sctpMap);
2361             char key[MAX_ENODB_NAME_SIZE * 2];
2362             snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", enodbName, msgType);
2363             if (mdclog_level_get() >= MDCLOG_DEBUG) {
2364                 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
2365             }
2366             auto tmp = sctpMap->find(key);
2367             if (tmp) {
2368                 free(tmp);
2369                 sctpMap->erase(key);
2370             }
2371         } else {
2372             peerInfo->enodbName[0] = 0;
2373         }
2374         mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_ADD (may chack not to quit here)");
2375         return -1;
2376     }
2377     return 0;
2378 }
2379
2380 /**
2381  *
2382  * @param epoll_fd
2383  * @param peerInfo
2384  * @param events
2385  * @param sctpMap
2386  * @param enodbName
2387  * @param msgType
2388  * @return
2389  */
2390 int modifyToEpoll(int epoll_fd,
2391                   ConnectedCU_t *peerInfo,
2392                   uint32_t events,
2393                   Sctp_Map_t *sctpMap,
2394                   char *enodbName,
2395                   int msgType) {
2396     // Add to Epol
2397     struct epoll_event event{};
2398     event.data.ptr = peerInfo;
2399     event.events = events;
2400     if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, peerInfo->fileDescriptor, &event) < 0) {
2401         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2402             mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_MOD (may chack not to quit here), %s, %s %d",
2403                          strerror(errno), __func__, __LINE__);
2404         }
2405         close(peerInfo->fileDescriptor);
2406         cleanHashEntry(peerInfo, sctpMap);
2407         char key[MAX_ENODB_NAME_SIZE * 2];
2408         snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", enodbName, msgType);
2409         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2410             mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
2411         }
2412         auto tmp = sctpMap->find(key);
2413         if (tmp) {
2414             free(tmp);
2415         }
2416         sctpMap->erase(key);
2417         mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_ADD (may chack not to quit here)");
2418         return -1;
2419     }
2420     return 0;
2421 }
2422
2423
// Sends the prepared sendMessage to the xApp layer over RMR, after first
// emitting the JSON trace record for it. Performs exactly one retry when
// RMR reports RMR_ERR_RETRY. On a NULL return from rmr_send_msg the buffer
// is re-allocated so the caller always keeps a usable sendMessage.
// Returns 0 on success, -1 when RMR returned NULL, or the non-zero RMR
// state code when the send failed.
int sendRmrMessage(RmrMessagesBuffer_t &rmrMessageBuffer, ReportingMessages_t &message) {
    buildJsonMessage(message);

    // rmr_send_msg consumes the buffer and returns a (possibly new) one;
    // always store the result back.
    rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);

    if (rmrMessageBuffer.sendMessage == nullptr) {
        // replace the consumed buffer so later sends do not crash
        rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
        mdclog_write(MDCLOG_ERR, "RMR failed send message returned with NULL pointer");
        return -1;
    }

    if (rmrMessageBuffer.sendMessage->state != 0) {
        char meid[RMR_MAX_MEID]{};
        if (rmrMessageBuffer.sendMessage->state == RMR_ERR_RETRY) {
            // brief pause before the single retry
            // NOTE(review): sleeps 5 microseconds but the failure log below
            // says "10 microseconds" — confirm which value is intended.
            usleep(5);
            rmrMessageBuffer.sendMessage->state = 0;
            mdclog_write(MDCLOG_INFO, "RETRY sending Message type %d to Xapp from %s",
                         rmrMessageBuffer.sendMessage->mtype,
                         rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
            rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
            if (rmrMessageBuffer.sendMessage == nullptr) {
                mdclog_write(MDCLOG_ERR, "RMR failed send message returned with NULL pointer");
                rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
                return -1;
            } else if (rmrMessageBuffer.sendMessage->state != 0) {
                mdclog_write(MDCLOG_ERR,
                             "Message state %s while sending request %d to Xapp from %s after retry of 10 microseconds",
                             translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str(),
                             rmrMessageBuffer.sendMessage->mtype,
                             rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
                auto rc = rmrMessageBuffer.sendMessage->state;
                return rc;
            }
        } else {
            // non-retryable error: report and surface the RMR state code
            mdclog_write(MDCLOG_ERR, "Message state %s while sending request %d to Xapp from %s",
                         translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str(),
                         rmrMessageBuffer.sendMessage->mtype,
                         rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
            return rmrMessageBuffer.sendMessage->state;
        }
    }
    return 0;
}
2467
2468 void buildJsonMessage(ReportingMessages_t &message) {
2469     if (jsonTrace) {
2470         message.outLen = sizeof(message.base64Data);
2471         base64::encode((const unsigned char *) message.message.asndata,
2472                        (const int) message.message.asnLength,
2473                        message.base64Data,
2474                        message.outLen);
2475         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2476             mdclog_write(MDCLOG_DEBUG, "Tracing: ASN length = %d, base64 message length = %d ",
2477                          (int) message.message.asnLength,
2478                          (int) message.outLen);
2479         }
2480
2481         snprintf(message.buffer, sizeof(message.buffer),
2482                  "{\"header\": {\"ts\": \"%ld.%09ld\","
2483                  "\"ranName\": \"%s\","
2484                  "\"messageType\": %d,"
2485                  "\"direction\": \"%c\"},"
2486                  "\"base64Length\": %d,"
2487                  "\"asnBase64\": \"%s\"}",
2488                  message.message.time.tv_sec,
2489                  message.message.time.tv_nsec,
2490                  message.message.enodbName,
2491                  message.message.messageType,
2492                  message.message.direction,
2493                  (int) message.outLen,
2494                  message.base64Data);
2495         static src::logger_mt &lg = my_logger::get();
2496
2497         BOOST_LOG(lg) << message.buffer;
2498     }
2499 }
2500
2501
2502 /**
2503  * take RMR error code to string
2504  * @param state
2505  * @return
2506  */
2507 string translateRmrErrorMessages(int state) {
2508     string str = {};
2509     switch (state) {
2510         case RMR_OK:
2511             str = "RMR_OK - state is good";
2512             break;
2513         case RMR_ERR_BADARG:
2514             str = "RMR_ERR_BADARG - argument passd to function was unusable";
2515             break;
2516         case RMR_ERR_NOENDPT:
2517             str = "RMR_ERR_NOENDPT - send//call could not find an endpoint based on msg type";
2518             break;
2519         case RMR_ERR_EMPTY:
2520             str = "RMR_ERR_EMPTY - msg received had no payload; attempt to send an empty message";
2521             break;
2522         case RMR_ERR_NOHDR:
2523             str = "RMR_ERR_NOHDR - message didn't contain a valid header";
2524             break;
2525         case RMR_ERR_SENDFAILED:
2526             str = "RMR_ERR_SENDFAILED - send failed; errno has nano reason";
2527             break;
2528         case RMR_ERR_CALLFAILED:
2529             str = "RMR_ERR_CALLFAILED - unable to send call() message";
2530             break;
2531         case RMR_ERR_NOWHOPEN:
2532             str = "RMR_ERR_NOWHOPEN - no wormholes are open";
2533             break;
2534         case RMR_ERR_WHID:
2535             str = "RMR_ERR_WHID - wormhole id was invalid";
2536             break;
2537         case RMR_ERR_OVERFLOW:
2538             str = "RMR_ERR_OVERFLOW - operation would have busted through a buffer/field size";
2539             break;
2540         case RMR_ERR_RETRY:
2541             str = "RMR_ERR_RETRY - request (send/call/rts) failed, but caller should retry (EAGAIN for wrappers)";
2542             break;
2543         case RMR_ERR_RCVFAILED:
2544             str = "RMR_ERR_RCVFAILED - receive failed (hard error)";
2545             break;
2546         case RMR_ERR_TIMEOUT:
2547             str = "RMR_ERR_TIMEOUT - message processing call timed out";
2548             break;
2549         case RMR_ERR_UNSET:
2550             str = "RMR_ERR_UNSET - the message hasn't been populated with a transport buffer";
2551             break;
2552         case RMR_ERR_TRUNC:
2553             str = "RMR_ERR_TRUNC - received message likely truncated";
2554             break;
2555         case RMR_ERR_INITFAILED:
2556             str = "RMR_ERR_INITFAILED - initialisation of something (probably message) failed";
2557             break;
2558         case RMR_ERR_NOTSUPP:
2559             str = "RMR_ERR_NOTSUPP - the request is not supported, or RMr was not initialised for the request";
2560             break;
2561         default:
2562             char buf[128]{};
2563             snprintf(buf, sizeof buf, "UNDOCUMENTED RMR_ERR : %d", state);
2564             str = buf;
2565             break;
2566     }
2567     return str;
2568 }
2569
2570