1 // Copyright 2019 AT&T Intellectual Property
2 // Copyright 2019 Nokia
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
16 // This source code is part of the near-RT RIC (RAN Intelligent Controller)
17 // platform project (RICP).
19 // TODO: High-level file comment.
23 #include <3rdparty/oranE2/RANfunctions-List.h>
24 #include "sctpThread.h"
25 #include "BuildRunName.h"
27 //#include "3rdparty/oranE2SM/E2SM-gNB-NRT-RANfunction-Definition.h"
28 //#include "BuildXml.h"
29 //#include "pugixml/src/pugixml.hpp"
32 #include <sys/inotify.h>
39 //using namespace std::placeholders;
40 using namespace boost::filesystem;
41 using namespace prometheus;
49 // need to expose without the include of gcov
50 extern "C" void __gcov_flush(void);
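// The environment variable named below (CONFIG_MAP_NAME) is expected to hold the path
// of the mounted config-map file that carries the dynamic log-level setting.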
51 #define LOG_FILE_CONFIG_MAP "CONFIG_MAP_NAME"
53 static void catch_function(int signal) {
59 BOOST_LOG_INLINE_GLOBAL_LOGGER_DEFAULT(my_logger, src::logger_mt)
61 boost::shared_ptr<sinks::synchronous_sink<sinks::text_file_backend>> boostLogger;
62 double cpuClock = 0.0;
63 bool jsonTrace = false;
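// Verify that the config-map file exists and, if so, start the background watcher
// that tracks log-level changes at runtime.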
66 static int enable_log_change_notify(const char* fileName)
70 if ( lstat(fileName,&fileInfo) == 0 )
72 ret = register_log_change_notify(fileName);
78 static int register_log_change_notify(const char *fileName)
80 pthread_attr_t cb_attr;
82 pthread_attr_init(&cb_attr);
83 pthread_attr_setdetachstate(&cb_attr,PTHREAD_CREATE_DETACHED);
84 return pthread_create(&tid, &cb_attr,&monitor_loglevel_change_handler,(void *)strdup(fileName));
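// Detached-thread entry point: watches the config-map's directory with inotify
// (IN_MOVED_TO | IN_CLOSE_WRITE) and re-applies the log level whenever the file is rewritten.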
88 static void * monitor_loglevel_change_handler(void* arg)
90 char *fileName = (char*) arg;
91 int ifd; // the inotify file des
92 int wfd; // the watched file des
94 char rbuf[4096]; // large read buffer as the event is var len
97 struct timeval timeout;
98 char* dname=NULL; // directory name
99 char* bname = NULL; // basename
101 char* log_level=NULL;
103 dname = strdup( fileName); // defrock the file name into dir and basename
104 if( (tok = strrchr( dname, '/' )) != NULL ) {
106 bname = strdup( tok+1 );
110 ifd = inotify_init1( 0 ); // initialise watcher setting blocking read (no option)
112 fprintf( stderr, "### ERR ### unable to initialise file watch %s\n", strerror( errno ) );
114 wfd = inotify_add_watch( ifd, dname, IN_MOVED_TO | IN_CLOSE_WRITE ); // we only care about close write changes
117 fprintf( stderr, "### ERR ### unable to add watch on config file %s: %s\n", fileName, strerror( errno ) );
121 memset( &timeout, 0, sizeof(timeout) );
126 res = select (ifd + 1, &fds, NULL, NULL, &timeout);
129 n = read( ifd, rbuf, sizeof( rbuf ) ); // read the event
131 if( errno == EAGAIN ) {
133 fprintf( stderr, "### CRIT ### config listener read err: %s\n", strerror( errno ) );
138 //Retrieving Log Level from configmap by parsing configmap file
139 log_level = parse_file(fileName);
140 update_mdc_log_level_severity(log_level); //setting log level
144 inotify_rm_watch(ifd,wfd);
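// Map the config-map log-level value ("1".."4") to an mdclog severity and apply it;
// a missing or unrecognized value falls back to MDCLOG_ERR.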
154 void update_mdc_log_level_severity(char* log_level)
156 mdclog_severity_t level = MDCLOG_ERR;
158 if(log_level == NULL)
160 printf("### ERR ### Invalid Log-Level Configuration in ConfigMap, Default Log-Level Applied: %d\n",level);
162 else if(strcasecmp(log_level,"1")==0)
166 else if(strcasecmp(log_level,"2")==0)
170 else if(strcasecmp(log_level,"3")==0)
174 else if(strcasecmp(log_level,"4")==0)
176 level = MDCLOG_DEBUG;
179 mdclog_level_set(level);
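// Scan the config-map file line by line for the "log-level" key and return a
// heap-allocated copy of its value.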
181 static char* parse_file(char* filename)
185 char *string_match = "log-level";
187 FILE *file = fopen ( filename, "r" );
191 while ( fgets ( line, sizeof line, file ) != NULL )
193 token = strtok(line, search);
194 if(token != NULL && strcmp(token,string_match)==0)
197 token = strtok(NULL, search);
198 token = strtok(token, "\n");//removing newline if any
205 return(strdup(token));
210 char *read_env_param(const char*envkey)
214 char *value = getenv(envkey);
216 return strdup(value);
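// Read the config-map path from the environment, apply the configured log level once,
// then register the inotify watcher so later edits take effect without a restart.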
221 void dynamic_log_level_change()
223 char *logFile_Name = read_env_param(LOG_FILE_CONFIG_MAP);
224 char* log_level_init=NULL;
227 log_level_init = parse_file(logFile_Name);
228 update_mdc_log_level_severity(log_level_init); //setting log level
229 free(log_level_init);
232 enable_log_change_notify(logFile_Name);
238 int log_change_monitor = 0;
240 mdclog_attr_init(&attr);
241 mdclog_attr_set_ident(attr, "E2Terminator");
243 if(mdclog_format_initialize(log_change_monitor)!=0)
244 mdclog_write(MDCLOG_ERR, "Failed to initialize MDC log format !!!");
245 dynamic_log_level_change();
246 mdclog_attr_destroy(attr);
248 auto start_time = std::chrono::high_resolution_clock::now();
249 typedef std::chrono::duration<double, std::ratio<1,1>> seconds_t;
252 return seconds_t(std::chrono::high_resolution_clock::now() - start_time).count();
255 double approx_CPU_MHz(unsigned sleepTime) {
256 using namespace std::chrono_literals;
258 uint64_t cycles_start = rdtscp(aux);
259 double time_start = age();
260 std::this_thread::sleep_for(sleepTime * 1ms);
261 uint64_t elapsed_cycles = rdtscp(aux) - cycles_start;
262 double elapsed_time = age() - time_start;
263 return elapsed_cycles / elapsed_time;
266 //std::atomic<int64_t> rmrCounter{0};
267 std::atomic<int64_t> num_of_messages{0};
268 std::atomic<int64_t> num_of_XAPP_messages{0};
269 static long transactionCounter = 0;
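// Create the SCTP listening socket (IPv6 any-address), bind it to the configured port,
// switch it to non-blocking mode, and register it with the epoll instance.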
271 int buildListeningPort(sctp_params_t &sctpParams) {
272 sctpParams.listenFD = socket(AF_INET6, SOCK_STREAM, IPPROTO_SCTP);
273 if (sctpParams.listenFD <= 0) {
274 mdclog_write(MDCLOG_ERR, "Error Opening socket, %s", strerror(errno));
278 struct sockaddr_in6 serverAddress {};
279 serverAddress.sin6_family = AF_INET6;
280 serverAddress.sin6_addr = in6addr_any;
281 serverAddress.sin6_port = htons(sctpParams.sctpPort);
282 if (bind(sctpParams.listenFD, (SA *)&serverAddress, sizeof(serverAddress)) < 0 ) {
283 mdclog_write(MDCLOG_ERR, "Error binding port %d. %s", sctpParams.sctpPort, strerror(errno));
286 if (setSocketNoBlocking(sctpParams.listenFD) == -1) {
287 //mdclog_write(MDCLOG_ERR, "Error binding. %s", strerror(errno));
290 if (mdclog_level_get() >= MDCLOG_DEBUG) {
291 struct sockaddr_in6 clientAddress {};
292 socklen_t len = sizeof(clientAddress);
293 getsockname(sctpParams.listenFD, (SA *)&clientAddress, &len);
295 inet_ntop(AF_INET6, &clientAddress.sin6_addr, buff, sizeof(buff));
296 mdclog_write(MDCLOG_DEBUG, "My address: %s, port %d\n", buff, htons(clientAddress.sin6_port));
299 if (listen(sctpParams.listenFD, SOMAXCONN) < 0) {
300 mdclog_write(MDCLOG_ERR, "Error listening. %s\n", strerror(errno));
303 struct epoll_event event {};
304 event.events = EPOLLIN | EPOLLET;
305 event.data.fd = sctpParams.listenFD;
307 // add listening port to epoll
308 if (epoll_ctl(sctpParams.epoll_fd, EPOLL_CTL_ADD, sctpParams.listenFD, &event)) {
309 printf("Failed to add descriptor to epoll\n");
310 mdclog_write(MDCLOG_ERR, "Failed to add descriptor to epoll. %s\n", strerror(errno));
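// Read the E2Term configuration file into sctpParams (RMR port, log volume, local IP,
// SCTP port, external FQDN, pod name, trace flag, Prometheus port) and set up the
// rotating Boost.Log file sink on the configured volume.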
317 int buildConfiguration(sctp_params_t &sctpParams) {
318 path p = (sctpParams.configFilePath + "/" + sctpParams.configFileName).c_str();
320 const int size = 2048;
321 auto fileSize = file_size(p);
322 if (fileSize > size) {
323 mdclog_write(MDCLOG_ERR, "File %s larger than %d", p.string().c_str(), size);
327 mdclog_write(MDCLOG_ERR, "Configuration file %s does not exist", p.string().c_str());
332 if (conf.openConfigFile(p.string()) == -1) {
333 mdclog_write(MDCLOG_ERR, "Failed to open config file %s, %s",
334 p.string().c_str(), strerror(errno));
337 int rmrPort = conf.getIntValue("nano");
339 mdclog_write(MDCLOG_ERR, "illegal RMR port ");
342 sctpParams.rmrPort = (uint16_t)rmrPort;
343 snprintf(sctpParams.rmrAddress, sizeof(sctpParams.rmrAddress), "%d", (int) (sctpParams.rmrPort));
344 auto tmpStr = conf.getStringValue("volume");
345 if (tmpStr.length() == 0) {
346 mdclog_write(MDCLOG_ERR, "illegal volume.");
350 char tmpLogFilespec[VOLUME_URL_SIZE];
351 tmpLogFilespec[0] = 0;
352 sctpParams.volume[0] = 0;
353 snprintf(sctpParams.volume, VOLUME_URL_SIZE, "%s", tmpStr.c_str());
354 // copy the name to temp file as well
355 snprintf(tmpLogFilespec, VOLUME_URL_SIZE, "%s", tmpStr.c_str());
358 // define the file name in the tmp directory under the volume
359 strcat(tmpLogFilespec,"/tmp/E2Term_%Y-%m-%d_%H-%M-%S.%N.tmpStr");
361 sctpParams.myIP = conf.getStringValue("local-ip");
362 if (sctpParams.myIP.length() == 0) {
363 mdclog_write(MDCLOG_ERR, "illegal local-ip.");
367 int sctpPort = conf.getIntValue("sctp-port");
368 if (sctpPort == -1) {
369 mdclog_write(MDCLOG_ERR, "illegal SCTP port ");
372 sctpParams.sctpPort = (uint16_t)sctpPort;
374 sctpParams.fqdn = conf.getStringValue("external-fqdn");
375 if (sctpParams.fqdn.length() == 0) {
376 mdclog_write(MDCLOG_ERR, "illegal external-fqdn");
380 std::string pod = conf.getStringValue("pod_name");
382 if (pod.length() == 0) {
383 mdclog_write(MDCLOG_ERR, "illegal pod_name in config file");
386 auto *podName = getenv(pod.c_str());
387 if (podName == nullptr) {
388 mdclog_write(MDCLOG_ERR, "illegal pod_name or environment variable does not exist : %s", pod.c_str());
392 sctpParams.podName.assign(podName);
393 if (sctpParams.podName.length() == 0) {
394 mdclog_write(MDCLOG_ERR, "illegal pod_name");
399 tmpStr = conf.getStringValue("trace");
400 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
401 if ((tmpStr.compare("start")) == 0) {
402 mdclog_write(MDCLOG_INFO, "Trace set to: start");
403 sctpParams.trace = true;
404 } else if ((tmpStr.compare("stop")) == 0) {
405 mdclog_write(MDCLOG_INFO, "Trace set to: stop");
406 sctpParams.trace = false;
408 mdclog_write(MDCLOG_ERR, "Trace was set to wrong value %s, set to stop", tmpStr.c_str());
409 sctpParams.trace = false;
411 jsonTrace = sctpParams.trace;
413 sctpParams.epollTimeOut = -1;
415 tmpStr = conf.getStringValue("prometheusPort");
416 if (tmpStr.length() != 0) {
417 sctpParams.prometheusPort = tmpStr;
420 sctpParams.ka_message_length = snprintf(sctpParams.ka_message, KA_MESSAGE_SIZE, "{\"address\": \"%s:%d\","
422 "\"pod_name\": \"%s\"}",
423 (const char *)sctpParams.myIP.c_str(),
425 sctpParams.fqdn.c_str(),
426 sctpParams.podName.c_str());
428 if (mdclog_level_get() >= MDCLOG_INFO) {
429 mdclog_write(MDCLOG_DEBUG,"RMR Port: %s", to_string(sctpParams.rmrPort).c_str());
430 mdclog_write(MDCLOG_DEBUG,"LogLevel: %s", to_string(sctpParams.logLevel).c_str());
431 mdclog_write(MDCLOG_DEBUG,"volume: %s", sctpParams.volume);
432 mdclog_write(MDCLOG_DEBUG,"tmpLogFilespec: %s", tmpLogFilespec);
433 mdclog_write(MDCLOG_DEBUG,"my ip: %s", sctpParams.myIP.c_str());
434 mdclog_write(MDCLOG_DEBUG,"pod name: %s", sctpParams.podName.c_str());
436 mdclog_write(MDCLOG_INFO, "running parameters for instance : %s", sctpParams.ka_message);
439 // Files written to the current working directory
440 boostLogger = logging::add_file_log(
441 keywords::file_name = tmpLogFilespec, // to temp directory
442 keywords::rotation_size = 10 * 1024 * 1024,
443 keywords::time_based_rotation = sinks::file::rotation_at_time_interval(posix_time::hours(1)),
444 keywords::format = "%Message%"
445 //keywords::format = "[%TimeStamp%]: %Message%" // prefix each log record with a time stamp
448 // Setup a destination folder for collecting rotated (closed) files --since the same volume can use rename()
449 boostLogger->locked_backend()->set_file_collector(sinks::file::make_collector(
450 keywords::target = sctpParams.volume
453 // Upon restart, scan the directory for files matching the file_name pattern
454 boostLogger->locked_backend()->scan_for_files();
456 // Enable auto-flushing after each log record written
457 if (mdclog_level_get() >= MDCLOG_DEBUG) {
458 boostLogger->locked_backend()->auto_flush(true);
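// Register the per-pod E2T message counter family and start the Prometheus exposer
// (pull mode) on the configured port.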
464 void startPrometheus(sctp_params_t &sctpParams) {
465 sctpParams.prometheusFamily = &BuildCounter()
467 .Help("E2T message counter")
468 .Labels({{"POD_NAME", sctpParams.podName}})
469 .Register(*sctpParams.prometheusRegistry);
471 string prometheusPath = sctpParams.prometheusPort + "," + "[::]:" + sctpParams.prometheusPort;
472 if (mdclog_level_get() >= MDCLOG_DEBUG) {
473 mdclog_write(MDCLOG_DEBUG, "Start Prometheus Pull mode on %s", prometheusPath.c_str());
475 sctpParams.prometheusExposer = new Exposer(prometheusPath, 1);
476 sctpParams.prometheusExposer->RegisterCollectable(sctpParams.prometheusRegistry);
480 int main(const int argc, char **argv) {
481 sctp_params_t sctpParams;
483 int e2_test_main(const int argc, char **argv, sctp_params_t &sctpParams) {
487 std::random_device device{};
488 std::mt19937 generator(device());
489 std::uniform_int_distribution<long> distribution(1, (long) 1e12);
490 transactionCounter = distribution(generator);
494 // uint32_t aux1 = 0;
495 // st = rdtscp(aux1);
497 unsigned num_cpus = std::thread::hardware_concurrency();
499 if (std::signal(SIGINT, catch_function) == SIG_ERR) {
500 mdclog_write(MDCLOG_ERR, "Error initializing SIGINT");
503 if (std::signal(SIGABRT, catch_function)== SIG_ERR) {
504 mdclog_write(MDCLOG_ERR, "Error initializing SIGABRT");
507 if (std::signal(SIGTERM, catch_function)== SIG_ERR) {
508 mdclog_write(MDCLOG_ERR, "Error initializing SIGTERM");
512 cpuClock = approx_CPU_MHz(100);
514 mdclog_write(MDCLOG_DEBUG, "CPU speed %11.11f", cpuClock);
516 auto result = parse(argc, argv, sctpParams);
518 if (buildConfiguration(sctpParams) != 0) {
522 //auto registry = std::make_shared<Registry>();
523 sctpParams.prometheusRegistry = std::make_shared<Registry>();
525 //sctpParams.prometheusFamily = new Family<Counter>("E2T", "E2T message counter", {{"E", sctpParams.podName}});
527 startPrometheus(sctpParams);
530 sctpParams.epoll_fd = epoll_create1(0);
531 if (sctpParams.epoll_fd == -1) {
532 mdclog_write(MDCLOG_ERR, "failed to open epoll descriptor");
536 getRmrContext(sctpParams);
538 if (sctpParams.rmrCtx == nullptr) {
539 close(sctpParams.epoll_fd);
543 if (buildInotify(sctpParams) == -1) {
544 close(sctpParams.rmrListenFd);
545 rmr_close(sctpParams.rmrCtx);
546 close(sctpParams.epoll_fd);
550 if (buildListeningPort(sctpParams) != 0) {
551 close(sctpParams.rmrListenFd);
552 rmr_close(sctpParams.rmrCtx);
553 close(sctpParams.epoll_fd);
557 sctpParams.sctpMap = new mapWrapper();
559 std::vector<std::thread> threads(num_cpus);
560 // std::vector<std::thread> threads;
563 for (unsigned int i = 0; i < num_cpus; i++) {
564 threads[i] = std::thread(listener, &sctpParams);
569 int rc = pthread_setaffinity_np(threads[i].native_handle(), sizeof(cpu_set_t), &cpuset);
571 mdclog_write(MDCLOG_ERR, "Error calling pthread_setaffinity_np: %d", rc);
576 //loop over term_init until first message from xApp
577 handleTermInit(sctpParams);
579 for (auto &t : threads) {
585 void handleTermInit(sctp_params_t &sctpParams) {
586 sendTermInit(sctpParams);
587 //send to e2 manager init of e2 term
592 auto xappMessages = num_of_XAPP_messages.load(std::memory_order_acquire);
593 if (xappMessages > 0) {
594 if (mdclog_level_get() >= MDCLOG_INFO) {
595 mdclog_write(MDCLOG_INFO, "Got a message from some application, stop sending E2_TERM_INIT");
601 if (count % 1000 == 0) {
602 mdclog_write(MDCLOG_ERR, "Got no messages from any xApp");
603 sendTermInit(sctpParams);
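// Build an E2_TERM_INIT RMR message carrying the keep-alive JSON payload and send it,
// logging periodically while the send keeps failing.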
608 void sendTermInit(sctp_params_t &sctpParams) {
609 rmr_mbuf_t *msg = rmr_alloc_msg(sctpParams.rmrCtx, sctpParams.ka_message_length);
612 msg->mtype = E2_TERM_INIT;
614 rmr_bytes2payload(msg, (unsigned char *)sctpParams.ka_message, sctpParams.ka_message_length);
615 static unsigned char tx[32];
616 auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
617 rmr_bytes2xact(msg, tx, txLen);
618 msg = rmr_send_msg(sctpParams.rmrCtx, msg);
619 if (msg == nullptr) {
620 msg = rmr_alloc_msg(sctpParams.rmrCtx, sctpParams.ka_message_length);
621 } else if (msg->state == 0) {
623 if (mdclog_level_get() >= MDCLOG_INFO) {
624 mdclog_write(MDCLOG_INFO, "E2_TERM_INIT successfully sent ");
628 if (count % 100 == 0) {
629 mdclog_write(MDCLOG_ERR, "Error sending E2_TERM_INIT cause : %s ", translateRmrErrorMessages(msg->state).c_str());
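// Parse the command-line options (config file path and name) with cxxopts; -h prints usage.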
644 cxxopts::ParseResult parse(int argc, char *argv[], sctp_params_t &sctpParams) {
645 cxxopts::Options options(argv[0], "e2 term help");
646 options.positional_help("[optional args]").show_positional_help();
647 options.allow_unrecognised_options().add_options()
648 ("p,path", "config file path", cxxopts::value<std::string>(sctpParams.configFilePath)->default_value("config"))
649 ("f,file", "config file name", cxxopts::value<std::string>(sctpParams.configFileName)->default_value("config.conf"))
650 ("h,help", "Print help");
652 auto result = options.parse(argc, (const char **&)argv);
654 if (result.count("help")) {
655 std::cout << options.help({""}) << std::endl;
664 * @return -1 on failure, 0 on success
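 *
 * Creates a non-blocking inotify instance, adds a watch on the configuration
 * directory, and registers the inotify FD with the epoll loop.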
666 int buildInotify(sctp_params_t &sctpParams) {
667 sctpParams.inotifyFD = inotify_init1(IN_NONBLOCK);
668 if (sctpParams.inotifyFD == -1) {
669 mdclog_write(MDCLOG_ERR, "Failed to init inotify (inotify_init1) %s", strerror(errno));
673 sctpParams.inotifyWD = inotify_add_watch(sctpParams.inotifyFD,
674 (const char *)sctpParams.configFilePath.c_str(),
675 (unsigned)IN_OPEN | (unsigned)IN_CLOSE_WRITE | (unsigned)IN_CLOSE_NOWRITE); //IN_CLOSE = (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE)
676 if (sctpParams.inotifyWD == -1) {
677 mdclog_write(MDCLOG_ERR, "Failed to add directory : %s to inotify (inotify_add_watch) %s",
678 sctpParams.configFilePath.c_str(),
680 close(sctpParams.inotifyFD);
684 struct epoll_event event{};
685 event.events = (EPOLLIN);
686 event.data.fd = sctpParams.inotifyFD;
687 // add listening RMR FD to epoll
688 if (epoll_ctl(sctpParams.epoll_fd, EPOLL_CTL_ADD, sctpParams.inotifyFD, &event)) {
689 mdclog_write(MDCLOG_ERR, "Failed to add inotify FD to epoll");
690 close(sctpParams.inotifyFD);
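// Worker-thread main loop: tags the MDC context, allocates per-thread RMR buffers,
// then waits on epoll and dispatches new SCTP connections, SCTP data, RMR messages
// from xApps, and configuration-change notifications.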
701 void listener(sctp_params_t *params) {
702 int num_of_SCTP_messages = 0;
703 auto totalTime = 0.0;
704 std::thread::id this_id = std::this_thread::get_id();
706 auto pod_name = std::getenv("POD_NAME");
707 auto container_name = std::getenv("CONTAINER_NAME");
708 auto service_name = std::getenv("SERVICE_NAME");
709 auto host_name = std::getenv("HOST_NAME");
710 auto system_name = std::getenv("SYSTEM_NAME");
711 auto pid = std::to_string(getpid()).c_str();
712 streambuf *oldCout = cout.rdbuf();
713 ostringstream memCout;
715 cout.rdbuf(memCout.rdbuf());
717 //return to the normal cout
721 memcpy(tid, memCout.str().c_str(), memCout.str().length() < 32 ? memCout.str().length() : 31);
722 tid[memCout.str().length() < 32 ? memCout.str().length() : 31] = 0; // keep the terminator inside the 32-byte tid buffer
723 mdclog_mdc_add("SYSTEM_NAME", system_name);
724 mdclog_mdc_add("HOST_NAME", host_name);
725 mdclog_mdc_add("SERVICE_NAME", service_name);
726 mdclog_mdc_add("CONTAINER_NAME", container_name);
727 mdclog_mdc_add("POD_NAME", pod_name);
728 mdclog_mdc_add("PID", pid);
730 if (mdclog_level_get() >= MDCLOG_DEBUG) {
731 mdclog_write(MDCLOG_DEBUG, "started thread number %s", tid);
734 RmrMessagesBuffer_t rmrMessageBuffer{};
735 //create and init RMR
736 rmrMessageBuffer.rmrCtx = params->rmrCtx;
738 auto *events = (struct epoll_event *) calloc(MAXEVENTS, sizeof(struct epoll_event));
739 struct timespec end{0, 0};
740 struct timespec start{0, 0};
742 rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
743 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
745 memcpy(rmrMessageBuffer.ka_message, params->ka_message, params->ka_message_length);
746 rmrMessageBuffer.ka_message_len = params->ka_message_length;
747 rmrMessageBuffer.ka_message[rmrMessageBuffer.ka_message_len] = 0;
749 if (mdclog_level_get() >= MDCLOG_DEBUG) {
750 mdclog_write(MDCLOG_DEBUG, "keep alive message is : %s", rmrMessageBuffer.ka_message);
753 ReportingMessages_t message {};
755 // for (int i = 0; i < MAX_RMR_BUFF_ARRAY; i++) {
756 // rmrMessageBuffer.rcvBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
757 // rmrMessageBuffer.sendBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
761 if (mdclog_level_get() >= MDCLOG_DEBUG) {
762 mdclog_write(MDCLOG_DEBUG, "Start EPOLL Wait. Timeout = %d", params->epollTimeOut);
765 auto numOfEvents = epoll_wait(params->epoll_fd, events, MAXEVENTS, params->epollTimeOut);
767 auto numOfEvents = 1;
769 if (numOfEvents == 0) { // time out
770 if (mdclog_level_get() >= MDCLOG_DEBUG) {
771 mdclog_write(MDCLOG_DEBUG, "got epoll timeout");
774 } else if (numOfEvents < 0) {
775 if (errno == EINTR) {
776 if (mdclog_level_get() >= MDCLOG_DEBUG) {
777 mdclog_write(MDCLOG_DEBUG, "got EINTR : %s", strerror(errno));
781 mdclog_write(MDCLOG_ERR, "Epoll wait failed, errno = %s", strerror(errno));
784 for (auto i = 0; i < numOfEvents; i++) {
785 if (mdclog_level_get() >= MDCLOG_DEBUG) {
786 mdclog_write(MDCLOG_DEBUG, "handling epoll event %d out of %d", i + 1, numOfEvents);
788 clock_gettime(CLOCK_MONOTONIC, &message.message.time);
789 start.tv_sec = message.message.time.tv_sec;
790 start.tv_nsec = message.message.time.tv_nsec;
793 if ((events[i].events & EPOLLERR) || (events[i].events & EPOLLHUP)) {
794 handlepoll_error(events[i], message, rmrMessageBuffer, params);
795 } else if (events[i].events & EPOLLOUT) {
796 handleEinprogressMessages(events[i], message, rmrMessageBuffer, params);
797 } else if (params->listenFD == events[i].data.fd) {
798 if (mdclog_level_get() >= MDCLOG_INFO) {
799 mdclog_write(MDCLOG_INFO, "New connection request from sctp network\n");
801 // new connection is requested from RAN start build connection
803 struct sockaddr in_addr {};
805 char hostBuff[NI_MAXHOST];
806 char portBuff[NI_MAXSERV];
808 in_len = sizeof(in_addr);
809 auto *peerInfo = (ConnectedCU_t *)calloc(1, sizeof(ConnectedCU_t));
810 if(peerInfo == nullptr){
811 mdclog_write(MDCLOG_ERR, "calloc failed");
814 peerInfo->sctpParams = params;
815 peerInfo->fileDescriptor = accept(params->listenFD, &in_addr, &in_len);
816 if (peerInfo->fileDescriptor == -1) {
817 if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) {
818 /* We have processed all incoming connections. */
821 mdclog_write(MDCLOG_ERR, "Accept error, errno = %s", strerror(errno));
825 if (setSocketNoBlocking(peerInfo->fileDescriptor) == -1) {
826 mdclog_write(MDCLOG_ERR, "setSocketNoBlocking failed to set new connection %s on port %s\n", hostBuff, portBuff);
827 close(peerInfo->fileDescriptor);
830 auto ans = getnameinfo(&in_addr, in_len,
831 peerInfo->hostName, NI_MAXHOST,
832 peerInfo->portNumber, NI_MAXSERV, (unsigned )((unsigned int)NI_NUMERICHOST | (unsigned int)NI_NUMERICSERV));
834 mdclog_write(MDCLOG_ERR, "Failed to get info on connection request. %s\n", strerror(errno));
835 close(peerInfo->fileDescriptor);
838 if (mdclog_level_get() >= MDCLOG_DEBUG) {
839 mdclog_write(MDCLOG_DEBUG, "Accepted connection on descriptor %d (host=%s, port=%s)\n", peerInfo->fileDescriptor, peerInfo->hostName, peerInfo->portNumber);
841 peerInfo->isConnected = false;
842 peerInfo->gotSetup = false;
843 if (addToEpoll(params->epoll_fd,
846 params->sctpMap, nullptr,
852 } else if (params->rmrListenFd == events[i].data.fd) {
853 // got message from XAPP
854 //num_of_XAPP_messages.fetch_add(1, std::memory_order_release);
855 num_of_messages.fetch_add(1, std::memory_order_release);
856 if (mdclog_level_get() >= MDCLOG_DEBUG) {
857 mdclog_write(MDCLOG_DEBUG, "new RMR message");
859 if (receiveXappMessages(params->sctpMap,
861 message.message.time) != 0) {
862 mdclog_write(MDCLOG_ERR, "Error handling Xapp message");
864 } else if (params->inotifyFD == events[i].data.fd) {
865 mdclog_write(MDCLOG_INFO, "Got event from inotify (configuration update)");
866 handleConfigChange(params);
868 /* We have data on the fd waiting to be read. Read and display it.
869 * We must read whatever data is available completely, as we are running
870 * in edge-triggered mode and won't get a notification again for the same data. */
871 num_of_messages.fetch_add(1, std::memory_order_release);
872 if (mdclog_level_get() >= MDCLOG_DEBUG) {
873 mdclog_write(MDCLOG_DEBUG, "new message from SCTP, epoll flags are : %0x", events[i].events);
875 receiveDataFromSctp(&events[i],
877 num_of_SCTP_messages,
879 message.message.time);
882 clock_gettime(CLOCK_MONOTONIC, &end);
883 if (mdclog_level_get() >= MDCLOG_INFO) {
884 totalTime += ((end.tv_sec + 1.0e-9 * end.tv_nsec) -
885 ((double) start.tv_sec + 1.0e-9 * start.tv_nsec));
887 if (mdclog_level_get() >= MDCLOG_DEBUG) {
888 mdclog_write(MDCLOG_DEBUG, "message handling is %ld seconds %ld nanoseconds",
889 end.tv_sec - start.tv_sec,
890 end.tv_nsec - start.tv_nsec);
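// Drain the inotify queue; when the watched configuration file itself is close-written,
// re-read the "loglevel" and "trace" settings and apply them.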
903 void handleConfigChange(sctp_params_t *sctpParams) {
904 char buf[4096] __attribute__ ((aligned(__alignof__(struct inotify_event))));
905 const struct inotify_event *event;
908 path p = (sctpParams->configFilePath + "/" + sctpParams->configFileName).c_str();
909 auto endlessLoop = true;
910 while (endlessLoop) {
911 auto len = read(sctpParams->inotifyFD, buf, sizeof buf);
913 if (errno != EAGAIN) {
914 mdclog_write(MDCLOG_ERR, "read %s ", strerror(errno));
924 for (ptr = buf; ptr < buf + len; ptr += sizeof(struct inotify_event) + event->len) {
925 event = (const struct inotify_event *)ptr;
926 if (event->mask & (uint32_t)IN_ISDIR) {
930 // the directory name
931 if (sctpParams->inotifyWD == event->wd) {
935 auto retVal = strcmp(sctpParams->configFileName.c_str(), event->name);
940 // only the file we want
941 if (event->mask & (uint32_t)IN_CLOSE_WRITE) {
942 if (mdclog_level_get() >= MDCLOG_INFO) {
943 mdclog_write(MDCLOG_INFO, "Configuration file changed");
946 const int size = 2048;
947 auto fileSize = file_size(p);
948 if (fileSize > size) {
949 mdclog_write(MDCLOG_ERR, "File %s larger than %d", p.string().c_str(), size);
953 mdclog_write(MDCLOG_ERR, "Configuration file %s does not exist", p.string().c_str());
958 if (conf.openConfigFile(p.string()) == -1) {
959 mdclog_write(MDCLOG_ERR, "Failed to open config file %s, %s",
960 p.string().c_str(), strerror(errno));
963 auto tmpStr = conf.getStringValue("loglevel");
964 if (tmpStr.length() == 0) {
965 mdclog_write(MDCLOG_ERR, "illegal loglevel. Set loglevel to MDCLOG_INFO");
968 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
970 if ((tmpStr.compare("debug")) == 0) {
971 mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_DEBUG");
972 sctpParams->logLevel = MDCLOG_DEBUG;
973 } else if ((tmpStr.compare("info")) == 0) {
974 mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_INFO");
975 sctpParams->logLevel = MDCLOG_INFO;
976 } else if ((tmpStr.compare("warning")) == 0) {
977 mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_WARN");
978 sctpParams->logLevel = MDCLOG_WARN;
979 } else if ((tmpStr.compare("error")) == 0) {
980 mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_ERR");
981 sctpParams->logLevel = MDCLOG_ERR;
983 mdclog_write(MDCLOG_ERR, "illegal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
984 sctpParams->logLevel = MDCLOG_INFO;
986 mdclog_level_set(sctpParams->logLevel);
987 tmpStr = conf.getStringValue("trace");
988 if (tmpStr.length() == 0) {
989 mdclog_write(MDCLOG_ERR, "illegal trace. Set trace to stop");
993 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
994 if ((tmpStr.compare("start")) == 0) {
995 mdclog_write(MDCLOG_INFO, "Trace set to: start");
996 sctpParams->trace = true;
997 } else if ((tmpStr.compare("stop")) == 0) {
998 mdclog_write(MDCLOG_INFO, "Trace set to: stop");
999 sctpParams->trace = false;
1001 mdclog_write(MDCLOG_ERR, "Trace was set to wrong value %s, set to stop", tmpStr.c_str());
1002 sctpParams->trace = false;
1004 jsonTrace = sctpParams->trace;
1007 endlessLoop = false;
1017 * @param rmrMessageBuffer
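 *
 * Completes a non-blocking SCTP connect: on SO_ERROR failure it reports
 * RIC_SCTP_CONNECTION_FAILURE to the xApp; on success it re-arms the socket for
 * EPOLLIN and sends the buffered SETUP request.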
1020 void handleEinprogressMessages(struct epoll_event &event,
1021 ReportingMessages_t &message,
1022 RmrMessagesBuffer_t &rmrMessageBuffer,
1023 sctp_params_t *params) {
1024 auto *peerInfo = (ConnectedCU_t *)event.data.ptr;
1025 memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
1027 mdclog_write(MDCLOG_INFO, "file descriptor %d got EPOLLOUT", peerInfo->fileDescriptor);
1029 socklen_t retValLen = 0;
1030 auto rc = getsockopt(peerInfo->fileDescriptor, SOL_SOCKET, SO_ERROR, &retVal, &retValLen);
1031 if (rc != 0 || retVal != 0) {
1034 rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
1035 "%s|Failed SCTP Connection, after EINPROGRESS the getsockopt%s",
1036 peerInfo->enodbName, strerror(errno));
1037 } else if (retVal != 0) {
1038 rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
1039 "%s|Failed SCTP Connection after EINPROGRESS, SO_ERROR",
1040 peerInfo->enodbName);
1043 message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1044 message.message.asnLength = rmrMessageBuffer.sendMessage->len;
1045 mdclog_write(MDCLOG_ERR, "%s", rmrMessageBuffer.sendMessage->payload);
1046 message.message.direction = 'N';
1047 if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
1048 mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
1051 memset(peerInfo->asnData, 0, peerInfo->asnLength);
1052 peerInfo->asnLength = 0;
1053 peerInfo->mtype = 0;
1057 peerInfo->isConnected = true;
1059 if (modifyToEpoll(params->epoll_fd, peerInfo, (EPOLLIN | EPOLLET), params->sctpMap, peerInfo->enodbName,
1060 peerInfo->mtype) != 0) {
1061 mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_MOD");
1065 message.message.asndata = (unsigned char *)peerInfo->asnData;
1066 message.message.asnLength = peerInfo->asnLength;
1067 message.message.messageType = peerInfo->mtype;
1068 memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
1069 num_of_messages.fetch_add(1, std::memory_order_release);
1070 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1071 mdclog_write(MDCLOG_DEBUG, "send the delayed SETUP/ENDC SETUP to sctp for %s",
1072 message.message.enodbName);
1074 if (sendSctpMsg(peerInfo, message, params->sctpMap) != 0) {
1075 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1076 mdclog_write(MDCLOG_DEBUG, "Error write to SCTP %s %d", __func__, __LINE__);
1081 memset(peerInfo->asnData, 0, peerInfo->asnLength);
1082 peerInfo->asnLength = 0;
1083 peerInfo->mtype = 0;
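// Handle EPOLLERR/EPOLLHUP: for an SCTP peer, notify the xApp of the connection
// failure and remove the peer from the map; errors on the RMR FD are only logged.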
1087 void handlepoll_error(struct epoll_event &event,
1088 ReportingMessages_t &message,
1089 RmrMessagesBuffer_t &rmrMessageBuffer,
1090 sctp_params_t *params) {
1091 if (event.data.fd != params->rmrListenFd) {
1092 auto *peerInfo = (ConnectedCU_t *)event.data.ptr;
1093 mdclog_write(MDCLOG_ERR, "epoll error, events %0x on fd %d, RAN NAME : %s",
1094 event.events, peerInfo->fileDescriptor, peerInfo->enodbName);
1097 rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
1098 "%s|Failed SCTP Connection",
1099 peerInfo->enodbName);
1100 message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1101 message.message.asnLength = rmrMessageBuffer.sendMessage->len;
1103 memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
1104 message.message.direction = 'N';
1105 if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
1106 mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
1109 close(peerInfo->fileDescriptor);
1110 params->sctpMap->erase(peerInfo->enodbName);
1111 cleanHashEntry((ConnectedCU_t *) event.data.ptr, params->sctpMap);
1113 mdclog_write(MDCLOG_ERR, "epoll error, events %0x on RMR FD", event.events);
1121 int setSocketNoBlocking(int socket) {
1122 auto flags = fcntl(socket, F_GETFL, 0);
1125 mdclog_write(MDCLOG_ERR, "%s, %s", __FUNCTION__, strerror(errno));
1129 flags = (unsigned) flags | (unsigned) O_NONBLOCK;
1130 if (fcntl(socket, F_SETFL, flags) == -1) {
1131 mdclog_write(MDCLOG_ERR, "%s, %s", __FUNCTION__, strerror(errno));
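// Remove both map entries for a peer: the "host:<name>:<port>" key and the RAN (enodbName) key.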
1143 void cleanHashEntry(ConnectedCU_t *val, Sctp_Map_t *m) {
1145 auto port = (uint16_t) strtol(val->portNumber, &dummy, 10);
1146 char searchBuff[2048]{};
1148 snprintf(searchBuff, sizeof searchBuff, "host:%s:%d", val->hostName, port);
1149 m->erase(searchBuff);
1151 m->erase(val->enodbName);
1159 * @param fd file descriptor
1160 * @param data the asn data to send
1161 * @param len length of the data
1162 * @param enodbName the enodbName as in the map for printing purpose
1163 * @param m map host information
1164 * @param mtype message number
1165 * @return 0 on success, a negative number on failure
1167 int sendSctpMsg(ConnectedCU_t *peerInfo, ReportingMessages_t &message, Sctp_Map_t *m) {
1168 auto loglevel = mdclog_level_get();
1169 int fd = peerInfo->fileDescriptor;
1170 if (loglevel >= MDCLOG_DEBUG) {
1171 mdclog_write(MDCLOG_DEBUG, "Send SCTP message for CU %s, %s",
1172 message.message.enodbName, __FUNCTION__);
1176 if (send(fd,message.message.asndata, message.message.asnLength,MSG_NOSIGNAL) < 0) {
1177 if (errno == EINTR) {
1180 mdclog_write(MDCLOG_ERR, "error writing to CU a message, %s ", strerror(errno));
1181 if (!peerInfo->isConnected) {
1182 mdclog_write(MDCLOG_ERR, "connection to CU %s is still in progress.", message.message.enodbName);
1187 cleanHashEntry(peerInfo, m);
1189 char key[MAX_ENODB_NAME_SIZE * 2];
1190 snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", message.message.enodbName,
1191 message.message.messageType);
1192 if (loglevel >= MDCLOG_DEBUG) {
1193 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
1195 auto tmp = m->find(key);
1204 message.message.direction = 'D';
1205 // send report.buffer of size
1206 buildJsonMessage(message);
1208 if (loglevel >= MDCLOG_DEBUG) {
1209 mdclog_write(MDCLOG_DEBUG,
1210 "SCTP message for CU %s sent from %s",
1211 message.message.enodbName,
1221 * @param rmrMessageBuffer
1223 void getRequestMetaData(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1224 message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
1225 message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
1227 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1228 mdclog_write(MDCLOG_DEBUG, "Message from Xapp RAN name = %s message length = %ld",
1229 message.message.enodbName, (unsigned long) message.message.asnLength);
1239 * @param numOfMessages
1240 * @param rmrMessageBuffer
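 *
 * Reads the SCTP payload straight into the RMR send buffer, decodes the E2AP PDU
 * (aligned PER), and dispatches it by PDU type; on EOF the peer is reported to the
 * xApp and cleaned up.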
1244 int receiveDataFromSctp(struct epoll_event *events,
1245 Sctp_Map_t *sctpMap,
1247 RmrMessagesBuffer_t &rmrMessageBuffer,
1248 struct timespec &ts) {
1249 /* We have data on the fd waiting to be read. Read and display it.
1250 * We must read whatever data is available completely, as we are running
1251 * in edge-triggered mode and won't get a notification again for the same data. */
1252 ReportingMessages_t message {};
1254 auto loglevel = mdclog_level_get();
1256 // get the identity of the interface
1257 message.peerInfo = (ConnectedCU_t *)events->data.ptr;
1259 struct timespec start{0, 0};
1260 struct timespec decodeStart{0, 0};
1261 struct timespec end{0, 0};
1263 E2AP_PDU_t *pdu = nullptr;
1266 if (loglevel >= MDCLOG_DEBUG) {
1267 mdclog_write(MDCLOG_DEBUG, "Start Read from SCTP %d fd", message.peerInfo->fileDescriptor);
1268 clock_gettime(CLOCK_MONOTONIC, &start);
1270 // read the buffer directly to rmr payload
1271 message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1272 message.message.asnLength = rmrMessageBuffer.sendMessage->len =
1273 read(message.peerInfo->fileDescriptor, rmrMessageBuffer.sendMessage->payload, RECEIVE_SCTP_BUFFER_SIZE);
1275 if (loglevel >= MDCLOG_DEBUG) {
1276 mdclog_write(MDCLOG_DEBUG, "Finish Read from SCTP %d fd message length = %ld",
1277 message.peerInfo->fileDescriptor, message.message.asnLength);
1280 memcpy(message.message.enodbName, message.peerInfo->enodbName, sizeof(message.peerInfo->enodbName));
1281 message.message.direction = 'U';
1282 message.message.time.tv_nsec = ts.tv_nsec;
1283 message.message.time.tv_sec = ts.tv_sec;
1285 if (message.message.asnLength < 0) {
1286 if (errno == EINTR) {
1289 /* If errno == EAGAIN, that means we have read all
1290 data. So go back to the main loop. */
1291 if (errno != EAGAIN) {
1292 mdclog_write(MDCLOG_ERR, "Read error, %s ", strerror(errno));
1294 } else if (loglevel >= MDCLOG_DEBUG) {
1295 mdclog_write(MDCLOG_DEBUG, "EAGAIN - descriptor = %d", message.peerInfo->fileDescriptor);
1298 } else if (message.message.asnLength == 0) {
1299 /* End of file. The remote has closed the connection. */
1300 if (loglevel >= MDCLOG_INFO) {
1301 mdclog_write(MDCLOG_INFO, "End of file. Remote closed the connection - descriptor = %d",
1302 message.peerInfo->fileDescriptor);
1308 if (loglevel >= MDCLOG_DEBUG) {
1309 char printBuffer[RECEIVE_SCTP_BUFFER_SIZE]{};
1310 char *tmp = printBuffer;
1311 for (size_t i = 0; i < (size_t)message.message.asnLength; ++i) {
1312 snprintf(tmp, 3, "%02x", message.message.asndata[i]);
1315 printBuffer[message.message.asnLength] = 0;
1316 clock_gettime(CLOCK_MONOTONIC, &end);
1317 mdclog_write(MDCLOG_DEBUG, "Before decoding E2AP PDU for : %s, Read time is : %ld seconds, %ld nanoseconds",
1318 message.peerInfo->enodbName, end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
1319 mdclog_write(MDCLOG_DEBUG, "PDU buffer length = %ld, data = : %s", message.message.asnLength,
1321 clock_gettime(CLOCK_MONOTONIC, &decodeStart);
1324 auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1325 message.message.asndata, message.message.asnLength);
1326 if (rval.code != RC_OK) {
1327 mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2AP PDU from RAN : %s", rval.code,
1328 message.peerInfo->enodbName);
1332 if (loglevel >= MDCLOG_DEBUG) {
1333 clock_gettime(CLOCK_MONOTONIC, &end);
1334 mdclog_write(MDCLOG_DEBUG, "After decoding E2AP PDU for : %s, decode time is : %ld seconds, %ld nanoseconds",
1335 message.peerInfo->enodbName, end.tv_sec - decodeStart.tv_sec, end.tv_nsec - decodeStart.tv_nsec);
1338 FILE *stream = open_memstream(&printBuffer, &size);
1339 asn_fprint(stream, &asn_DEF_E2AP_PDU, pdu);
1340 mdclog_write(MDCLOG_DEBUG, "Decoded E2AP PDU : %s", printBuffer);
1341 clock_gettime(CLOCK_MONOTONIC, &decodeStart);
1347 switch (pdu->present) {
1348 case E2AP_PDU_PR_initiatingMessage: {//initiating message
1349 asnInitiatingRequest(pdu, sctpMap,message, rmrMessageBuffer);
1352 case E2AP_PDU_PR_successfulOutcome: { //successful outcome
1353 asnSuccessfulMsg(pdu, sctpMap, message, rmrMessageBuffer);
1356 case E2AP_PDU_PR_unsuccessfulOutcome: { //Unsuccessful Outcome
1357 asnUnSuccsesfulMsg(pdu, sctpMap, message, rmrMessageBuffer);
1361 mdclog_write(MDCLOG_ERR, "Unknown index %d in E2AP PDU", pdu->present);
1364 if (loglevel >= MDCLOG_DEBUG) {
1365 clock_gettime(CLOCK_MONOTONIC, &end);
1366 mdclog_write(MDCLOG_DEBUG,
1367 "After processing message and sent to rmr for : %s, Read time is : %ld seconds, %ld nanoseconds",
1368 message.peerInfo->enodbName, end.tv_sec - decodeStart.tv_sec, end.tv_nsec - decodeStart.tv_nsec);
1371 if (pdu != nullptr) {
1372 ASN_STRUCT_RESET(asn_DEF_E2AP_PDU, pdu);
1373 //ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);
1379 if (loglevel >= MDCLOG_INFO) {
1380 mdclog_write(MDCLOG_INFO, "Closed connection - descriptor = %d", message.peerInfo->fileDescriptor);
1382 message.message.asnLength = rmrMessageBuffer.sendMessage->len =
1383 snprintf((char *)rmrMessageBuffer.sendMessage->payload,
1385 "%s|CU disconnected unexpectedly",
1386 message.peerInfo->enodbName);
1387 message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1389 if (sendRequestToXapp(message,
1390 RIC_SCTP_CONNECTION_FAILURE,
1391 rmrMessageBuffer) != 0) {
1392 mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
1395 /* Closing descriptor make epoll remove it from the set of descriptors which are monitored. */
1396 close(message.peerInfo->fileDescriptor);
1397 cleanHashEntry((ConnectedCU_t *) events->data.ptr, sctpMap);
1399 if (loglevel >= MDCLOG_DEBUG) {
1400 clock_gettime(CLOCK_MONOTONIC, &end);
1401 mdclog_write(MDCLOG_DEBUG, "from receive SCTP to send RMR time is %ld seconds and %ld nanoseconds",
1402 end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
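// XER-encode the E2AP setup PDU, prefix it with this E2Term's "<ip>:<rmr-port>|" address,
// and forward it to the E2 Manager over RMR, retrying once on RMR_ERR_RETRY.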
1408 static void buildAndSendSetupRequest(ReportingMessages_t &message,
1409 RmrMessagesBuffer_t &rmrMessageBuffer,
1411 string const &messageName,
1412 string const &ieName,
1413 vector<string> &functionsToAdd_v,
1414 vector<string> &functionsToModified_v*/) {
1415 auto logLevel = mdclog_level_get();
1416 // now we can send the data to e2Mgr
1419 auto buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
1420 unsigned char *buffer = nullptr;
1421 buffer = (unsigned char *) calloc(buffer_size, sizeof(unsigned char));
1424 mdclog_write(MDCLOG_ERR, "Allocating buffer for %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1428 er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu, buffer, buffer_size);
1429 if (er.encoded == -1) {
1430 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1432 } else if (er.encoded > (ssize_t) buffer_size) {
1433 buffer_size = er.encoded + 128;
1434 mdclog_write(MDCLOG_WARN, "Buffer of size %d is too small for %s. Reallocating buffer of size %d",
1436 asn_DEF_E2AP_PDU.name, buffer_size);
1437 buffer_size = er.encoded + 128;
1439 unsigned char *newBuffer = nullptr;
1440 newBuffer = (unsigned char *) realloc(buffer, buffer_size);
1444 mdclog_write(MDCLOG_ERR, "Reallocating buffer for %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1451 buffer[er.encoded] = '\0';
1456 string res((char *)buffer);
1457 res.erase(std::remove(res.begin(), res.end(), '\n'), res.end());
1458 res.erase(std::remove(res.begin(), res.end(), '\t'), res.end());
1459 res.erase(std::remove(res.begin(), res.end(), ' '), res.end());
1462 // if (!functionsToAdd_v.empty() || !functionsToModified_v.empty()) {
1463 // res = buildXmlData(messageName, ieName, functionsToAdd_v, functionsToModified_v, buffer, (size_t) er.encoded);
1466 // if (res.length() == 0) {
1467 // rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, buffer_size + 256);
1468 // rmrMsg->len = snprintf((char *) rmrMsg->payload, RECEIVE_SCTP_BUFFER_SIZE * 2, "%s:%d|%s",
1469 // message.peerInfo->sctpParams->myIP.c_str(),
1470 // message.peerInfo->sctpParams->rmrPort,
1473 rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, (int)res.length() + 256);
1474 rmrMsg->len = snprintf((char *) rmrMsg->payload, res.length() + 256, "%s:%d|%s",
1475 message.peerInfo->sctpParams->myIP.c_str(),
1476 message.peerInfo->sctpParams->rmrPort,
1480 if (logLevel >= MDCLOG_DEBUG) {
1481 mdclog_write(MDCLOG_DEBUG, "Setup request of size %d :\n %s\n", rmrMsg->len, rmrMsg->payload);
1484 rmrMsg->mtype = message.message.messageType;
1486 rmr_bytes2meid(rmrMsg, (unsigned char *) message.message.enodbName, strlen(message.message.enodbName));
1488 static unsigned char tx[32];
1489 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1490 rmr_bytes2xact(rmrMsg, tx, strlen((const char *) tx));
1492 rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
1493 if (rmrMsg == nullptr) {
1494 mdclog_write(MDCLOG_ERR, "RMR failed to send, returned nullptr");
1495 } else if (rmrMsg->state != 0) {
1496 char meid[RMR_MAX_MEID]{};
1497 if (rmrMsg->state == RMR_ERR_RETRY) {
1500 mdclog_write(MDCLOG_INFO, "RETRY sending Message %d to Xapp from %s",
1501 rmrMsg->mtype, rmr_get_meid(rmrMsg, (unsigned char *) meid));
1502 rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
1503 if (rmrMsg == nullptr) {
1504 mdclog_write(MDCLOG_ERR, "RMR retry failed, send returned nullptr");
1505 } else if (rmrMsg->state != 0) {
1506 mdclog_write(MDCLOG_ERR,
1507 "RMR Retry failed %s sending request %d to Xapp from %s",
1508 translateRmrErrorMessages(rmrMsg->state).c_str(),
1510 rmr_get_meid(rmrMsg, (unsigned char *) meid));
1513 mdclog_write(MDCLOG_ERR, "RMR failed: %s. sending request %d to Xapp from %s",
1514 translateRmrErrorMessages(rmrMsg->state).c_str(),
1516 rmr_get_meid(rmrMsg, (unsigned char *) meid));
1519 message.peerInfo->gotSetup = true;
1520 buildJsonMessage(message);
1522 if (rmrMsg != nullptr) {
1523 rmr_free_msg(rmrMsg);
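// Decode each RANfunction_Item's ranFunctionDefinition (E2SM-gNB-NRT, aligned PER),
// re-encode it as XER, and collect the resulting XML strings into runFunXML_v.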
1531 int RAN_Function_list_To_Vector(RANfunctions_List_t& list, vector <string> &runFunXML_v) {
1533 runFunXML_v.clear();
1534 for (auto j = 0; j < list.list.count; j++) {
1535 auto *raNfunctionItemIEs = (RANfunction_ItemIEs_t *)list.list.array[j];
1536 if (raNfunctionItemIEs->id == ProtocolIE_ID_id_RANfunction_Item &&
1537 (raNfunctionItemIEs->value.present == RANfunction_ItemIEs__value_PR_RANfunction_Item)) {
1539 E2SM_gNB_NRT_RANfunction_Definition_t *ranFunDef = nullptr;
1540 auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER,
1541 &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
1542 (void **)&ranFunDef,
1543 raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.buf,
1544 raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.size);
1545 if (rval.code != RC_OK) {
1546 mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2SM message from : %s",
1548 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name);
1552 auto xml_buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
1553 unsigned char xml_buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
1554 memset(xml_buffer, 0, RECEIVE_SCTP_BUFFER_SIZE * 2);
1556 auto er = asn_encode_to_buffer(nullptr,
1558 &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
1562 if (er.encoded == -1) {
1563 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s",
1564 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
1566 } else if (er.encoded > (ssize_t)xml_buffer_size) {
1567 mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, at %s line %d",
1568 (int) xml_buffer_size,
1569 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name, __func__, __LINE__);
1571 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1572 mdclog_write(MDCLOG_DEBUG, "Encoding E2SM %s PDU number %d : %s",
1573 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
1578 string runFuncs = (char *)(xml_buffer);
1579 runFunXML_v.emplace_back(runFuncs);
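// Walk the RICserviceUpdate IEs and fill the added/modified RAN-function vectors.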
1586 int collectServiceUpdate_RequestData(E2AP_PDU_t *pdu,
1587 Sctp_Map_t *sctpMap,
1588 ReportingMessages_t &message,
1589 vector <string> &RANfunctionsAdded_v,
1590 vector <string> &RANfunctionsModified_v) {
1591 memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
1592 for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.count; i++) {
1593 auto *ie = pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.array[i];
1594 if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
1595 if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctionsID_List) {
1596 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1597 mdclog_write(MDCLOG_DEBUG, "RAN function list has %d entries",
1598 ie->value.choice.RANfunctions_List.list.count);
1600 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
1604 } else if (ie->id == ProtocolIE_ID_id_RANfunctionsModified) {
1605 if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctions_List) {
1606 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1607 mdclog_write(MDCLOG_DEBUG, "RAN function list has %d entries",
1608 ie->value.choice.RANfunctions_List.list.count);
1610 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsModified_v) != 0 ) {
1616 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1617 mdclog_write(MDCLOG_DEBUG, "RAN function vector has %ld entries",
1618 RANfunctionsAdded_v.size());
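// Create one Prometheus counter per direction (IN/OUT), per metric (messages/bytes),
// and per E2AP procedure for this peer, labeled with its RAN name.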
1626 void buildPrometheusList(ConnectedCU_t *peerInfo, Family<Counter> *prometheusFamily) {
1627 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Messages"}});
1628 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Bytes"}});
1630 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Messages"}});
1631 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Bytes"}});
1633 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICindication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Messages"}});
1634 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICindication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Bytes"}});
1636 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Messages"}});
1637 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Bytes"}});
1639 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Messages"}});
1640 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Bytes"}});
1641 // ---------------------------------------------
1642 peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Messages"}});
1643 peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Bytes"}});
1645 peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Messages"}});
1646 peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Bytes"}});
1648 peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Messages"}});
1649 peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Bytes"}});
1651 peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Messages"}});
1652 peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Bytes"}});
1653 //-------------------------------------------------------------
1655 peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Messages"}});
1656 peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Bytes"}});
1658 peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Messages"}});
1659 peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Bytes"}});
1661 peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Messages"}});
1662 peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Bytes"}});
1664 //====================================================================================
1665 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Messages"}});
1666 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Bytes"}});
1668 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Messages"}});
1669 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Bytes"}});
1671 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Messages"}});
1672 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Bytes"}});
1674 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceQuery)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Messages"}});
1675 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceQuery)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Bytes"}});
1677 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Messages"}});
1678 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Bytes"}});
1680 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Messages"}});
1681 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Bytes"}});
1682 //---------------------------------------------------------------------------------------------------------
1683 peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Messages"}});
1684 peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Bytes"}});
1686 peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Messages"}});
1687 peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Bytes"}});
1689 peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Messages"}});
1690 peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Bytes"}});
1691 //----------------------------------------------------------------------------------------------------------------
1692 peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Messages"}});
1693 peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Bytes"}});
1695 peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Messages"}});
1696 peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Bytes"}});
1703 * @param RANfunctionsAdded_v
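 *
 * Extracts the RAN name from the GlobalE2node_ID IE, stores it in the reporting
 * message, and registers the peer in the SCTP map under that name.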
1706 int collectSetupRequestData(E2AP_PDU_t *pdu,
1707 Sctp_Map_t *sctpMap,
1708 ReportingMessages_t &message /*, vector <string> &RANfunctionsAdded_v*/) {
1709 memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
1710 for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.count; i++) {
1711 auto *ie = pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.array[i];
1712 if (ie->id == ProtocolIE_ID_id_GlobalE2node_ID) {
1713 // get the ran name for meid
1714 if (ie->value.present == E2setupRequestIEs__value_PR_GlobalE2node_ID) {
1715 if (buildRanName(message.peerInfo->enodbName, ie) < 0) {
1716 mdclog_write(MDCLOG_ERR, "Bad param in E2setupRequestIEs GlobalE2node_ID.\n");
1717 // no message will be sent
1721 memcpy(message.message.enodbName, message.peerInfo->enodbName, strlen(message.peerInfo->enodbName));
1722 sctpMap->setkey(message.message.enodbName, message.peerInfo);
1724 } /*else if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
1725 if (ie->value.present == E2setupRequestIEs__value_PR_RANfunctions_List) {
1726 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1727 mdclog_write(MDCLOG_DEBUG, "RAN function list has %d entries",
1728 ie->value.choice.RANfunctions_List.list.count);
1730 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
1736 // if (mdclog_level_get() >= MDCLOG_DEBUG) {
1737 // mdclog_write(MDCLOG_DEBUG, "RAN function vector has %ld entries",
1738 // RANfunctionsAdded_v.size());
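// XML_From_PER: re-encode the message held in rmrMessageBuffer.sendMessage in place, decoding
// the aligned-PER E2AP PDU and replacing the payload with its XER (XML) representation.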
1743 int XML_From_PER(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1744 E2AP_PDU_t *pdu = nullptr;
1746 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1747 mdclog_write(MDCLOG_DEBUG, "got PER message of size %d: %s",
1748 rmrMessageBuffer.sendMessage->len, rmrMessageBuffer.sendMessage->payload);
1750 auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1751 rmrMessageBuffer.sendMessage->payload, rmrMessageBuffer.sendMessage->len);
1752 if (rval.code != RC_OK) {
1753 mdclog_write(MDCLOG_ERR, "Error %d decoding (unpack) E2AP PDU received from %s",
1755 message.message.enodbName);
1759 int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
1760 auto er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu,
1761 rmrMessageBuffer.sendMessage->payload, buff_size);
1762 if (er.encoded == -1) {
1763 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1765 } else if (er.encoded > (ssize_t)buff_size) {
1766 mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, at %s line %d",
1767 buff_size,
1768 asn_DEF_E2AP_PDU.name,
1773 rmrMessageBuffer.sendMessage->len = er.encoded;
1782 * @param rmrMessageBuffer
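// asnInitiatingRequest: dispatch an incoming E2AP InitiatingMessage by procedure code. E2setup
// and RICserviceUpdate are handed to buildAndSendSetupRequest(); ErrorIndication, Reset and
// RICindication are forwarded to the xApp over RMR; per-procedure counters are updated on the way.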
1784 void asnInitiatingRequest(E2AP_PDU_t *pdu,
1785 Sctp_Map_t *sctpMap,
1786 ReportingMessages_t &message,
1787 RmrMessagesBuffer_t &rmrMessageBuffer) {
1788 auto logLevel = mdclog_level_get();
1789 auto procedureCode = ((InitiatingMessage_t *) pdu->choice.initiatingMessage)->procedureCode;
1790 if (logLevel >= MDCLOG_DEBUG) {
1791 mdclog_write(MDCLOG_DEBUG, "Initiating message %ld\n", procedureCode);
1793 switch (procedureCode) {
1794 case ProcedureCode_id_E2setup: {
1795 if (logLevel >= MDCLOG_DEBUG) {
1796 mdclog_write(MDCLOG_DEBUG, "Got E2setup");
1799 // vector <string> RANfunctionsAdded_v;
1800 // vector <string> RANfunctionsModified_v;
1801 // RANfunctionsAdded_v.clear();
1802 // RANfunctionsModified_v.clear();
1803 if (collectSetupRequestData(pdu, sctpMap, message) != 0) {
1807 buildPrometheusList(message.peerInfo, message.peerInfo->sctpParams->prometheusFamily);
1809 string messageName("E2setupRequest");
1810 string ieName("E2setupRequestIEs");
1811 message.message.messageType = RIC_E2_SETUP_REQ;
1812 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
1813 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment((double)message.message.asnLength);
1814 buildAndSendSetupRequest(message, rmrMessageBuffer, pdu);
1817 case ProcedureCode_id_RICserviceUpdate: {
1818 if (logLevel >= MDCLOG_DEBUG) {
1819 mdclog_write(MDCLOG_DEBUG, "Got RICserviceUpdate %s", message.message.enodbName);
1821 // vector <string> RANfunctionsAdded_v;
1822 // vector <string> RANfunctionsModified_v;
1823 // RANfunctionsAdded_v.clear();
1824 // RANfunctionsModified_v.clear();
1825 // if (collectServiceUpdate_RequestData(pdu, sctpMap, message,
1826 // RANfunctionsAdded_v, RANfunctionsModified_v) != 0) {
1830 string messageName("RICserviceUpdate");
1831 string ieName("RICserviceUpdateIEs");
1832 message.message.messageType = RIC_SERVICE_UPDATE;
1834 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
1835 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment((double)message.message.asnLength);
1837 buildAndSendSetupRequest(message, rmrMessageBuffer, pdu);
1840 case ProcedureCode_id_ErrorIndication: {
1841 if (logLevel >= MDCLOG_DEBUG) {
1842 mdclog_write(MDCLOG_DEBUG, "Got ErrorIndication %s", message.message.enodbName);
1845 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication]->Increment();
1846 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication]->Increment((double)message.message.asnLength);
1848 if (sendRequestToXapp(message, RIC_ERROR_INDICATION, rmrMessageBuffer) != 0) {
1849 mdclog_write(MDCLOG_ERR, "RIC_ERROR_INDICATION failed to send to xAPP");
1853 case ProcedureCode_id_Reset: {
1854 if (logLevel >= MDCLOG_DEBUG) {
1855 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
1858 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
1859 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment((double)message.message.asnLength);
1861 if (XML_From_PER(message, rmrMessageBuffer) < 0) {
1865 if (sendRequestToXapp(message, RIC_E2_RESET_REQ, rmrMessageBuffer) != 0) {
1866 mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_REQ message failed to send to xAPP");
1870 case ProcedureCode_id_RICindication: {
1871 if (logLevel >= MDCLOG_DEBUG) {
1872 mdclog_write(MDCLOG_DEBUG, "Got RICindication %s", message.message.enodbName);
1874 for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.count; i++) {
1875 auto messageSent = false;
1876 RICindication_IEs_t *ie = pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.array[i];
1877 if (logLevel >= MDCLOG_DEBUG) {
1878 mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1880 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1881 if (logLevel >= MDCLOG_DEBUG) {
1882 mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1884 if (ie->value.present == RICindication_IEs__value_PR_RICrequestID) {
1885 static unsigned char tx[32];
1886 message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_INDICATION;
1887 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1888 rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1889 rmr_bytes2meid(rmrMessageBuffer.sendMessage,
1890 (unsigned char *)message.message.enodbName,
1891 strlen(message.message.enodbName));
1892 rmrMessageBuffer.sendMessage->state = 0;
1893 rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1895 //ie->value.choice.RICrequestID.ricInstanceID;
1896 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1897 mdclog_write(MDCLOG_DEBUG, "sub id = %d, mtype = %d, ric instance id %ld, requestor id = %ld",
1898 rmrMessageBuffer.sendMessage->sub_id,
1899 rmrMessageBuffer.sendMessage->mtype,
1900 ie->value.choice.RICrequestID.ricInstanceID,
1901 ie->value.choice.RICrequestID.ricRequestorID);
1904 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICindication]->Increment();
1905 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICindication]->Increment((double)message.message.asnLength);
1907 sendRmrMessage(rmrMessageBuffer, message);
1910 mdclog_write(MDCLOG_ERR, "RIC request id missing; illegal request");
1920 mdclog_write(MDCLOG_ERR, "Undefined or unsupported message = %ld", procedureCode);
1921 message.message.messageType = 0; // no RMR message type yet
1923 buildJsonMessage(message);
1934 * @param rmrMessageBuffer
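// asnSuccessfulMsg: dispatch an incoming E2AP SuccessfulOutcome by procedure code; Reset,
// RICcontrol, RICsubscription and RICsubscriptionDelete acknowledgements are counted and
// forwarded to the xApp over RMR.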
1936 void asnSuccessfulMsg(E2AP_PDU_t *pdu,
1937 Sctp_Map_t *sctpMap,
1938 ReportingMessages_t &message,
1939 RmrMessagesBuffer_t &rmrMessageBuffer) {
1940 auto procedureCode = pdu->choice.successfulOutcome->procedureCode;
1941 auto logLevel = mdclog_level_get();
1942 if (logLevel >= MDCLOG_INFO) {
1943 mdclog_write(MDCLOG_INFO, "Successful Outcome %ld", procedureCode);
1945 switch (procedureCode) {
1946 case ProcedureCode_id_Reset: {
1947 if (logLevel >= MDCLOG_DEBUG) {
1948 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
1951 message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
1952 message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment((double)message.message.asnLength);
1954 if (XML_From_PER(message, rmrMessageBuffer) < 0) {
1957 if (sendRequestToXapp(message, RIC_E2_RESET_RESP, rmrMessageBuffer) != 0) {
1958 mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_RESP message failed to send to xAPP");
1962 case ProcedureCode_id_RICcontrol: {
1963 if (logLevel >= MDCLOG_DEBUG) {
1964 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
1967 i < pdu->choice.successfulOutcome->value.choice.RICcontrolAcknowledge.protocolIEs.list.count; i++) {
1968 auto messageSent = false;
1969 RICcontrolAcknowledge_IEs_t *ie = pdu->choice.successfulOutcome->value.choice.RICcontrolAcknowledge.protocolIEs.list.array[i];
1970 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1971 mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1973 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1974 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1975 mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1977 if (ie->value.present == RICcontrolAcknowledge_IEs__value_PR_RICrequestID) {
1978 message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_ACK;
1979 rmrMessageBuffer.sendMessage->state = 0;
1980 // rmrMessageBuffer.sendMessage->sub_id = (int) ie->value.choice.RICrequestID.ricRequestorID;
1981 rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1983 static unsigned char tx[32];
1984 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1985 rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1986 rmr_bytes2meid(rmrMessageBuffer.sendMessage,
1987 (unsigned char *)message.message.enodbName,
1988 strlen(message.message.enodbName));
1990 message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
1991 message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment((double)message.message.asnLength);
1993 sendRmrMessage(rmrMessageBuffer, message);
1996 mdclog_write(MDCLOG_ERR, "RIC request id missing; illegal request");
2006 case ProcedureCode_id_RICsubscription: {
2007 if (logLevel >= MDCLOG_DEBUG) {
2008 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
2011 message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
2012 message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment((double)message.message.asnLength);
2014 if (sendRequestToXapp(message, RIC_SUB_RESP, rmrMessageBuffer) != 0) {
2015 mdclog_write(MDCLOG_ERR, "Subscription successful message failed to send to xAPP");
2019 case ProcedureCode_id_RICsubscriptionDelete: {
2020 if (logLevel >= MDCLOG_DEBUG) {
2021 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
2024 message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
2025 message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment((double)message.message.asnLength);
2027 if (sendRequestToXapp(message, RIC_SUB_DEL_RESP, rmrMessageBuffer) != 0) {
2028 mdclog_write(MDCLOG_ERR, "Subscription delete successful message failed to send to xAPP");
2033 mdclog_write(MDCLOG_WARN, "Undefined or unsupported message = %ld", procedureCode);
2034 message.message.messageType = 0; // no RMR message type yet
2035 buildJsonMessage(message);
2046 * @param rmrMessageBuffer
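// asnUnSuccsesfulMsg: dispatch an incoming E2AP UnsuccessfulOutcome by procedure code;
// RICcontrol, RICsubscription and RICsubscriptionDelete failures are counted and forwarded
// to the xApp over RMR.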
2048 void asnUnSuccsesfulMsg(E2AP_PDU_t *pdu,
2049 Sctp_Map_t *sctpMap,
2050 ReportingMessages_t &message,
2051 RmrMessagesBuffer_t &rmrMessageBuffer) {
2052 auto procedureCode = pdu->choice.unsuccessfulOutcome->procedureCode;
2053 auto logLevel = mdclog_level_get();
2054 if (logLevel >= MDCLOG_INFO) {
2055 mdclog_write(MDCLOG_INFO, "Unsuccessful Outcome %ld", procedureCode);
2057 switch (procedureCode) {
2058 case ProcedureCode_id_RICcontrol: {
2059 if (logLevel >= MDCLOG_DEBUG) {
2060 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
2063 i < pdu->choice.unsuccessfulOutcome->value.choice.RICcontrolFailure.protocolIEs.list.count; i++) {
2064 auto messageSent = false;
2065 RICcontrolFailure_IEs_t *ie = pdu->choice.unsuccessfulOutcome->value.choice.RICcontrolFailure.protocolIEs.list.array[i];
2066 if (logLevel >= MDCLOG_DEBUG) {
2067 mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
2069 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
2070 if (logLevel >= MDCLOG_DEBUG) {
2071 mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
2073 if (ie->value.present == RICcontrolFailure_IEs__value_PR_RICrequestID) {
2074 message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_FAILURE;
2075 rmrMessageBuffer.sendMessage->state = 0;
2076 // rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricRequestorID;
2077 rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
2078 static unsigned char tx[32];
2079 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2080 rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
2081 rmr_bytes2meid(rmrMessageBuffer.sendMessage, (unsigned char *) message.message.enodbName,
2082 strlen(message.message.enodbName));
2084 message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
2085 message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment((double)message.message.asnLength);
2087 sendRmrMessage(rmrMessageBuffer, message);
2090 mdclog_write(MDCLOG_ERR, "RIC request id missing; illegal request");
2099 case ProcedureCode_id_RICsubscription: {
2100 if (logLevel >= MDCLOG_DEBUG) {
2101 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
2104 message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
2105 message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment((double)message.message.asnLength);
2107 if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
2108 mdclog_write(MDCLOG_ERR, "Subscription unsuccessful message failed to send to xAPP");
2112 case ProcedureCode_id_RICsubscriptionDelete: {
2113 if (logLevel >= MDCLOG_DEBUG) {
2114 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
2117 message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
2118 message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment((double)message.message.asnLength);
2120 if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
2121 mdclog_write(MDCLOG_ERR, "Subscription Delete unsuccessful message failed to send to xAPP");
2126 mdclog_write(MDCLOG_WARN, "Undefined or unsupported message = %ld", procedureCode);
2127 message.message.messageType = 0; // no RMR message type yet
2129 buildJsonMessage(message);
2140 * @param rmrMmessageBuffer
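// sendRequestToXapp: stamp the outgoing RMR message with the RAN name (MEID), the requested
// RMR message type and a fresh transaction id, then deliver it via sendRmrMessage().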
2143 int sendRequestToXapp(ReportingMessages_t &message,
2145 RmrMessagesBuffer_t &rmrMmessageBuffer) {
2146 rmr_bytes2meid(rmrMmessageBuffer.sendMessage,
2147 (unsigned char *)message.message.enodbName,
2148 strlen(message.message.enodbName));
2149 message.message.messageType = rmrMmessageBuffer.sendMessage->mtype = requestId;
2150 rmrMmessageBuffer.sendMessage->state = 0;
2151 static unsigned char tx[32];
2152 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2153 rmr_bytes2xact(rmrMmessageBuffer.sendMessage, tx, strlen((const char *) tx));
2155 auto rc = sendRmrMessage(rmrMmessageBuffer, message);
2161 * @param pSctpParams
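// getRmrContext: initialise the RMR context, wait until the routing table is ready, enable
// tracing and register the RMR receive file descriptor with the epoll instance.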
2163 void getRmrContext(sctp_params_t &pSctpParams) {
2164 pSctpParams.rmrCtx = nullptr;
2165 pSctpParams.rmrCtx = rmr_init(pSctpParams.rmrAddress, RECEIVE_XAPP_BUFFER_SIZE, RMRFL_NONE);
2166 if (pSctpParams.rmrCtx == nullptr) {
2167 mdclog_write(MDCLOG_ERR, "Failed to initialize RMR");
2171 rmr_set_stimeout(pSctpParams.rmrCtx, 0); // disable retries for any send operation
2172 // make sure the routing table exists and RMR is ready before we continue
2173 if (mdclog_level_get() >= MDCLOG_INFO) {
2174 mdclog_write(MDCLOG_INFO, "RMR initialized; waiting for rmr_ready");
2179 if ((rmrReady = rmr_ready(pSctpParams.rmrCtx)) == 0) {
2183 if (count % 60 == 0) {
2184 mdclog_write(MDCLOG_INFO, "waiting for RMR ready state for %d seconds", count);
2187 if (mdclog_level_get() >= MDCLOG_INFO) {
2188 mdclog_write(MDCLOG_INFO, "RMR running");
2190 rmr_init_trace(pSctpParams.rmrCtx, 200);
2191 // get the RMR fd for the epoll
2192 pSctpParams.rmrListenFd = rmr_get_rcvfd(pSctpParams.rmrCtx);
2193 struct epoll_event event{};
2194 // add RMR fd to epoll
2195 event.events = (EPOLLIN);
2196 event.data.fd = pSctpParams.rmrListenFd;
2197 // add listening RMR FD to epoll
2198 if (epoll_ctl(pSctpParams.epoll_fd, EPOLL_CTL_ADD, pSctpParams.rmrListenFd, &event)) {
2199 mdclog_write(MDCLOG_ERR, "Failed to add RMR descriptor to epoll");
2200 close(pSctpParams.rmrListenFd);
2201 rmr_close(pSctpParams.rmrCtx);
2202 pSctpParams.rmrCtx = nullptr;
2209 * @param rmrMessageBuffer
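// PER_FromXML: re-encode the message held in rmrMessageBuffer.rcvMessage in place, decoding
// the XER (XML) payload and replacing it with its aligned-PER representation.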
2212 int PER_FromXML(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
2213 E2AP_PDU_t *pdu = nullptr;
2215 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2216 mdclog_write(MDCLOG_DEBUG, "got XML format data from xApp of size %d: %s",
2217 rmrMessageBuffer.rcvMessage->len, rmrMessageBuffer.rcvMessage->payload);
2219 auto rval = asn_decode(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, (void **) &pdu,
2220 rmrMessageBuffer.rcvMessage->payload, rmrMessageBuffer.rcvMessage->len);
2221 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2222 mdclog_write(MDCLOG_DEBUG, "%s After decoding the XML to PDU", __func__ );
2224 if (rval.code != RC_OK) {
2225 mdclog_write(MDCLOG_ERR, "Error %d decoding (unpack) XML message from xApp for %s",
2227 message.message.enodbName);
2231 int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
2232 auto er = asn_encode_to_buffer(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, pdu,
2233 rmrMessageBuffer.rcvMessage->payload, buff_size);
2234 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2235 mdclog_write(MDCLOG_DEBUG, "%s After encoding PDU to PER", __func__ );
2237 if (er.encoded == -1) {
2238 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
2240 } else if (er.encoded > (ssize_t)buff_size) {
2241 mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, at %s line %d",
2242 buff_size,
2243 asn_DEF_E2AP_PDU.name,
2248 rmrMessageBuffer.rcvMessage->len = er.encoded;
2255 * @param rmrMessageBuffer
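// receiveXappMessages: main handler for RMR messages arriving from xApps. Looks up the target
// CU by MEID, converts XML payloads back to PER where needed, updates the outbound counters and
// sends the message to the RAN over SCTP; keep-alive and health-check requests are answered here.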
2259 int receiveXappMessages(Sctp_Map_t *sctpMap,
2260 RmrMessagesBuffer_t &rmrMessageBuffer,
2261 struct timespec &ts) {
2262 int loglevel = mdclog_level_get();
2263 if (rmrMessageBuffer.rcvMessage == nullptr) {
2265 mdclog_write(MDCLOG_ERR, "RMR message allocation failed, %s", strerror(errno));
2269 // if (loglevel >= MDCLOG_DEBUG) {
2270 // mdclog_write(MDCLOG_DEBUG, "Call to rmr_rcv_msg");
2272 rmrMessageBuffer.rcvMessage = rmr_rcv_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.rcvMessage);
2273 if (rmrMessageBuffer.rcvMessage == nullptr) {
2274 mdclog_write(MDCLOG_ERR, "RMR receive returned a null pointer; reallocating RMR message buffer");
2275 rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2278 ReportingMessages_t message;
2279 message.message.direction = 'D';
2280 message.message.time.tv_nsec = ts.tv_nsec;
2281 message.message.time.tv_sec = ts.tv_sec;
2283 // get message payload
2284 //auto msgData = msg->payload;
2285 if (rmrMessageBuffer.rcvMessage->state != 0) {
2286 mdclog_write(MDCLOG_ERR, "RMR received message with state = %d", rmrMessageBuffer.rcvMessage->state);
2289 rmr_get_meid(rmrMessageBuffer.rcvMessage, (unsigned char *)message.message.enodbName);
2290 message.peerInfo = (ConnectedCU_t *) sctpMap->find(message.message.enodbName);
2291 if (message.peerInfo == nullptr) {
2292 auto type = rmrMessageBuffer.rcvMessage->mtype;
2294 case RIC_SCTP_CLEAR_ALL:
2295 case E2_TERM_KEEP_ALIVE_REQ:
2296 case RIC_HEALTH_CHECK_REQ:
2299 mdclog_write(MDCLOG_ERR, "Failed to send message; no CU entry for %s", message.message.enodbName);
2304 if (rmrMessageBuffer.rcvMessage->mtype != RIC_HEALTH_CHECK_REQ) {
2305 num_of_XAPP_messages.fetch_add(1, std::memory_order_release);
2308 switch (rmrMessageBuffer.rcvMessage->mtype) {
2309 case RIC_E2_SETUP_RESP : {
2310 if (loglevel >= MDCLOG_DEBUG) {
2311 mdclog_write(MDCLOG_DEBUG, "RIC_E2_SETUP_RESP");
2313 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2316 message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
2317 message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment(rmrMessageBuffer.rcvMessage->len);
2318 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2319 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_RESP");
2324 case RIC_E2_SETUP_FAILURE : {
2325 if (loglevel >= MDCLOG_DEBUG) {
2326 mdclog_write(MDCLOG_DEBUG, "RIC_E2_SETUP_FAILURE");
2328 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2331 message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
2332 message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment(rmrMessageBuffer.rcvMessage->len);
2333 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2334 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_FAILURE");
2339 case RIC_ERROR_INDICATION: {
2340 if (loglevel >= MDCLOG_DEBUG) {
2341 mdclog_write(MDCLOG_DEBUG, "RIC_ERROR_INDICATION");
2343 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication]->Increment();
2344 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication]->Increment(rmrMessageBuffer.rcvMessage->len);
2345 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2346 mdclog_write(MDCLOG_ERR, "Failed to send RIC_ERROR_INDICATION");
2352 if (loglevel >= MDCLOG_DEBUG) {
2353 mdclog_write(MDCLOG_DEBUG, "RIC_SUB_REQ");
2355 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
2356 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment(rmrMessageBuffer.rcvMessage->len);
2357 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2358 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_REQ");
2363 case RIC_SUB_DEL_REQ: {
2364 if (loglevel >= MDCLOG_DEBUG) {
2365 mdclog_write(MDCLOG_DEBUG, "RIC_SUB_DEL_REQ");
2367 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
2368 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment(rmrMessageBuffer.rcvMessage->len);
2369 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2370 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_DEL_REQ");
2375 case RIC_CONTROL_REQ: {
2376 if (loglevel >= MDCLOG_DEBUG) {
2377 mdclog_write(MDCLOG_DEBUG, "RIC_CONTROL_REQ");
2379 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
2380 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment(rmrMessageBuffer.rcvMessage->len);
2381 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2382 mdclog_write(MDCLOG_ERR, "Failed to send RIC_CONTROL_REQ");
2387 case RIC_SERVICE_QUERY: {
2388 if (loglevel >= MDCLOG_DEBUG) {
2389 mdclog_write(MDCLOG_DEBUG, "RIC_SERVICE_QUERY");
2391 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2394 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment();
2395 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment(rmrMessageBuffer.rcvMessage->len);
2396 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2397 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_QUERY");
2402 case RIC_SERVICE_UPDATE_ACK: {
2403 if (loglevel >= MDCLOG_DEBUG) {
2404 mdclog_write(MDCLOG_DEBUG, "RIC_SERVICE_UPDATE_ACK");
2406 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2407 mdclog_write(MDCLOG_ERR, "error in PER_FromXML");
2410 message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
2411 message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment(rmrMessageBuffer.rcvMessage->len);
2412 if (loglevel >= MDCLOG_DEBUG) {
2413 mdclog_write(MDCLOG_DEBUG, "Before sending to CU");
2415 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2416 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_ACK");
2421 case RIC_SERVICE_UPDATE_FAILURE: {
2422 if (loglevel >= MDCLOG_DEBUG) {
2423 mdclog_write(MDCLOG_DEBUG, "RIC_SERVICE_UPDATE_FAILURE");
2425 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2428 message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
2429 message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment(rmrMessageBuffer.rcvMessage->len);
2430 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2431 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_FAILURE");
2436 case RIC_E2_RESET_REQ: {
2437 if (loglevel >= MDCLOG_DEBUG) {
2438 mdclog_write(MDCLOG_DEBUG, "RIC_E2_RESET_REQ");
2440 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2443 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
2444 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment(rmrMessageBuffer.rcvMessage->len);
2445 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2446 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET");
2451 case RIC_E2_RESET_RESP: {
2452 if (loglevel >= MDCLOG_DEBUG) {
2453 mdclog_write(MDCLOG_DEBUG, "RIC_E2_RESET_RESP");
2455 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2458 message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
2459 message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment(rmrMessageBuffer.rcvMessage->len);
2460 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2461 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET_RESP");
2466 case RIC_SCTP_CLEAR_ALL: {
2467 mdclog_write(MDCLOG_INFO, "RIC_SCTP_CLEAR_ALL");
2468 // loop over all keys, close each peer socket, then erase the entire map.
2470 sctpMap->getKeys(v);
2471 for (auto const &iter : v) {
2472 if (!boost::starts_with((string) (iter), "host:") && !boost::starts_with((string) (iter), "msg:")) {
2473 auto *peerInfo = (ConnectedCU_t *) sctpMap->find(iter);
2474 if (peerInfo == nullptr) {
2477 close(peerInfo->fileDescriptor);
2478 memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
2479 message.message.direction = 'D';
2480 message.message.time.tv_nsec = ts.tv_nsec;
2481 message.message.time.tv_sec = ts.tv_sec;
2483 message.message.asnLength = rmrMessageBuffer.sendMessage->len =
2484 snprintf((char *)rmrMessageBuffer.sendMessage->payload,
2486 "%s|RIC_SCTP_CLEAR_ALL",
2487 peerInfo->enodbName);
2488 message.message.asndata = rmrMessageBuffer.sendMessage->payload;
2489 mdclog_write(MDCLOG_INFO, "%s", message.message.asndata);
2490 if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
2491 mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
2501 case E2_TERM_KEEP_ALIVE_REQ: {
2502 // send message back
2503 rmr_bytes2payload(rmrMessageBuffer.sendMessage,
2504 (unsigned char *)rmrMessageBuffer.ka_message,
2505 rmrMessageBuffer.ka_message_len);
2506 rmrMessageBuffer.sendMessage->mtype = E2_TERM_KEEP_ALIVE_RESP;
2507 rmrMessageBuffer.sendMessage->state = 0;
2508 static unsigned char tx[32];
2509 auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2510 rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, txLen);
2511 rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2512 if (rmrMessageBuffer.sendMessage == nullptr) {
2513 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2514 mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP; RMR send returned NULL");
2515 } else if (rmrMessageBuffer.sendMessage->state != 0) {
2516 mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP, on RMR state = %d ( %s)",
2517 rmrMessageBuffer.sendMessage->state, translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str());
2518 } else if (loglevel >= MDCLOG_DEBUG) {
2519 mdclog_write(MDCLOG_DEBUG, "Got keep-alive request; sent response: %s", rmrMessageBuffer.ka_message);
2524 case RIC_HEALTH_CHECK_REQ: {
2525 static int counter = 0;
2526 // send message back
2527 rmr_bytes2payload(rmrMessageBuffer.rcvMessage,
2528 (unsigned char *)"OK",
2530 rmrMessageBuffer.rcvMessage->mtype = RIC_HEALTH_CHECK_RESP;
2531 rmrMessageBuffer.rcvMessage->state = 0;
2532 static unsigned char tx[32];
2533 auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2534 rmr_bytes2xact(rmrMessageBuffer.rcvMessage, tx, txLen);
2535 rmrMessageBuffer.rcvMessage = rmr_rts_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.rcvMessage);
2536 //rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2537 if (rmrMessageBuffer.rcvMessage == nullptr) {
2538 rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2539 mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP; RMR send returned NULL");
2540 } else if (rmrMessageBuffer.rcvMessage->state != 0) {
2541 mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP, on RMR state = %d ( %s)",
2542 rmrMessageBuffer.rcvMessage->state, translateRmrErrorMessages(rmrMessageBuffer.rcvMessage->state).c_str());
2543 } else if (loglevel >= MDCLOG_DEBUG && ++counter % 100 == 0) {
2544 mdclog_write(MDCLOG_DEBUG, "Got %d RIC_HEALTH_CHECK_REQ requests; sent: OK", counter);
2551 mdclog_write(MDCLOG_WARN, "Message type %d is not supported", rmrMessageBuffer.rcvMessage->mtype);
2552 message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
2553 message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
2554 message.message.time.tv_nsec = ts.tv_nsec;
2555 message.message.time.tv_sec = ts.tv_sec;
2556 message.message.messageType = rmrMessageBuffer.rcvMessage->mtype;
2558 buildJsonMessage(message);
2563 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2564 mdclog_write(MDCLOG_DEBUG, "EXIT OK from %s", __FUNCTION__);
2570 * Send a message to the CU without waiting for a successful or unsuccessful outcome
2571 * @param messageBuffer
2573 * @param failedMsgId
2577 int sendDirectionalSctpMsg(RmrMessagesBuffer_t &messageBuffer,
2578 ReportingMessages_t &message,
2580 Sctp_Map_t *sctpMap) {
2581 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2582 mdclog_write(MDCLOG_DEBUG, "send message type %d to %s", message.message.messageType, message.message.enodbName);
2585 getRequestMetaData(message, messageBuffer);
2586 if (mdclog_level_get() >= MDCLOG_INFO) {
2587 mdclog_write(MDCLOG_INFO, "sending message to %s", message.message.enodbName);
2590 auto rc = sendMessagetoCu(sctpMap, messageBuffer, message, failedMsgId);
2597 * @param messageBuffer
2599 * @param failedMesgId
2602 int sendMessagetoCu(Sctp_Map_t *sctpMap,
2603 RmrMessagesBuffer_t &messageBuffer,
2604 ReportingMessages_t &message,
2607 message.message.messageType = messageBuffer.rcvMessage->mtype;
2608 auto rc = sendSctpMsg(message.peerInfo, message, sctpMap);
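// addToEpoll: register a peer's SCTP socket with the epoll instance; on failure the socket is
// closed and the peer's entries are removed from the SCTP map.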
2623 int addToEpoll(int epoll_fd,
2624 ConnectedCU_t *peerInfo,
2626 Sctp_Map_t *sctpMap,
2630 struct epoll_event event{};
2631 event.data.ptr = peerInfo;
2632 event.events = events;
2633 if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, peerInfo->fileDescriptor, &event) < 0) {
2634 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2635 mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_ADD (may check not to quit here), %s, %s %d",
2636 strerror(errno), __func__, __LINE__);
2638 close(peerInfo->fileDescriptor);
2639 if (enodbName != nullptr) {
2640 cleanHashEntry(peerInfo, sctpMap);
2641 char key[MAX_ENODB_NAME_SIZE * 2];
2642 snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", enodbName, msgType);
2643 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2644 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
2646 auto tmp = sctpMap->find(key);
2649 sctpMap->erase(key);
2652 peerInfo->enodbName[0] = 0;
2654 mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_ADD (may check not to quit here)");
2670 int modifyToEpoll(int epoll_fd,
2671 ConnectedCU_t *peerInfo,
2673 Sctp_Map_t *sctpMap,
2677 struct epoll_event event{};
2678 event.data.ptr = peerInfo;
2679 event.events = events;
2680 if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, peerInfo->fileDescriptor, &event) < 0) {
2681 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2682 mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_MOD (may check not to quit here), %s, %s %d",
2683 strerror(errno), __func__, __LINE__);
2685 close(peerInfo->fileDescriptor);
2686 cleanHashEntry(peerInfo, sctpMap);
2687 char key[MAX_ENODB_NAME_SIZE * 2];
2688 snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", enodbName, msgType);
2689 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2690 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
2692 auto tmp = sctpMap->find(key);
2696 sctpMap->erase(key);
2697 mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_MOD (may check not to quit here)");
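// sendRmrMessage: build the JSON trace record for the message, then send it to the xApp over
// RMR, retrying once if RMR reports RMR_ERR_RETRY.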
2704 int sendRmrMessage(RmrMessagesBuffer_t &rmrMessageBuffer, ReportingMessages_t &message) {
2705 buildJsonMessage(message);
2707 rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2709 if (rmrMessageBuffer.sendMessage == nullptr) {
2710 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2711 mdclog_write(MDCLOG_ERR, "RMR send failed; returned a NULL pointer");
2715 if (rmrMessageBuffer.sendMessage->state != 0) {
2716 char meid[RMR_MAX_MEID]{};
2717 if (rmrMessageBuffer.sendMessage->state == RMR_ERR_RETRY) {
2719 rmrMessageBuffer.sendMessage->state = 0;
2720 mdclog_write(MDCLOG_INFO, "RETRY sending Message type %d to Xapp from %s",
2721 rmrMessageBuffer.sendMessage->mtype,
2722 rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2723 rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2724 if (rmrMessageBuffer.sendMessage == nullptr) {
2725 mdclog_write(MDCLOG_ERR, "RMR send failed; returned a NULL pointer");
2726 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2728 } else if (rmrMessageBuffer.sendMessage->state != 0) {
2729 mdclog_write(MDCLOG_ERR,
2730 "Message state %s while sending request %d to Xapp from %s after retry of 10 microseconds",
2731 translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str(),
2732 rmrMessageBuffer.sendMessage->mtype,
2733 rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2734 auto rc = rmrMessageBuffer.sendMessage->state;
2738 mdclog_write(MDCLOG_ERR, "Message state %s while sending request %d to Xapp from %s",
2739 translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str(),
2740 rmrMessageBuffer.sendMessage->mtype,
2741 rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2742 return rmrMessageBuffer.sendMessage->state;
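// buildJsonMessage: base64-encode the ASN.1 payload and write a one-line JSON trace record
// (timestamp, RAN name, message type, direction, payload) to the Boost log sink.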
2748 void buildJsonMessage(ReportingMessages_t &message) {
2750 message.outLen = sizeof(message.base64Data);
2751 base64::encode((const unsigned char *) message.message.asndata,
2752 (const int) message.message.asnLength,
2755 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2756 mdclog_write(MDCLOG_DEBUG, "Tracing: ASN length = %d, base64 message length = %d ",
2757 (int) message.message.asnLength,
2758 (int) message.outLen);
2761 snprintf(message.buffer, sizeof(message.buffer),
2762 "{\"header\": {\"ts\": \"%ld.%09ld\","
2763 "\"ranName\": \"%s\","
2764 "\"messageType\": %d,"
2765 "\"direction\": \"%c\"},"
2766 "\"base64Length\": %d,"
2767 "\"asnBase64\": \"%s\"}",
2768 message.message.time.tv_sec,
2769 message.message.time.tv_nsec,
2770 message.message.enodbName,
2771 message.message.messageType,
2772 message.message.direction,
2773 (int) message.outLen,
2774 message.base64Data);
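// Illustrative output of the snprintf above (field values are examples only):
// {"header": {"ts": "1609459200.000000001","ranName": "gnb_208_092_303030","messageType": 12002,"direction": "D"},"base64Length": 8,"asnBase64": "AAECAw=="}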
2775 static src::logger_mt &lg = my_logger::get();
2777 BOOST_LOG(lg) << message.buffer;
2783 * translate an RMR error code to a descriptive string
2787 string translateRmrErrorMessages(int state) {
2791 str = "RMR_OK - state is good";
2793 case RMR_ERR_BADARG:
2794 str = "RMR_ERR_BADARG - argument passed to function was unusable";
2796 case RMR_ERR_NOENDPT:
2797 str = "RMR_ERR_NOENDPT - send/call could not find an endpoint based on msg type";
2800 str = "RMR_ERR_EMPTY - msg received had no payload; attempt to send an empty message";
2803 str = "RMR_ERR_NOHDR - message didn't contain a valid header";
2805 case RMR_ERR_SENDFAILED:
2806 str = "RMR_ERR_SENDFAILED - send failed; errno has nano reason";
2808 case RMR_ERR_CALLFAILED:
2809 str = "RMR_ERR_CALLFAILED - unable to send call() message";
2811 case RMR_ERR_NOWHOPEN:
2812 str = "RMR_ERR_NOWHOPEN - no wormholes are open";
2815 str = "RMR_ERR_WHID - wormhole id was invalid";
2817 case RMR_ERR_OVERFLOW:
2818 str = "RMR_ERR_OVERFLOW - operation would have busted through a buffer/field size";
2821 str = "RMR_ERR_RETRY - request (send/call/rts) failed, but caller should retry (EAGAIN for wrappers)";
2823 case RMR_ERR_RCVFAILED:
2824 str = "RMR_ERR_RCVFAILED - receive failed (hard error)";
2826 case RMR_ERR_TIMEOUT:
2827 str = "RMR_ERR_TIMEOUT - message processing call timed out";
2830 str = "RMR_ERR_UNSET - the message hasn't been populated with a transport buffer";
2833 str = "RMR_ERR_TRUNC - received message likely truncated";
2835 case RMR_ERR_INITFAILED:
2836 str = "RMR_ERR_INITFAILED - initialisation of something (probably message) failed";
2838 case RMR_ERR_NOTSUPP:
2839 str = "RMR_ERR_NOTSUPP - the request is not supported, or RMr was not initialised for the request";
2843 snprintf(buf, sizeof buf, "UNDOCUMENTED RMR_ERR : %d", state);