1 // Copyright 2019 AT&T Intellectual Property
2 // Copyright 2019 Nokia
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
16 // This source code is part of the near-RT RIC (RAN Intelligent Controller)
17 // platform project (RICP).
19 // E2Term SCTP module: accepts SCTP connections from RAN nodes, relays E2AP messages to and from xApps over RMR, watches the configuration file for changes, and exposes Prometheus counters.
23 #include <3rdparty/oranE2/RANfunctions-List.h>
24 #include "sctpThread.h"
25 #include "BuildRunName.h"
27 //#include "3rdparty/oranE2SM/E2SM-gNB-NRT-RANfunction-Definition.h"
28 //#include "BuildXml.h"
29 //#include "pugixml/src/pugixml.hpp"
32 #include <sys/inotify.h>
35 #include <arpa/inet.h>
38 //using namespace std::placeholders;
39 using namespace boost::filesystem;
40 using namespace prometheus;
48 // need to expose without the include of gcov
49 extern "C" void __gcov_flush(void);
50 #define LOG_FILE_CONFIG_MAP "CONFIG_MAP_NAME"
51 #define E2AP_PPID 70 // as per E2GAP chapter 6.1
53 static void catch_function(int signal) {
59 BOOST_LOG_INLINE_GLOBAL_LOGGER_DEFAULT(my_logger, src::logger_mt)
61 boost::shared_ptr<sinks::synchronous_sink<sinks::text_file_backend>> boostLogger;
62 double cpuClock = 0.0;
63 bool jsonTrace = false;
65 char* getinterfaceip()
69 struct hostent *host_entry;
71 retVal = gethostname(hostname, sizeof(hostname));
74 host_entry = gethostbyname(hostname);
75 if ( host_entry == NULL )
77 IP = inet_ntoa(*((struct in_addr*) host_entry->h_addr_list[0]));
82 static int enable_log_change_notify(const char* fileName)
86 if ( lstat(fileName,&fileInfo) == 0 )
88 ret = register_log_change_notify(fileName);
94 static int register_log_change_notify(const char *fileName)
96 pthread_attr_t cb_attr;
98 pthread_attr_init(&cb_attr);
99 pthread_attr_setdetachstate(&cb_attr,PTHREAD_CREATE_DETACHED);
100 return pthread_create(&tid, &cb_attr,&monitor_loglevel_change_handler,(void *)fileName);
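// Thread entry point: watches the directory of the configmap file with
// inotify (IN_MOVED_TO | IN_CLOSE_WRITE) and, on each change event,
// re-parses the file and applies the new MDC log level.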
104 static void * monitor_loglevel_change_handler(void* arg)
106 char *fileName = (char*) arg;
107 int ifd; // the inotify file des
108 int wfd; // the watched file des
110 char rbuf[4096]; // large read buffer as the event is var len
113 struct timeval timeout;
114 char* dname=NULL; // directory name
115 char* bname = NULL; // basename
117 char* log_level=NULL;
119 dname = strdup( fileName); // split the file name into directory and basename
120 if( (tok = strrchr( dname, '/' )) != NULL ) {
122 bname = strdup( tok+1 );
126 ifd = inotify_init1( 0 ); // initialise watcher setting blocking read (no option)
128 fprintf( stderr, "### ERR ### unable to initialise file watch %s\n", strerror( errno ) );
130 wfd = inotify_add_watch( ifd, dname, IN_MOVED_TO | IN_CLOSE_WRITE ); // we only care about close write changes
133 fprintf( stderr, "### ERR ### unable to add watch on config file %s: %s\n", fileName, strerror( errno ) );
137 memset( &timeout, 0, sizeof(timeout) );
142 res = select (ifd + 1, &fds, NULL, NULL, &timeout);
145 n = read( ifd, rbuf, sizeof( rbuf ) ); // read the event
147 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
148 if( errno == EAGAIN ) {
150 printf( "### CRIT ### config listener read err: %s\n", strerror( errno ) );
156 //Retrieving Log Level from configmap by parsing configmap file
157 log_level = parse_file(fileName);
158 update_mdc_log_level_severity(log_level); //setting log level
162 inotify_rm_watch(ifd,wfd);
172 void update_mdc_log_level_severity(char* log_level)
174 mdclog_severity_t level = MDCLOG_ERR;
176 if(log_level == NULL)
178 printf("### ERR ### Invalid Log-Level Configuration in ConfigMap, Default Log-Level Applied: %d\n",level);
180 else if(strcasecmp(log_level,"1")==0)
184 else if(strcasecmp(log_level,"2")==0)
188 else if(strcasecmp(log_level,"3")==0)
192 else if(strcasecmp(log_level,"4")==0)
194 level = MDCLOG_DEBUG;
197 mdclog_level_set(level);
199 static char* parse_file(char* filename)
203 char *string_match = "log-level";
205 FILE *file = fopen ( filename, "r" );
209 while ( fgets ( line, sizeof line, file ) != NULL )
211 token = strtok(line, search);
212 if(token != NULL && strcmp(token,string_match)==0) // guard against empty lines where strtok returns NULL
215 token = strtok(NULL, search);
216 token = strtok(token, "\n");//removing newline if any
223 return(strdup(token));
228 char *read_env_param(const char*envkey)
232 char *value = getenv(envkey);
234 return strdup(value);
239 void dynamic_log_level_change()
241 char *logFile_Name = read_env_param(LOG_FILE_CONFIG_MAP);
242 char* log_level_init=NULL;
245 log_level_init = parse_file(logFile_Name);
246 update_mdc_log_level_severity(log_level_init); //setting log level
247 free(log_level_init);
250 enable_log_change_notify(logFile_Name);
256 int log_change_monitor = 0;
258 mdclog_attr_init(&attr);
259 mdclog_attr_set_ident(attr, "E2Terminator");
261 if(mdclog_format_initialize(log_change_monitor)!=0)
262 mdclog_write(MDCLOG_ERR, "Failed to initialize MDC log format !!!");
263 dynamic_log_level_change();
264 mdclog_attr_destroy(attr);
266 auto start_time = std::chrono::high_resolution_clock::now();
267 typedef std::chrono::duration<double, std::ratio<1,1>> seconds_t;
270 return seconds_t(std::chrono::high_resolution_clock::now() - start_time).count();
273 double approx_CPU_MHz(unsigned sleepTime) {
274 using namespace std::chrono_literals;
276 uint64_t cycles_start = rdtscp(aux);
277 double time_start = age();
278 std::this_thread::sleep_for(sleepTime * 1ms);
279 uint64_t elapsed_cycles = rdtscp(aux) - cycles_start;
280 double elapsed_time = age() - time_start;
281 return elapsed_cycles / elapsed_time;
284 //std::atomic<int64_t> rmrCounter{0};
285 std::atomic<int64_t> num_of_messages{0};
286 std::atomic<int64_t> num_of_XAPP_messages{0};
287 static long transactionCounter = 0;
288 pthread_mutex_t thread_lock;
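// Create the SCTP listening socket: an IPv6 one-to-one SCTP socket with the
// SCTP_INITMSG stream parameters set, bound to the configured sctp-port on
// any address, made non-blocking and registered (edge-triggered) with the
// shared epoll instance to accept incoming RAN connections.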
290 int buildListeningPort(sctp_params_t &sctpParams) {
291 sctpParams.listenFD = socket(AF_INET6, SOCK_STREAM, IPPROTO_SCTP);
292 if (sctpParams.listenFD <= 0) {
293 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
294 mdclog_write(MDCLOG_ERR, "Error Opening socket, %s", strerror(errno));
298 struct sctp_initmsg initmsg;
299 memset (&initmsg, 0, sizeof (initmsg));
300 initmsg.sinit_num_ostreams = 2;
301 initmsg.sinit_max_instreams = 2;
302 initmsg.sinit_max_attempts = 4;
303 setsockopt (sctpParams.listenFD, IPPROTO_SCTP, SCTP_INITMSG, &initmsg, sizeof (initmsg));
305 struct sockaddr_in6 serverAddress {};
306 serverAddress.sin6_family = AF_INET6;
307 serverAddress.sin6_addr = in6addr_any;
308 serverAddress.sin6_port = htons(sctpParams.sctpPort);
309 if (bind(sctpParams.listenFD, (SA *)&serverAddress, sizeof(serverAddress)) < 0 ) {
310 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
311 mdclog_write(MDCLOG_ERR, "Error binding port %d. %s", sctpParams.sctpPort, strerror(errno));
315 if (setSocketNoBlocking(sctpParams.listenFD) == -1) {
316 //mdclog_write(MDCLOG_ERR, "Error binding. %s", strerror(errno));
319 if (mdclog_level_get() >= MDCLOG_DEBUG) {
320 struct sockaddr_in6 clientAddress {};
321 socklen_t len = sizeof(clientAddress);
322 getsockname(sctpParams.listenFD, (SA *)&clientAddress, &len);
324 inet_ntop(AF_INET6, &clientAddress.sin6_addr, buff, sizeof(buff));
325 mdclog_write(MDCLOG_DEBUG, "My address: %s, port %d\n", buff, htons(clientAddress.sin6_port));
328 if (listen(sctpParams.listenFD, SOMAXCONN) < 0) {
329 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
330 mdclog_write(MDCLOG_ERR, "Error listening. %s\n", strerror(errno));
334 struct epoll_event event {};
335 event.events = EPOLLIN | EPOLLET;
336 event.data.fd = sctpParams.listenFD;
338 // add listening port to epoll
339 if (epoll_ctl(sctpParams.epoll_fd, EPOLL_CTL_ADD, sctpParams.listenFD, &event)) {
340 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
341 printf("Failed to add descriptor to epoll\n");
342 mdclog_write(MDCLOG_ERR, "Failed to add descriptor to epoll. %s\n", strerror(errno));
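// Read the configuration file ("nano" = RMR port, "volume", "local-ip",
// "sctp-port", "external-fqdn", "pod_name", "trace", "prometheusPort"),
// populate sctpParams, build the keep-alive message and set up the rotating
// boost log sink under the configured volume.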
350 int buildConfiguration(sctp_params_t &sctpParams) {
351 path p = (sctpParams.configFilePath + "/" + sctpParams.configFileName).c_str();
353 const int size = 2048;
354 auto fileSize = file_size(p);
355 if (fileSize > size) {
356 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
357 mdclog_write(MDCLOG_ERR, "File %s larger than %d", p.string().c_str(), size);
362 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
363 mdclog_write(MDCLOG_ERR, "Configuration file %s does not exist", p.string().c_str());
369 if (conf.openConfigFile(p.string()) == -1) {
370 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
371 mdclog_write(MDCLOG_ERR, "Failed to open config file %s, %s",
372 p.string().c_str(), strerror(errno));
376 int rmrPort = conf.getIntValue("nano");
378 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
379 mdclog_write(MDCLOG_ERR, "illegal RMR port ");
383 sctpParams.rmrPort = (uint16_t)rmrPort;
384 snprintf(sctpParams.rmrAddress, sizeof(sctpParams.rmrAddress), "%d", (int) (sctpParams.rmrPort));
385 auto tmpStr = conf.getStringValue("volume");
386 if (tmpStr.length() == 0) {
387 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
388 mdclog_write(MDCLOG_ERR, "illegal volume.");
393 char tmpLogFilespec[VOLUME_URL_SIZE];
394 tmpLogFilespec[0] = 0;
395 sctpParams.volume[0] = 0;
396 snprintf(sctpParams.volume, VOLUME_URL_SIZE, "%s", tmpStr.c_str());
397 // copy the name to temp file as well
398 snprintf(tmpLogFilespec, VOLUME_URL_SIZE, "%s", tmpStr.c_str());
401 // define the file name in the tmp directory under the volume
402 strcat(tmpLogFilespec,"/tmp/E2Term_%Y-%m-%d_%H-%M-%S.%N.tmpStr");
404 sctpParams.myIP = conf.getStringValue("local-ip");
405 if (sctpParams.myIP.length() == 0) {
406 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
407 mdclog_write(MDCLOG_ERR, "illegal local-ip.");
412 int sctpPort = conf.getIntValue("sctp-port");
413 if (sctpPort == -1) {
414 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
415 mdclog_write(MDCLOG_ERR, "illegal SCTP port ");
419 sctpParams.sctpPort = (uint16_t)sctpPort;
421 sctpParams.fqdn = conf.getStringValue("external-fqdn");
422 if (sctpParams.fqdn.length() == 0) {
423 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
424 mdclog_write(MDCLOG_ERR, "illegal external-fqdn");
429 std::string pod = conf.getStringValue("pod_name");
431 if (pod.length() == 0) {
432 mdclog_write(MDCLOG_ERR, "illegal pod_name in config file");
435 auto *podName = getenv(pod.c_str());
436 if (podName == nullptr) {
437 mdclog_write(MDCLOG_ERR, "illegal pod_name or environment variable does not exist : %s", pod.c_str());
441 sctpParams.podName.assign(podName);
442 if (sctpParams.podName.length() == 0) {
443 mdclog_write(MDCLOG_ERR, "illegal pod_name");
448 tmpStr = conf.getStringValue("trace");
449 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
450 if ((tmpStr.compare("start")) == 0) {
451 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
452 mdclog_write(MDCLOG_INFO, "Trace set to: start");
453 sctpParams.trace = true;
455 } else if ((tmpStr.compare("stop")) == 0) {
456 mdclog_write(MDCLOG_INFO, "Trace set to: stop");
457 sctpParams.trace = false;
459 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
460 mdclog_write(MDCLOG_ERR, "Trace was set to an unrecognized value %s, setting to stop", tmpStr.c_str());
461 sctpParams.trace = false;
464 jsonTrace = sctpParams.trace;
466 sctpParams.epollTimeOut = -1;
468 tmpStr = conf.getStringValue("prometheusPort");
469 if (tmpStr.length() != 0) {
470 sctpParams.prometheusPort = tmpStr;
473 sctpParams.ka_message_length = snprintf(sctpParams.ka_message, KA_MESSAGE_SIZE, "{\"address\": \"%s:%d\","
475 "\"pod_name\": \"%s\"}",
476 (const char *)sctpParams.myIP.c_str(),
478 sctpParams.fqdn.c_str(),
479 sctpParams.podName.c_str());
481 if (mdclog_level_get() >= MDCLOG_INFO) {
482 mdclog_write(MDCLOG_DEBUG,"RMR Port: %s", to_string(sctpParams.rmrPort).c_str());
483 mdclog_write(MDCLOG_DEBUG,"LogLevel: %s", to_string(sctpParams.logLevel).c_str());
484 mdclog_write(MDCLOG_DEBUG,"volume: %s", sctpParams.volume);
485 mdclog_write(MDCLOG_DEBUG,"tmpLogFilespec: %s", tmpLogFilespec);
486 mdclog_write(MDCLOG_DEBUG,"my ip: %s", sctpParams.myIP.c_str());
487 mdclog_write(MDCLOG_DEBUG,"pod name: %s", sctpParams.podName.c_str());
489 mdclog_write(MDCLOG_INFO, "running parameters for instance : %s", sctpParams.ka_message);
492 // Files written to the current working directory
493 boostLogger = logging::add_file_log(
494 keywords::file_name = tmpLogFilespec, // to temp directory
495 keywords::rotation_size = 10 * 1024 * 1024,
496 keywords::time_based_rotation = sinks::file::rotation_at_time_interval(posix_time::hours(1)),
497 keywords::format = "%Message%"
498 //keywords::format = "[%TimeStamp%]: %Message%" // use each tmpStr with time stamp
501 // Setup a destination folder for collecting rotated (closed) files --since the same volume can use rename()
502 boostLogger->locked_backend()->set_file_collector(sinks::file::make_collector(
503 keywords::target = sctpParams.volume
506 // Upon restart, scan the directory for files matching the file_name pattern
507 boostLogger->locked_backend()->scan_for_files();
509 // Enable auto-flushing after each log record is written
510 if (mdclog_level_get() >= MDCLOG_DEBUG) {
511 boostLogger->locked_backend()->auto_flush(true);
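// Register the E2T counter family ("E2TBeta", or presumably "E2TAlpha" for an
// alpha pod, in the elided branch) with the Prometheus registry, build the
// instance-level counters and start the pull-mode Exposer on the selected
// address and port.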
517 void startPrometheus(sctp_params_t &sctpParams) {
518 auto podName = std::getenv("POD_NAME");
519 string metric = "E2TBeta";
520 if (podName != NULL && strstr(podName, "alpha") != NULL) {
523 // Get the local host IP address (resolved from the hostname)
524 char* host = getinterfaceip();
525 string hostip = host;
527 sctpParams.prometheusFamily = &BuildCounter()
528 .Name(metric.c_str())
529 .Help("E2T instance metrics")
530 .Labels({{"POD_NAME", sctpParams.podName}})
531 .Register(*sctpParams.prometheusRegistry);
533 // Build E2T instance level metrics
534 buildE2TPrometheusCounters(sctpParams);
536 string prometheusPath;
538 prometheusPath = sctpParams.prometheusPort + "," + "[::]:" + sctpParams.prometheusPort;
540 prometheusPath = hostip + ":" + sctpParams.prometheusPort;
542 if (mdclog_level_get() >= MDCLOG_DEBUG) {
543 mdclog_write(MDCLOG_DEBUG, "Start Prometheus Pull mode on %s", prometheusPath.c_str());
545 sctpParams.prometheusExposer = new Exposer(prometheusPath, 1);
546 sctpParams.prometheusExposer->RegisterCollectable(sctpParams.prometheusRegistry);
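// main: seed the transaction counter, install signal handlers, estimate the
// CPU clock, load the configuration, start Prometheus, create the epoll
// instance, RMR context, inotify watch and SCTP listening socket, then spawn
// one listener thread per CPU and loop sending E2_TERM_INIT until an xApp
// responds.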
550 int main(const int argc, char **argv) {
551 sctp_params_t sctpParams;
553 std::random_device device{};
554 std::mt19937 generator(device());
555 std::uniform_int_distribution<long> distribution(1, (long) 1e12);
556 transactionCounter = distribution(generator);
560 // uint32_t aux1 = 0;
561 // st = rdtscp(aux1);
563 unsigned num_cpus = std::thread::hardware_concurrency();
565 if (std::signal(SIGINT, catch_function) == SIG_ERR) {
566 mdclog_write(MDCLOG_ERR, "Error initializing SIGINT");
569 if (std::signal(SIGABRT, catch_function)== SIG_ERR) {
570 mdclog_write(MDCLOG_ERR, "Error initializing SIGABRT");
573 if (std::signal(SIGTERM, catch_function)== SIG_ERR) {
574 mdclog_write(MDCLOG_ERR, "Error initializing SIGTERM");
578 cpuClock = approx_CPU_MHz(100);
580 mdclog_write(MDCLOG_DEBUG, "CPU speed %11.11f", cpuClock);
582 auto result = parse(argc, argv, sctpParams);
584 if (buildConfiguration(sctpParams) != 0) {
588 //auto registry = std::make_shared<Registry>();
589 sctpParams.prometheusRegistry = std::make_shared<Registry>();
591 //sctpParams.prometheusFamily = new Family<Counter>("E2T", "E2T message counter", {{"E", sctpParams.podName}});
593 startPrometheus(sctpParams);
596 sctpParams.epoll_fd = epoll_create1(0);
597 if (sctpParams.epoll_fd == -1) {
598 mdclog_write(MDCLOG_ERR, "failed to open epoll descriptor");
601 getRmrContext(sctpParams);
602 if (sctpParams.rmrCtx == nullptr) {
603 close(sctpParams.epoll_fd);
607 if (buildInotify(sctpParams) == -1) {
608 close(sctpParams.rmrListenFd);
609 rmr_close(sctpParams.rmrCtx);
610 close(sctpParams.epoll_fd);
614 if (buildListeningPort(sctpParams) != 0) {
615 close(sctpParams.rmrListenFd);
616 rmr_close(sctpParams.rmrCtx);
617 close(sctpParams.epoll_fd);
621 sctpParams.sctpMap = new mapWrapper();
623 if (pthread_mutex_init(&thread_lock, NULL) != 0) {
624 mdclog_write(MDCLOG_ERR, "failed to init thread lock");
627 std::vector<std::thread> threads(num_cpus);
628 // std::vector<std::thread> threads;
631 for (unsigned int i = 0; i < num_cpus; i++) {
632 threads[i] = std::thread(listener, &sctpParams);
637 int rc = pthread_setaffinity_np(threads[i].native_handle(), sizeof(cpu_set_t), &cpuset);
639 mdclog_write(MDCLOG_ERR, "Error calling pthread_setaffinity_np: %d", rc);
644 // loop, resending E2_TERM_INIT until the first message from an xApp arrives
645 handleTermInit(sctpParams);
647 for (auto &t : threads) {
650 pthread_mutex_destroy(&thread_lock);
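// Send E2_TERM_INIT toward the E2 Manager and keep resending it until the
// first message from an xApp is observed (num_of_XAPP_messages > 0).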
654 void handleTermInit(sctp_params_t &sctpParams) {
655 sendTermInit(sctpParams);
656 // notify the E2 Manager that the E2 Terminator has initialized
661 auto xappMessages = num_of_XAPP_messages.load(std::memory_order_acquire);
662 if (xappMessages > 0) {
663 if (mdclog_level_get() >= MDCLOG_INFO) {
664 mdclog_write(MDCLOG_INFO, "Got a message from some application, stop sending E2_TERM_INIT");
670 if (count % 1000 == 0) {
671 mdclog_write(MDCLOG_ERR, "Got no messages from any xApp yet");
672 sendTermInit(sctpParams);
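// Allocate an E2_TERM_INIT RMR message carrying the keep-alive payload
// (address, fqdn, pod_name), stamp a transaction id and send it, retrying
// until RMR reports success.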
677 void sendTermInit(sctp_params_t &sctpParams) {
678 rmr_mbuf_t *msg = rmr_alloc_msg(sctpParams.rmrCtx, sctpParams.ka_message_length);
681 msg->mtype = E2_TERM_INIT;
683 rmr_bytes2payload(msg, (unsigned char *)sctpParams.ka_message, sctpParams.ka_message_length);
684 static unsigned char tx[32];
685 auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
686 rmr_bytes2xact(msg, tx, txLen);
687 msg = rmr_send_msg(sctpParams.rmrCtx, msg);
688 if (msg == nullptr) {
689 msg = rmr_alloc_msg(sctpParams.rmrCtx, sctpParams.ka_message_length);
690 } else if (msg->state == 0) {
692 if (mdclog_level_get() >= MDCLOG_INFO) {
693 mdclog_write(MDCLOG_INFO, "E2_TERM_INIT successfully sent ");
697 if (count % 100 == 0) {
698 mdclog_write(MDCLOG_ERR, "Error sending E2_TERM_INIT, cause: %s", translateRmrErrorMessages(msg->state).c_str());
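// Command line parsing via cxxopts: -p/--path and -f/--file select the
// configuration file location; -h/--help prints usage.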
713 cxxopts::ParseResult parse(int argc, char *argv[], sctp_params_t &sctpParams) {
714 cxxopts::Options options(argv[0], "e2 term help");
715 options.positional_help("[optional args]").show_positional_help();
716 options.allow_unrecognised_options().add_options()
717 ("p,path", "config file path", cxxopts::value<std::string>(sctpParams.configFilePath)->default_value("config"))
718 ("f,file", "config file name", cxxopts::value<std::string>(sctpParams.configFileName)->default_value("config.conf"))
719 ("h,help", "Print help");
721 auto result = options.parse(argc, (const char **&)argv);
723 if (result.count("help")) {
724 std::cout << options.help({""}) << std::endl;
733 * @return -1 failed 0 success
735 int buildInotify(sctp_params_t &sctpParams) {
736 sctpParams.inotifyFD = inotify_init1(IN_NONBLOCK);
737 if (sctpParams.inotifyFD == -1) {
738 mdclog_write(MDCLOG_ERR, "Failed to init inotify (inotify_init1) %s", strerror(errno));
742 sctpParams.inotifyWD = inotify_add_watch(sctpParams.inotifyFD,
743 (const char *)sctpParams.configFilePath.c_str(),
744 (unsigned)IN_OPEN | (unsigned)IN_CLOSE_WRITE | (unsigned)IN_CLOSE_NOWRITE); //IN_CLOSE = (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE)
745 if (sctpParams.inotifyWD == -1) {
746 mdclog_write(MDCLOG_ERR, "Failed to add directory : %s to inotify (inotify_add_watch) %s",
747 sctpParams.configFilePath.c_str(),
749 close(sctpParams.inotifyFD);
753 struct epoll_event event{};
754 event.events = (EPOLLIN);
755 event.data.fd = sctpParams.inotifyFD;
756 // add the inotify FD to epoll
757 if (epoll_ctl(sctpParams.epoll_fd, EPOLL_CTL_ADD, sctpParams.inotifyFD, &event)) {
758 mdclog_write(MDCLOG_ERR, "Failed to add inotify FD to epoll");
759 close(sctpParams.inotifyFD);
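// Worker thread main loop: adds per-thread MDC fields, captures the thread id,
// allocates RMR receive and send buffers, then epoll_wait()s on the listening
// SCTP socket, the RMR fd, the inotify fd and all connected RAN sockets,
// dispatching each event to the appropriate handler.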
770 void listener(sctp_params_t *params) {
771 int num_of_SCTP_messages = 0;
772 auto totalTime = 0.0;
773 std::thread::id this_id = std::this_thread::get_id();
775 auto pod_name = std::getenv("POD_NAME");
776 auto container_name = std::getenv("CONTAINER_NAME");
777 auto service_name = std::getenv("SERVICE_NAME");
778 auto host_name = std::getenv("HOST_NAME");
779 auto system_name = std::getenv("SYSTEM_NAME");
780 auto pid = std::to_string(getpid()); // keep the std::string alive; c_str() of a temporary would dangle
781 streambuf *oldCout = cout.rdbuf();
782 ostringstream memCout;
784 cout.rdbuf(memCout.rdbuf());
786 //return to the normal cout
790 memcpy(tid, memCout.str().c_str(), memCout.str().length() < 32 ? memCout.str().length() : 31);
791 tid[memCout.str().length() < 32 ? memCout.str().length() : 31] = 0; // clamp terminator to the buffer size
792 mdclog_mdc_add("SYSTEM_NAME", system_name);
793 mdclog_mdc_add("HOST_NAME", host_name);
794 mdclog_mdc_add("SERVICE_NAME", service_name);
795 mdclog_mdc_add("CONTAINER_NAME", container_name);
796 mdclog_mdc_add("POD_NAME", pod_name);
797 mdclog_mdc_add("PID", pid);
799 if (mdclog_level_get() >= MDCLOG_DEBUG) {
800 mdclog_write(MDCLOG_DEBUG, "started thread number %s", tid);
803 RmrMessagesBuffer_t rmrMessageBuffer{};
804 //create and init RMR
805 rmrMessageBuffer.rmrCtx = params->rmrCtx;
807 auto *events = (struct epoll_event *) calloc(MAXEVENTS, sizeof(struct epoll_event));
808 struct timespec end{0, 0};
809 struct timespec start{0, 0};
811 rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
812 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
814 memcpy(rmrMessageBuffer.ka_message, params->ka_message, params->ka_message_length);
815 rmrMessageBuffer.ka_message_len = params->ka_message_length;
816 rmrMessageBuffer.ka_message[rmrMessageBuffer.ka_message_len] = 0;
818 if (mdclog_level_get() >= MDCLOG_DEBUG) {
819 mdclog_write(MDCLOG_DEBUG, "keep alive message is : %s", rmrMessageBuffer.ka_message);
822 ReportingMessages_t message {};
824 // for (int i = 0; i < MAX_RMR_BUFF_ARRAY; i++) {
825 // rmrMessageBuffer.rcvBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
826 // rmrMessageBuffer.sendBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
830 if (mdclog_level_get() >= MDCLOG_DEBUG) {
831 mdclog_write(MDCLOG_DEBUG, "Start EPOLL Wait. Timeout = %d", params->epollTimeOut);
834 auto numOfEvents = epoll_wait(params->epoll_fd, events, MAXEVENTS, params->epollTimeOut);
836 auto numOfEvents = 1;
838 if (numOfEvents == 0) { // time out
839 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
840 if (mdclog_level_get() >= MDCLOG_DEBUG) {
841 mdclog_write(MDCLOG_DEBUG, "got epoll timeout");
844 } else if (numOfEvents < 0) {
845 if (errno == EINTR) {
846 if (mdclog_level_get() >= MDCLOG_DEBUG) {
847 mdclog_write(MDCLOG_DEBUG, "got EINTR : %s", strerror(errno));
851 mdclog_write(MDCLOG_ERR, "Epoll wait failed, errno = %s", strerror(errno));
860 for (auto i = 0; i < numOfEvents; i++) {
861 if (mdclog_level_get() >= MDCLOG_DEBUG) {
862 mdclog_write(MDCLOG_DEBUG, "handling epoll event %d out of %d", i + 1, numOfEvents);
864 clock_gettime(CLOCK_MONOTONIC, &message.message.time);
865 start.tv_sec = message.message.time.tv_sec;
866 start.tv_nsec = message.message.time.tv_nsec;
869 if ((events[i].events & EPOLLERR) || (events[i].events & EPOLLHUP)) {
870 handlepoll_error(events[i], message, rmrMessageBuffer, params);
871 } else if (events[i].events & EPOLLOUT) {
872 handleEinprogressMessages(events[i], message, rmrMessageBuffer, params);
873 } else if (params->listenFD == events[i].data.fd) {
874 if (mdclog_level_get() >= MDCLOG_INFO) {
875 mdclog_write(MDCLOG_INFO, "New connection request from sctp network\n");
877 // a new connection request from the RAN; start building the connection
879 struct sockaddr in_addr {};
881 char hostBuff[NI_MAXHOST];
882 char portBuff[NI_MAXSERV];
884 in_len = sizeof(in_addr);
885 auto *peerInfo = (ConnectedCU_t *)calloc(1, sizeof(ConnectedCU_t));
886 if(peerInfo == nullptr){
887 mdclog_write(MDCLOG_ERR, "calloc failed");
890 peerInfo->sctpParams = params;
891 peerInfo->fileDescriptor = accept(params->listenFD, &in_addr, &in_len);
892 if (peerInfo->fileDescriptor == -1) {
893 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
894 if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) {
895 /* We have processed all incoming connections. */
904 mdclog_write(MDCLOG_ERR, "Accept error, errno = %s", strerror(errno));
908 if (setSocketNoBlocking(peerInfo->fileDescriptor) == -1) {
909 mdclog_write(MDCLOG_ERR, "setSocketNoBlocking failed to set new connection %s on port %s\n", hostBuff, portBuff);
910 close(peerInfo->fileDescriptor);
917 struct sctp_event_subscribe sctpevents;
918 memset( (void *)&sctpevents, 0, sizeof(sctpevents) );
919 sctpevents.sctp_data_io_event = 1;
920 setsockopt(peerInfo->fileDescriptor, SOL_SCTP, SCTP_EVENTS,(const void *)&sctpevents, sizeof(sctpevents) );
922 auto ans = getnameinfo(&in_addr, in_len,
923 peerInfo->hostName, NI_MAXHOST,
924 peerInfo->portNumber, NI_MAXSERV, (unsigned )((unsigned int)NI_NUMERICHOST | (unsigned int)NI_NUMERICSERV));
926 mdclog_write(MDCLOG_ERR, "Failed to get info on connection request. %s\n", strerror(errno));
927 close(peerInfo->fileDescriptor);
933 if (mdclog_level_get() >= MDCLOG_DEBUG) {
934 mdclog_write(MDCLOG_DEBUG, "Accepted connection on descriptor %d (host=%s, port=%s)\n", peerInfo->fileDescriptor, peerInfo->hostName, peerInfo->portNumber);
936 peerInfo->isConnected = false;
937 peerInfo->gotSetup = false;
938 if (addToEpoll(params->epoll_fd,
941 params->sctpMap, nullptr,
950 } else if (params->rmrListenFd == events[i].data.fd) {
951 // got message from XAPP
952 //num_of_XAPP_messages.fetch_add(1, std::memory_order_release);
953 num_of_messages.fetch_add(1, std::memory_order_release);
954 if (mdclog_level_get() >= MDCLOG_DEBUG) {
955 mdclog_write(MDCLOG_DEBUG, "new RMR message");
957 if (receiveXappMessages(params->sctpMap,
959 message.message.time) != 0) {
960 mdclog_write(MDCLOG_ERR, "Error handling Xapp message");
962 } else if (params->inotifyFD == events[i].data.fd) {
963 mdclog_write(MDCLOG_INFO, "Got event from inotify (configuration update)");
964 handleConfigChange(params);
966 /* We have data on the fd waiting to be read. Read and process it.
967 * We must read whatever data is available completely, as we are running
968 * in edge-triggered mode and won't get a notification again for the same data. */
969 num_of_messages.fetch_add(1, std::memory_order_release);
970 if (mdclog_level_get() >= MDCLOG_DEBUG) {
971 mdclog_write(MDCLOG_DEBUG, "new message from SCTP, epoll flags are : %0x", events[i].events);
973 receiveDataFromSctp(&events[i],
975 num_of_SCTP_messages,
977 message.message.time);
980 clock_gettime(CLOCK_MONOTONIC, &end);
981 if (mdclog_level_get() >= MDCLOG_INFO) {
982 totalTime += ((end.tv_sec + 1.0e-9 * end.tv_nsec) -
983 ((double) start.tv_sec + 1.0e-9 * start.tv_nsec));
985 if (mdclog_level_get() >= MDCLOG_DEBUG) {
986 mdclog_write(MDCLOG_DEBUG, "message handling is %ld seconds %ld nanoseconds",
987 end.tv_sec - start.tv_sec,
988 end.tv_nsec - start.tv_nsec);
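// Called when inotify reports activity on the configuration directory:
// re-reads the config file and applies updated loglevel and trace settings.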
1001 void handleConfigChange(sctp_params_t *sctpParams) {
1002 char buf[4096] __attribute__ ((aligned(__alignof__(struct inotify_event))));
1003 const struct inotify_event *event;
1006 struct inotify_event tmpEvent;
1008 path p = (sctpParams->configFilePath + "/" + sctpParams->configFileName).c_str();
1009 auto endlessLoop = true;
1010 while (endlessLoop) {
1011 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
1012 auto len = read(sctpParams->inotifyFD, buf, sizeof buf);
1017 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
1018 if (errno != EAGAIN) {
1019 mdclog_write(MDCLOG_ERR, "read %s ", strerror(errno));
1020 endlessLoop = false;
1024 endlessLoop = false;
1030 for (ptr = buf; ptr < buf + len; ptr += sizeof(struct inotify_event) + event->len) {
1032 event = (const struct inotify_event *)ptr;
1034 tmpEvent.mask = (uint32_t)IN_CLOSE_WRITE;
1037 if (event->mask & (uint32_t)IN_ISDIR) {
1041 // the directory name
1042 if (sctpParams->inotifyWD == event->wd) {
1043 // not the directory
1046 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
1047 auto retVal = strcmp(sctpParams->configFileName.c_str(), event->name);
1053 // only the file we want
1054 if (event->mask & (uint32_t)IN_CLOSE_WRITE) {
1055 if (mdclog_level_get() >= MDCLOG_INFO) {
1056 mdclog_write(MDCLOG_INFO, "Configuration file changed");
1059 const int size = 2048;
1060 auto fileSize = file_size(p);
1061 if (fileSize > size) {
1062 mdclog_write(MDCLOG_ERR, "File %s larger than %d", p.string().c_str(), size);
1066 mdclog_write(MDCLOG_ERR, "Configuration file %s does not exist", p.string().c_str());
1070 ReadConfigFile conf;
1071 if (conf.openConfigFile(p.string()) == -1) {
1072 mdclog_write(MDCLOG_ERR, "Failed to open config file %s, %s",
1073 p.string().c_str(), strerror(errno));
1076 auto tmpStr = conf.getStringValue("loglevel");
1077 if (tmpStr.length() == 0) {
1078 mdclog_write(MDCLOG_ERR, "illegal loglevel. Set loglevel to MDCLOG_INFO");
1081 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
1083 if ((tmpStr.compare("debug")) == 0) {
1084 mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_DEBUG");
1085 sctpParams->logLevel = MDCLOG_DEBUG;
1087 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
1088 else if ((tmpStr.compare("info")) == 0) {
1089 mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_INFO");
1090 sctpParams->logLevel = MDCLOG_INFO;
1091 } else if ((tmpStr.compare("warning")) == 0) {
1092 mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_WARN");
1093 sctpParams->logLevel = MDCLOG_WARN;
1094 } else if ((tmpStr.compare("error")) == 0) {
1095 mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_ERR");
1096 sctpParams->logLevel = MDCLOG_ERR;
1098 mdclog_write(MDCLOG_ERR, "illegal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
1099 sctpParams->logLevel = MDCLOG_INFO;
1102 mdclog_level_set(sctpParams->logLevel);
1103 tmpStr = conf.getStringValue("trace");
1104 if (tmpStr.length() == 0) {
1105 mdclog_write(MDCLOG_ERR, "illegal trace. Set trace to stop");
1109 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
1110 if ((tmpStr.compare("start")) == 0) {
1111 mdclog_write(MDCLOG_INFO, "Trace set to: start");
1112 sctpParams->trace = true;
1113 } else if ((tmpStr.compare("stop")) == 0) {
1114 mdclog_write(MDCLOG_INFO, "Trace set to: stop");
1115 sctpParams->trace = false;
1117 mdclog_write(MDCLOG_ERR, "Trace was set to an unrecognized value %s, setting to stop", tmpStr.c_str());
1118 sctpParams->trace = false;
1120 jsonTrace = sctpParams->trace;
1123 endlessLoop = false;
1136 * @param rmrMessageBuffer
1139 void handleEinprogressMessages(struct epoll_event &event,
1140 ReportingMessages_t &message,
1141 RmrMessagesBuffer_t &rmrMessageBuffer,
1142 sctp_params_t *params) {
1143 auto *peerInfo = (ConnectedCU_t *)event.data.ptr;
1144 memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
1146 mdclog_write(MDCLOG_INFO, "file descriptor %d got EPOLLOUT", peerInfo->fileDescriptor);
1148 socklen_t retValLen = sizeof(retVal); // must be the size of retVal so getsockopt can fill it
1149 auto rc = getsockopt(peerInfo->fileDescriptor, SOL_SOCKET, SO_ERROR, &retVal, &retValLen);
1150 if (rc != 0 || retVal != 0) {
1151 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
1153 rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
1154 "%s|Failed SCTP Connection, after EINPROGRESS the getsockopt%s",
1155 peerInfo->enodbName, strerror(errno));
1156 } else if (retVal != 0) {
1157 rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
1158 "%s|Failed SCTP Connection after EINPROGRESS, SO_ERROR",
1159 peerInfo->enodbName);
1162 message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1163 message.message.asnLength = rmrMessageBuffer.sendMessage->len;
1164 mdclog_write(MDCLOG_ERR, "%s", rmrMessageBuffer.sendMessage->payload);
1165 message.message.direction = 'N';
1166 if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
1167 mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
1170 memset(peerInfo->asnData, 0, peerInfo->asnLength);
1171 peerInfo->asnLength = 0;
1172 peerInfo->mtype = 0;
1175 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
1176 peerInfo->isConnected = true;
1178 if (modifyToEpoll(params->epoll_fd, peerInfo, (EPOLLIN | EPOLLET), params->sctpMap, peerInfo->enodbName,
1179 peerInfo->mtype) != 0) {
1180 mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_MOD");
1184 message.message.asndata = (unsigned char *)peerInfo->asnData;
1185 message.message.asnLength = peerInfo->asnLength;
1186 message.message.messageType = peerInfo->mtype;
1187 memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
1188 num_of_messages.fetch_add(1, std::memory_order_release);
1189 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1190 mdclog_write(MDCLOG_DEBUG, "send the delayed SETUP/ENDC SETUP to sctp for %s",
1191 message.message.enodbName);
1193 if (sendSctpMsg(peerInfo, message, params->sctpMap) != 0) {
1194 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1195 mdclog_write(MDCLOG_DEBUG, "Error write to SCTP %s %d", __func__, __LINE__);
1200 memset(peerInfo->asnData, 0, peerInfo->asnLength);
1201 peerInfo->asnLength = 0;
1202 peerInfo->mtype = 0;
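// EPOLLERR/EPOLLHUP handler: for a RAN connection, reports the failure to the
// xApps via RIC_SCTP_CONNECTION_FAILURE, closes the socket and removes the
// peer from the connection map; for the RMR fd it only logs the error.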
1207 void handlepoll_error(struct epoll_event &event,
1208 ReportingMessages_t &message,
1209 RmrMessagesBuffer_t &rmrMessageBuffer,
1210 sctp_params_t *params) {
1211 if ((event.data.fd != params->rmrListenFd) && (event.data.ptr != nullptr)) {
1212 auto *peerInfo = (ConnectedCU_t *)event.data.ptr;
1213 mdclog_write(MDCLOG_ERR, "epoll error, events %0x on fd %d, RAN NAME : %s",
1214 event.events, peerInfo->fileDescriptor, peerInfo->enodbName);
1215 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
1216 rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
1217 "%s|Failed SCTP Connection",
1218 peerInfo->enodbName);
1219 message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1220 message.message.asnLength = rmrMessageBuffer.sendMessage->len;
1222 memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
1223 message.message.direction = 'N';
1224 if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
1225 mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
1228 close(peerInfo->fileDescriptor);
1229 //params->sctpMap->erase(peerInfo->enodbName);
1230 cleanHashEntry((ConnectedCU_t *) event.data.ptr, params->sctpMap);
1232 mdclog_write(MDCLOG_ERR, "epoll error, events %0x on RMR FD", event.events);
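// Set O_NONBLOCK on the given socket; returns -1 on fcntl failure, as the
// callers expect.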
1240 int setSocketNoBlocking(int socket) {
1241 auto flags = fcntl(socket, F_GETFL, 0);
1244 mdclog_write(MDCLOG_ERR, "%s, %s", __FUNCTION__, strerror(errno));
1248 flags = (unsigned) flags | (unsigned) O_NONBLOCK;
1249 if (fcntl(socket, F_SETFL, flags) == -1) {
1250 mdclog_write(MDCLOG_ERR, "%s, %s", __FUNCTION__, strerror(errno));
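// Remove both map entries for a peer: the "host:<name>:<port>" key and the
// enodbName key.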
1262 void cleanHashEntry(ConnectedCU_t *val, Sctp_Map_t *m) {
1266 auto port = (uint16_t) strtol(val->portNumber, &dummy, 10);
1267 char searchBuff[2048]{};
1269 snprintf(searchBuff, sizeof searchBuff, "host:%s:%d", val->hostName, port);
1270 if(m->find(searchBuff))
1272 m->erase(searchBuff);
1275 if(m->find(val->enodbName))
1277 mdclog_write(MDCLOG_DEBUG, "remove key enodbName = %s from %s at line %d", val->enodbName, __FUNCTION__, __LINE__);
1278 m->erase(val->enodbName);
1291 * @param fd file descriptor
1292 * @param data the asn data to send
1293 * @param len length of the data
1294 * @param enodbName the enodbName as in the map for printing purpose
1295 * @param m map host information
1296 * @param mtype message number
1297 * @return 0 success, a negative number on fail
1299 int sendSctpMsg(ConnectedCU_t *peerInfo, ReportingMessages_t &message, Sctp_Map_t *m) {
1300 auto loglevel = mdclog_level_get();
1302 int fd = peerInfo->fileDescriptor;
1303 int streamId = fetchStreamId(peerInfo,message);
1305 int fd = FILE_DESCRIPTOR;
1308 if (loglevel >= MDCLOG_DEBUG) {
1309 mdclog_write(MDCLOG_DEBUG, "Send SCTP message for CU %s, %s",
1310 message.message.enodbName, __FUNCTION__);
1314 if (sctp_sendmsg(fd,message.message.asndata, message.message.asnLength,(struct sockaddr *) NULL, 0, htonl(E2AP_PPID), 0,streamId,0,0) < 0) {
1315 if (errno == EINTR) {
1318 mdclog_write(MDCLOG_ERR, "error writing a message to the CU, %s", strerror(errno));
1319 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
1320 if (!peerInfo->isConnected) {
1321 mdclog_write(MDCLOG_ERR, "connection to CU %s is still in progress.", message.message.enodbName);
1326 cleanHashEntry(peerInfo, m);
1329 char key[MAX_ENODB_NAME_SIZE * 2];
1330 snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", message.message.enodbName,
1331 message.message.messageType);
1332 if (loglevel >= MDCLOG_DEBUG) {
1333 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
1335 auto tmp = m->find(key);
1345 message.message.direction = 'D';
1346 // send report.buffer of size
1347 buildJsonMessage(message);
1349 if (loglevel >= MDCLOG_DEBUG) {
1350 mdclog_write(MDCLOG_DEBUG,
1351 "SCTP message for CU %s sent from %s",
1352 message.message.enodbName,
1362 * @param rmrMessageBuffer
1364 void getRequestMetaData(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1365 message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
1366 message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
1368 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1369 mdclog_write(MDCLOG_DEBUG, "Message from Xapp RAN name = %s message length = %ld",
1370 message.message.enodbName, (unsigned long) message.message.asnLength);
1380 * @param numOfMessages
1381 * @param rmrMessageBuffer
1385 int receiveDataFromSctp(struct epoll_event *events,
1386 Sctp_Map_t *sctpMap,
1388 RmrMessagesBuffer_t &rmrMessageBuffer,
1389 struct timespec &ts) {
1390 /* We have data on the fd waiting to be read. Read and display it.
1391 * We must read whatever data is available completely, as we are running
1392 * in edge-triggered mode and won't get a notification again for the same data. */
1393 ReportingMessages_t message {};
1395 auto loglevel = mdclog_level_get();
1396 struct sctp_sndrcvinfo sndrcvinfo;
1400 // get the identity of the interface
1401 if (events->data.ptr != nullptr){
1402 message.peerInfo = (ConnectedCU_t *)events->data.ptr;
1405 struct timespec start{0, 0};
1406 struct timespec decodeStart{0, 0};
1407 struct timespec end{0, 0};
1409 E2AP_PDU_t *pdu = nullptr;
1412 if (loglevel >= MDCLOG_DEBUG) {
1413 mdclog_write(MDCLOG_DEBUG, "Start Read from SCTP %d fd", message.peerInfo->fileDescriptor);
1414 clock_gettime(CLOCK_MONOTONIC, &start);
1416 // read the buffer directly to rmr payload
1417 message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1419 message.message.asnLength = rmrMessageBuffer.sendMessage->len =
1420 sctp_recvmsg(message.peerInfo->fileDescriptor, rmrMessageBuffer.sendMessage->payload, RECEIVE_SCTP_BUFFER_SIZE,(struct sockaddr *) NULL, 0, &sndrcvinfo, &flags);
1421 mdclog_write(MDCLOG_DEBUG, "Start Read from SCTP fd %d stream %d ", message.peerInfo->fileDescriptor, sndrcvinfo.sinfo_stream);
1422 streamId = sndrcvinfo.sinfo_stream;
1424 message.message.asnLength = rmrMessageBuffer.sendMessage->len;
1428 if (loglevel >= MDCLOG_DEBUG) {
1429 mdclog_write(MDCLOG_DEBUG, "Finished read from SCTP fd %d, message length = %ld",
1430 message.peerInfo->fileDescriptor, message.message.asnLength);
1433 memcpy(message.message.enodbName, message.peerInfo->enodbName, sizeof(message.peerInfo->enodbName));
1434 message.message.direction = 'U';
1435 message.message.time.tv_nsec = ts.tv_nsec;
1436 message.message.time.tv_sec = ts.tv_sec;
1438 if (message.message.asnLength < 0) {
1439 if (errno == EINTR) {
1442 /* If errno == EAGAIN, that means we have read all
1443 data. So go back to the main loop. */
1444 if (errno != EAGAIN) {
1445 mdclog_write(MDCLOG_ERR, "Read error, %s ", strerror(errno));
1447 } else if (loglevel >= MDCLOG_DEBUG) {
1448 mdclog_write(MDCLOG_DEBUG, "EAGAIN - descriptor = %d", message.peerInfo->fileDescriptor);
1451 } else if (message.message.asnLength == 0) {
1452 /* End of file. The remote has closed the connection. */
1453 if (loglevel >= MDCLOG_INFO) {
1454 mdclog_write(MDCLOG_INFO, "End of file. Closed connection - descriptor = %d",
1455 message.peerInfo->fileDescriptor);
1461 if (loglevel >= MDCLOG_DEBUG) {
1462 char printBuffer[RECEIVE_SCTP_BUFFER_SIZE]{};
1463 char *tmp = printBuffer;
1464 for (size_t i = 0; i < (size_t)message.message.asnLength; ++i) {
1465 snprintf(tmp, 3, "%02x", message.message.asndata[i]);
1468 printBuffer[message.message.asnLength] = 0;
1469 clock_gettime(CLOCK_MONOTONIC, &end);
1470 mdclog_write(MDCLOG_DEBUG, "Before decoding E2AP PDU for : %s, Read time is : %ld seconds, %ld nanoseconds",
1471 message.peerInfo->enodbName, end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
1472 mdclog_write(MDCLOG_DEBUG, "PDU buffer length = %ld, data = : %s", message.message.asnLength,
1474 clock_gettime(CLOCK_MONOTONIC, &decodeStart);
1477 auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1478 message.message.asndata, message.message.asnLength);
1480 asn_dec_rval_t rval = {RC_OK, 0};
1481 pdu = (E2AP_PDU_t*)rmrMessageBuffer.sendMessage->tp_buf;
1483 if (rval.code != RC_OK) {
1484 mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2AP PDU from RAN : %s", rval.code,
1485 message.peerInfo->enodbName);
1486 if (pdu != nullptr) {
1487 ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);
1493 if (loglevel >= MDCLOG_DEBUG) {
1494 clock_gettime(CLOCK_MONOTONIC, &end);
1495 mdclog_write(MDCLOG_DEBUG, "After decoding E2AP PDU for : %s, decode time is : %ld seconds, %ld nanoseconds",
1496 message.peerInfo->enodbName, end.tv_sec - decodeStart.tv_sec, end.tv_nsec - decodeStart.tv_nsec);
1499 FILE *stream = open_memstream(&printBuffer, &size);
1500 asn_fprint(stream, &asn_DEF_E2AP_PDU, pdu);
1501 mdclog_write(MDCLOG_DEBUG, "Decoded E2AP PDU : %s", printBuffer);
1502 clock_gettime(CLOCK_MONOTONIC, &decodeStart);
1508 switch (pdu->present) {
1509 case E2AP_PDU_PR_initiatingMessage: {//initiating message
1510 asnInitiatingRequest(pdu, sctpMap,message, rmrMessageBuffer, streamId);
1513 case E2AP_PDU_PR_successfulOutcome: { //successful outcome
1514 asnSuccessfulMsg(pdu, sctpMap, message, rmrMessageBuffer);
1517 case E2AP_PDU_PR_unsuccessfulOutcome: { //Unsuccessful Outcome
1518 asnUnSuccsesfulMsg(pdu, sctpMap, message, rmrMessageBuffer);
1522 mdclog_write(MDCLOG_ERR, "Unknown index %d in E2AP PDU", pdu->present);
1525 if (loglevel >= MDCLOG_DEBUG) {
1526 clock_gettime(CLOCK_MONOTONIC, &end);
1527 mdclog_write(MDCLOG_DEBUG,
1528 "After processing message and sent to rmr for : %s, Read time is : %ld seconds, %ld nanoseconds",
1529 message.peerInfo->enodbName, end.tv_sec - decodeStart.tv_sec, end.tv_nsec - decodeStart.tv_nsec);
1533 if (pdu != nullptr) {
1534 // ASN_STRUCT_RESET(asn_DEF_E2AP_PDU, pdu); /* With reset we were not freeing the memory and was causing the leak here. */
1535 ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);
1545 if (loglevel >= MDCLOG_INFO) {
1546 mdclog_write(MDCLOG_INFO, "Closed connection - descriptor = %d", message.peerInfo->fileDescriptor);
1548 message.message.asnLength = rmrMessageBuffer.sendMessage->len =
1549 snprintf((char *)rmrMessageBuffer.sendMessage->payload,
1551 "%s|CU disconnected unexpectedly",
1552 message.peerInfo->enodbName);
1553 message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1554 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
1555 if (sendRequestToXapp(message,
1556 RIC_SCTP_CONNECTION_FAILURE,
1557 rmrMessageBuffer) != 0) {
1558 mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
1562 /* Closing the descriptor makes epoll remove it from the set of monitored descriptors. */
1564 pthread_mutex_lock(&thread_lock);
1565 if (fcntl(message.peerInfo->fileDescriptor, F_GETFD) != -1) {
1566 mdclog_write(MDCLOG_DEBUG, "Closing connection - descriptor = %d", message.peerInfo->fileDescriptor);
1567 close(message.peerInfo->fileDescriptor);
1568 cleanHashEntry((ConnectedCU_t *) events->data.ptr, sctpMap);
1570 pthread_mutex_unlock(&thread_lock);
1572 close(message.peerInfo->fileDescriptor);
1573 cleanHashEntry((ConnectedCU_t *) events->data.ptr, sctpMap);
1576 if (loglevel >= MDCLOG_DEBUG) {
1577 clock_gettime(CLOCK_MONOTONIC, &end);
1578 mdclog_write(MDCLOG_DEBUG, "from receive SCTP to send RMR time is %ld seconds and %ld nanoseconds",
1579 end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
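// Encode the Setup/ENDC Setup request PDU to XER, strip whitespace, prefix it
// with this E2Term's "<ip>:<rmr port>|" address and send it over RMR (routed
// by message type, typically toward the E2 Manager), retrying once on
// RMR_ERR_RETRY; on completion the peer is marked gotSetup and the message is
// reported via buildJsonMessage().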
1585 static void buildAndSendSetupRequest(ReportingMessages_t &message,
1586 RmrMessagesBuffer_t &rmrMessageBuffer,
1588 string const &messageName,
1589 string const &ieName,
1590 vector<string> &functionsToAdd_v,
1591 vector<string> &functionsToModified_v*/) {
1592 auto logLevel = mdclog_level_get();
1593 // now we can send the data to e2Mgr
1596 auto buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
1597 unsigned char *buffer = nullptr;
1598 buffer = (unsigned char *) calloc(buffer_size, sizeof(unsigned char));
1601 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
1602 mdclog_write(MDCLOG_ERR, "Allocating buffer for %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1607 er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu, buffer, buffer_size);
1608 if (er.encoded == -1) {
1609 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
1610 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1613 } else if (er.encoded > (ssize_t) buffer_size) {
1614 buffer_size = er.encoded + 128;
1615 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
1616 mdclog_write(MDCLOG_WARN, "Buffer of size %d is too small for %s. Reallocating buffer of size %d",
1618 asn_DEF_E2AP_PDU.name, buffer_size);
1619 buffer_size = er.encoded + 128;
1621 unsigned char *newBuffer = nullptr;
1622 newBuffer = (unsigned char *) realloc(buffer, buffer_size);
1626 mdclog_write(MDCLOG_ERR, "Reallocating buffer for %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1635 buffer[er.encoded] = '\0';
1640 string res((char *)buffer);
1641 res.erase(std::remove(res.begin(), res.end(), '\n'), res.end());
1642 res.erase(std::remove(res.begin(), res.end(), '\t'), res.end());
1643 res.erase(std::remove(res.begin(), res.end(), ' '), res.end());
1646 // if (!functionsToAdd_v.empty() || !functionsToModified_v.empty()) {
1647 // res = buildXmlData(messageName, ieName, functionsToAdd_v, functionsToModified_v, buffer, (size_t) er.encoded);
1650 // if (res.length() == 0) {
1651 // rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, buffer_size + 256);
1652 // rmrMsg->len = snprintf((char *) rmrMsg->payload, RECEIVE_SCTP_BUFFER_SIZE * 2, "%s:%d|%s",
1653 // message.peerInfo->sctpParams->myIP.c_str(),
1654 // message.peerInfo->sctpParams->rmrPort,
1657 rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, (int)res.length() + 256);
1658 rmrMsg->len = snprintf((char *) rmrMsg->payload, res.length() + 256, "%s:%d|%s",
1659 message.peerInfo->sctpParams->myIP.c_str(),
1660 message.peerInfo->sctpParams->rmrPort,
1664 if (logLevel >= MDCLOG_DEBUG) {
1665 mdclog_write(MDCLOG_DEBUG, "Setup request of size %d :\n %s\n", rmrMsg->len, rmrMsg->payload);
1668 rmrMsg->mtype = message.message.messageType;
1670 rmr_bytes2meid(rmrMsg, (unsigned char *) message.message.enodbName, strlen(message.message.enodbName));
1672 static unsigned char tx[32];
1673 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1674 rmr_bytes2xact(rmrMsg, tx, strlen((const char *) tx));
1676 rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
1678 if (rmrMsg == nullptr) {
1679 mdclog_write(MDCLOG_ERR, "RMR send failed, returned nullptr");
1680 } else if (rmrMsg->state != 0) {
1681 char meid[RMR_MAX_MEID]{};
1682 if (rmrMsg->state == RMR_ERR_RETRY) {
1685 mdclog_write(MDCLOG_INFO, "RETRY sending Message %d to Xapp from %s",
1686 rmrMsg->mtype, rmr_get_meid(rmrMsg, (unsigned char *) meid));
1688 rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
1690 if (rmrMsg == nullptr) {
1691 mdclog_write(MDCLOG_ERR, "RMR retry send failed, returned nullptr");
1692 } else if (rmrMsg->state != 0) {
1693 mdclog_write(MDCLOG_ERR,
1694 "RMR Retry failed %s sending request %d to Xapp from %s",
1695 translateRmrErrorMessages(rmrMsg->state).c_str(),
1697 rmr_get_meid(rmrMsg, (unsigned char *) meid));
1700 mdclog_write(MDCLOG_ERR, "RMR failed: %s. sending request %d to Xapp from %s",
1701 translateRmrErrorMessages(rmrMsg->state).c_str(),
1703 rmr_get_meid(rmrMsg, (unsigned char *) meid));
1706 message.peerInfo->gotSetup = true;
1707 buildJsonMessage(message);
1709 if (rmrMsg != nullptr) {
1710 rmr_free_msg(rmrMsg);
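// For each RANfunction_Item in the list, decode the embedded
// E2SM-gNB-NRT-RANfunction-Definition (APER) and re-encode it as XER,
// collecting the resulting XML strings into runFunXML_v.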
1719 int RAN_Function_list_To_Vector(RANfunctions_List_t& list, vector <string> &runFunXML_v) {
1721 runFunXML_v.clear();
1722 for (auto j = 0; j < list.list.count; j++) {
1723 auto *raNfunctionItemIEs = (RANfunction_ItemIEs_t *)list.list.array[j];
1724 if (raNfunctionItemIEs->id == ProtocolIE_ID_id_RANfunction_Item &&
1725 (raNfunctionItemIEs->value.present == RANfunction_ItemIEs__value_PR_RANfunction_Item)) {
1727 E2SM_gNB_NRT_RANfunction_Definition_t *ranFunDef = nullptr;
1728 auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER,
1729 &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
1730 (void **)&ranFunDef,
1731 raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.buf,
1732 raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.size);
1733 if (rval.code != RC_OK) {
1734 mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2SM message from : %s",
1736 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name);
1740 auto xml_buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
1741 unsigned char xml_buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
1742 memset(xml_buffer, 0, RECEIVE_SCTP_BUFFER_SIZE * 2);
1744 auto er = asn_encode_to_buffer(nullptr,
1746 &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
1750 if (er.encoded == -1) {
1751 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s",
1752 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
1754 } else if (er.encoded > (ssize_t)xml_buffer_size) {
1755 mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, at %s line %d",
1756 (int) xml_buffer_size,
1757 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name, __func__, __LINE__);
1759 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1760 mdclog_write(MDCLOG_DEBUG, "Encoding E2SM %s PDU number %d : %s",
1761 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
1766 string runFuncs = (char *)(xml_buffer);
1767 runFunXML_v.emplace_back(runFuncs);
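// Walk the RICserviceUpdate IEs and convert the RANfunctionsAdded and
// RANfunctionsModified lists into XML string vectors.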
1774 int collectServiceUpdate_RequestData(E2AP_PDU_t *pdu,
1775 Sctp_Map_t *sctpMap,
1776 ReportingMessages_t &message,
1777 vector <string> &RANfunctionsAdded_v,
1778 vector <string> &RANfunctionsModified_v) {
1779 memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
1780 for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.count; i++) {
1781 auto *ie = pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.array[i];
1782 if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
1783 if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctionsID_List) {
1784 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1785 mdclog_write(MDCLOG_DEBUG, "RAN function list has %d entries",
1786 ie->value.choice.RANfunctions_List.list.count);
1788 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
1792 } else if (ie->id == ProtocolIE_ID_id_RANfunctionsModified) {
1793 if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctions_List) {
1794 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1795 mdclog_write(MDCLOG_DEBUG, "RAN function list has %d entries",
1796 ie->value.choice.RANfunctions_List.list.count);
1798 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsModified_v) != 0 ) {
1804 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1805 mdclog_write(MDCLOG_DEBUG, "RAN function vector has %ld entries",
1806 RANfunctionsAdded_v.size());
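// Create one Prometheus counter pair (messages, bytes) per E2AP procedure and
// direction for the E2T instance-level metrics.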
1814 void buildE2TPrometheusCounters(sctp_params_t &sctpParams) {
1815 sctpParams.e2tCounters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &sctpParams.prometheusFamily->Add({{"counter", "SetupRequestMsgs"}});
1816 sctpParams.e2tCounters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &sctpParams.prometheusFamily->Add({{"counter", "SetupRequestBytes"}});
1818 sctpParams.e2tCounters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &sctpParams.prometheusFamily->Add({{"counter", "SetupResponseMsgs"}});
1819 sctpParams.e2tCounters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &sctpParams.prometheusFamily->Add({{"counter", "SetupResponseBytes"}});
1821 sctpParams.e2tCounters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup] = &sctpParams.prometheusFamily->Add({{"counter", "SetupRequestFailureMsgs"}});
1822 sctpParams.e2tCounters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup] = &sctpParams.prometheusFamily->Add({{"counter", "SetupRequestFailureBytes"}});
1824 sctpParams.e2tCounters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_E2nodeConfigurationUpdate)] = &sctpParams.prometheusFamily->Add({{"counter", "E2NodeConfigUpdateMsgs"}});
1825 sctpParams.e2tCounters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_E2nodeConfigurationUpdate)] = &sctpParams.prometheusFamily->Add({{"counter", "E2NodeConfigUpdateBytes"}});
1827 sctpParams.e2tCounters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_E2nodeConfigurationUpdate)] = &sctpParams.prometheusFamily->Add({{"counter", "E2NodeConfigUpdateResponseMsgs"}});
1828 sctpParams.e2tCounters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2nodeConfigurationUpdate)] = &sctpParams.prometheusFamily->Add({{"counter", "E2NodeConfigUpdateResponseBytes"}});
1830 sctpParams.e2tCounters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_E2nodeConfigurationUpdate] = &sctpParams.prometheusFamily->Add({{"counter", "E2NodeConfigUpdateFailureMsgs"}});
1831 sctpParams.e2tCounters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_E2nodeConfigurationUpdate] = &sctpParams.prometheusFamily->Add({{"counter", "E2NodeConfigUpdateFailureBytes"}});
1833 sctpParams.e2tCounters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication)] = &sctpParams.prometheusFamily->Add({{"counter", "ErrorIndicationMsgs"}});
1834 sctpParams.e2tCounters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication)] = &sctpParams.prometheusFamily->Add({{"counter", "ErrorIndicationBytes"}});
1836 sctpParams.e2tCounters[IN_INITI][MSG_COUNTER][ProcedureCode_id_Reset] = &sctpParams.prometheusFamily->Add({{"counter", "ResetRequestMsgs"}});
1837 sctpParams.e2tCounters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_Reset] = &sctpParams.prometheusFamily->Add({{"counter", "ResetRequestBytes"}});
1839 sctpParams.e2tCounters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_Reset] = &sctpParams.prometheusFamily->Add({{"counter", "ResetAckMsgs"}});
1840 sctpParams.e2tCounters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset] = &sctpParams.prometheusFamily->Add({{"counter", "ResetAckBytes"}});
1842 sctpParams.e2tCounters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate] = &sctpParams.prometheusFamily->Add({{"counter", "RICServiceUpdateMsgs"}});
1843 sctpParams.e2tCounters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate] = &sctpParams.prometheusFamily->Add({{"counter", "RICServiceUpdateBytes"}});
1845 sctpParams.e2tCounters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate] = &sctpParams.prometheusFamily->Add({{"counter", "RICServiceUpdateRespMsgs"}});
1846 sctpParams.e2tCounters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate] = &sctpParams.prometheusFamily->Add({{"counter", "RICServiceUpdateRespBytes"}});
1848 sctpParams.e2tCounters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate] = &sctpParams.prometheusFamily->Add({{"counter", "RICServiceUpdateFailureMsgs"}});
1849 sctpParams.e2tCounters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate] = &sctpParams.prometheusFamily->Add({{"counter", "RICServiceUpdateFailureBytes"}});
1851 sctpParams.e2tCounters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICcontrol] = &sctpParams.prometheusFamily->Add({{"counter", "RICControlMsgs"}});
1852 sctpParams.e2tCounters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICcontrol] = &sctpParams.prometheusFamily->Add({{"counter", "RICControlBytes"}});
1854 sctpParams.e2tCounters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol] = &sctpParams.prometheusFamily->Add({{"counter", "RICControlAckMsgs"}});
1855 sctpParams.e2tCounters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol] = &sctpParams.prometheusFamily->Add({{"counter", "RICControlAckBytes"}});
1857 sctpParams.e2tCounters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol] = &sctpParams.prometheusFamily->Add({{"counter", "RICControlFailureMsgs"}});
1858 sctpParams.e2tCounters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol] = &sctpParams.prometheusFamily->Add({{"counter", "RICControlFailureBytes"}});
1860 sctpParams.e2tCounters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscription] = &sctpParams.prometheusFamily->Add({{"counter", "RICSubscriptionMsgs"}});
1861 sctpParams.e2tCounters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscription] = &sctpParams.prometheusFamily->Add({{"counter", "RICSubscriptionBytes"}});
1863 sctpParams.e2tCounters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription] = &sctpParams.prometheusFamily->Add({{"counter", "RICSubscriptionAckMsgs"}});
1864 sctpParams.e2tCounters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription] = &sctpParams.prometheusFamily->Add({{"counter", "RICSubscriptionAckBytes"}});
1866 sctpParams.e2tCounters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription] = &sctpParams.prometheusFamily->Add({{"counter", "RICSubscriptionFailureMsgs"}});
1867 sctpParams.e2tCounters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription] = &sctpParams.prometheusFamily->Add({{"counter", "RICSubscriptionFailureBytes"}});
1869 sctpParams.e2tCounters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete] = &sctpParams.prometheusFamily->Add({{"counter", "RICSubscriptionDeleteMsgs"}});
1870 sctpParams.e2tCounters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete] = &sctpParams.prometheusFamily->Add({{"counter", "RICSubscriptionDeleteBytes"}});
1872 sctpParams.e2tCounters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete] = &sctpParams.prometheusFamily->Add({{"counter", "RICSubscriptionDeleteAckMsgs"}});
1873 sctpParams.e2tCounters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete] = &sctpParams.prometheusFamily->Add({{"counter", "RICSubscriptionDeleteAckBytes"}});
1875 sctpParams.e2tCounters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete] = &sctpParams.prometheusFamily->Add({{"counter", "RICSubscriptionDeleteFailMsgs"}});
1876 sctpParams.e2tCounters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete] = &sctpParams.prometheusFamily->Add({{"counter", "RICSubscriptionDeleteFailBytes"}});
1878 sctpParams.e2tCounters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICindication] = &sctpParams.prometheusFamily->Add({{"counter", "RICIndicationMsgs"}});
1879 sctpParams.e2tCounters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICindication] = &sctpParams.prometheusFamily->Add({{"counter", "RICIndicationBytes"}});
1881 sctpParams.e2tCounters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceQuery] = &sctpParams.prometheusFamily->Add({{"counter", "RICServiceQueryMsgs"}});
1882 sctpParams.e2tCounters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery] = &sctpParams.prometheusFamily->Add({{"counter", "RICServiceQueryBytes"}});
1884 sctpParams.e2tCounters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDeleteRequired] = &sctpParams.prometheusFamily->Add({{"counter", "RICSubscriptionDeleteRequiredMsgs"}});
1885 sctpParams.e2tCounters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDeleteRequired] = &sctpParams.prometheusFamily->Add({{"counter", "RICSubscriptionDeleteRequiredBytes"}});
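/*
 * Per-peer counters built below mirror the E2T-instance counters built above:
 * they are indexed as counters[direction][MSG_COUNTER|BYTES_COUNTER][procedure code],
 * where the IN_* directions label traffic received from the E2 node and the
 * OUT_* directions label traffic sent towards it (matching the "IN"/"OUT"
 * labels attached to each Prometheus counter).
 */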
1889 void buildPrometheusList(ConnectedCU_t *peerInfo, Family<Counter> *prometheusFamily) {
1890 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Messages"}});
1891 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Bytes"}});
1893 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_E2nodeConfigurationUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"E2NodeConfigUpdate", "Messages"}});
1894 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_E2nodeConfigurationUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"E2NodeConfigUpdate", "Bytes"}});
1896 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Messages"}});
1897 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Bytes"}});
1899 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICindication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Messages"}});
1900 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICindication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Bytes"}});
1902 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Messages"}});
1903 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Bytes"}});
1905 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Messages"}});
1906 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Bytes"}});
1907 // ---------------------------------------------
1908 peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Messages"}});
1909 peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Bytes"}});
1911 peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Messages"}});
1912 peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Bytes"}});
1914 peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Messages"}});
1915 peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Bytes"}});
1917 peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Messages"}});
1918 peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Bytes"}});
1919 //-------------------------------------------------------------
1921 peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Messages"}});
1922 peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Bytes"}});
1924 peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Messages"}});
1925 peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Bytes"}});
1927 peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Messages"}});
1928 peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Bytes"}});
1930 //====================================================================================
1931 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Messages"}});
1932 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Bytes"}});
1934 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Messages"}});
1935 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Bytes"}});
1937 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Messages"}});
1938 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Bytes"}});
1940 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceQuery)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Messages"}});
1941 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceQuery)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Bytes"}});
1943 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Messages"}});
1944 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Bytes"}});
1946 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Messages"}});
1947 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Bytes"}});
1948 //---------------------------------------------------------------------------------------------------------
1949 peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Messages"}});
1950 peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Bytes"}});
1952 peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_E2nodeConfigurationUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"E2NodeConfigUpdateSuccess", "Messages"}});
1953 peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2nodeConfigurationUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"E2NodeConfigUpdateSuccess", "Bytes"}});
1955 peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Messages"}});
1956 peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Bytes"}});
1958 peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Messages"}});
1959 peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Bytes"}});
1960 //----------------------------------------------------------------------------------------------------------------
1961 peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Messages"}});
1962 peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Bytes"}});
1964 peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_E2nodeConfigurationUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"E2NodeConfigUpdateFailure", "Messages"}});
1965 peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2nodeConfigurationUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"E2NodeConfigUpdateFailure", "Bytes"}});
1967 peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Messages"}});
1968 peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Bytes"}});
1971 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDeleteRequired)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteRequired", "Messages"}});
1972 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDeleteRequired)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteRequired", "Bytes"}});
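/*
 * Illustrative sketch only: after receiving an E2setupRequest, a handler bumps
 * the matching message/byte counter pair, exactly as the asn* handlers further
 * below do for each procedure:
 *
 *   peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
 *   peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment((double)message.message.asnLength);
 */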
/**
 * Extract the E2 node (RAN) name from the E2setupRequest PDU, record it in the
 * reporting message, and register the peer in the SCTP map under that name.
 *
 * @param pdu decoded E2AP PDU carrying the E2setupRequest
 * @param sctpMap map of connected E2 nodes, keyed by RAN name
 * @param message reporting message whose peerInfo->enodbName is filled in
 * @param RANfunctionsAdded_v currently unused (parameter is commented out)
 * @return 0 on success, non-zero on failure (callers check != 0)
 */
1984 int collectSetupRequestData(E2AP_PDU_t *pdu,
1985 Sctp_Map_t *sctpMap,
1986 ReportingMessages_t &message /*, vector <string> &RANfunctionsAdded_v*/) {
memset(message.peerInfo->enodbName, 0, MAX_ENODB_NAME_SIZE);
1988 for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.count; i++) {
1989 auto *ie = pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.array[i];
1990 if (ie->id == ProtocolIE_ID_id_GlobalE2node_ID) {
// get the RAN name (also used as the RMR meid)
1992 if (ie->value.present == E2setupRequestIEs__value_PR_GlobalE2node_ID) {
1993 if (buildRanName(message.peerInfo->enodbName, ie) < 0) {
1994 mdclog_write(MDCLOG_ERR, "Bad param in E2setupRequestIEs GlobalE2node_ID.\n");
1995 // no message will be sent
1999 memcpy(message.message.enodbName, message.peerInfo->enodbName, strlen(message.peerInfo->enodbName));
2000 sctpMap->setkey(message.message.enodbName, message.peerInfo);
2002 } /*else if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
2003 if (ie->value.present == E2setupRequestIEs__value_PR_RANfunctions_List) {
2004 if (mdclog_level_get() >= MDCLOG_DEBUG) {
mdclog_write(MDCLOG_DEBUG, "RAN function list has %d entries",
2006 ie->value.choice.RANfunctions_List.list.count);
2008 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
2014 // if (mdclog_level_get() >= MDCLOG_DEBUG) {
//            mdclog_write(MDCLOG_DEBUG, "RAN function vector has %ld entries",
2016 // RANfunctionsAdded_v.size());
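/**
 * Re-encode the PER-encoded E2AP PDU held in rmrMessageBuffer.sendMessage into
 * XER (XML) in place, so it can be delivered to the xApp in XML form.
 * Counterpart of PER_FromXML().
 *
 * @param message reporting message (used for error logging)
 * @param rmrMessageBuffer RMR buffer whose sendMessage payload and length are rewritten
 * @return a non-negative value on success, negative on failure (callers check for < 0)
 */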
2021 int XML_From_PER(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
2022 E2AP_PDU_t *pdu = nullptr;
2024 if (mdclog_level_get() >= MDCLOG_DEBUG) {
mdclog_write(MDCLOG_DEBUG, "got PER message of size %d: %s",
2026 rmrMessageBuffer.sendMessage->len, rmrMessageBuffer.sendMessage->payload);
2028 auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
2029 rmrMessageBuffer.sendMessage->payload, rmrMessageBuffer.sendMessage->len);
2030 if (rval.code != RC_OK) {
mdclog_write(MDCLOG_ERR, "Error %d decoding (unpack) PER E2AP PDU received from %s",
2033 message.message.enodbName);
2034 if (pdu != nullptr) {
2035 ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);
2041 int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
2042 auto er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu,
2043 rmrMessageBuffer.sendMessage->payload, buff_size);
2044 if (er.encoded == -1) {
2045 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
2046 if (pdu != nullptr) {
2047 ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);
2051 } else if (er.encoded > (ssize_t)buff_size) {
mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, at %s line %d",
2053 (int)rmrMessageBuffer.sendMessage->len,
2054 asn_DEF_E2AP_PDU.name,
2057 if (pdu != nullptr) {
2058 ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);
2063 rmrMessageBuffer.sendMessage->len = er.encoded;
2064 if (pdu != nullptr) {
2065 ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);
/**
 * Handle an E2AP initiating message received from an E2 node: dispatch on the
 * procedure code, update the per-peer and E2T-instance Prometheus counters and
 * forward the message over RMR to the xApp / E2 Manager.
 *
 * @param pdu decoded E2AP PDU (initiating message)
 * @param sctpMap map of connected E2 nodes
 * @param message reporting message describing the received data
 * @param rmrMessageBuffer RMR buffers used to forward the message
 * @param streamId SCTP stream on which the message arrived
 */
2078 void asnInitiatingRequest(E2AP_PDU_t *pdu,
2079 Sctp_Map_t *sctpMap,
2080 ReportingMessages_t &message,
2081 RmrMessagesBuffer_t &rmrMessageBuffer, int streamId) {
2082 auto logLevel = mdclog_level_get();
2083 auto procedureCode = ((InitiatingMessage_t *) pdu->choice.initiatingMessage)->procedureCode;
2084 if (logLevel >= MDCLOG_DEBUG) {
2085 mdclog_write(MDCLOG_DEBUG, "Initiating message %ld\n", procedureCode);
2087 switch (procedureCode) {
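// Only the procedure codes below are forwarded to the xApp / E2 Manager; any
// other initiating procedure falls through to the default branch, which logs
// it and records a JSON report without forwarding it anywhere.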
2088 case ProcedureCode_id_E2setup: {
2089 if (logLevel >= MDCLOG_DEBUG) {
2090 mdclog_write(MDCLOG_DEBUG, "Got E2setup");
2093 // vector <string> RANfunctionsAdded_v;
2094 // vector <string> RANfunctionsModified_v;
2095 // RANfunctionsAdded_v.clear();
2096 // RANfunctionsModified_v.clear();
2097 if (collectSetupRequestData(pdu, sctpMap, message) != 0) {
2100 struct sctp_status status;
socklen_t stat_size = sizeof(status);
getsockopt(message.peerInfo->fileDescriptor, SOL_SCTP, SCTP_STATUS, (void *)&status, &stat_size);
2103 if (logLevel >= MDCLOG_DEBUG) {
2104 mdclog_write(MDCLOG_DEBUG, "Start from SCTP %d fd", message.peerInfo->fileDescriptor);
2105 mdclog_write(MDCLOG_DEBUG, "SCTP status assoc id %d instrms %d outstrms %d", status.sstat_assoc_id,
2106 status.sstat_instrms, status.sstat_outstrms);
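// Some E2 nodes negotiate only a single SCTP stream in each direction; when
// that happens, remember the stream id so every subsequent send to this peer
// uses the same stream.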
2108 if(status.sstat_outstrms == 1 || status.sstat_instrms == 1)
2110 message.peerInfo->isSingleStream = true;
2111 message.peerInfo->singleStreamId = streamId;
2112 if (status.sstat_outstrms == 1 && status.sstat_instrms == 1){
2113 if (logLevel >= MDCLOG_DEBUG) {
2114 mdclog_write(MDCLOG_DEBUG, "Single SCTP stream is used for sending from now on, assoc id %d streamId %d #instrms %d #outstrms %d, %s",status.sstat_assoc_id, streamId, status.sstat_instrms, status.sstat_outstrms, __FUNCTION__);
mdclog_write(MDCLOG_ERR, "Using a single SCTP stream for sending even though the numbers of in and out streams differ, assoc id %d instrms %d outstrms %d", status.sstat_assoc_id,
2119 status.sstat_instrms, status.sstat_outstrms);
2123 buildPrometheusList(message.peerInfo, message.peerInfo->sctpParams->prometheusFamily);
2125 string messageName("E2setupRequest");
2126 string ieName("E2setupRequestIEs");
2127 message.message.messageType = RIC_E2_SETUP_REQ;
2128 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
2129 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment((double)message.message.asnLength);
2131 // Update E2T instance level metrics
2132 message.peerInfo->sctpParams->e2tCounters[IN_INITI][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
2133 message.peerInfo->sctpParams->e2tCounters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment((double)message.message.asnLength);
2135 buildAndSendSetupRequest(message, rmrMessageBuffer, pdu);
2138 case ProcedureCode_id_RICserviceUpdate: {
2139 if (logLevel >= MDCLOG_DEBUG) {
2140 mdclog_write(MDCLOG_DEBUG, "Got RICserviceUpdate %s", message.message.enodbName);
2142 // vector <string> RANfunctionsAdded_v;
2143 // vector <string> RANfunctionsModified_v;
2144 // RANfunctionsAdded_v.clear();
2145 // RANfunctionsModified_v.clear();
2146 // if (collectServiceUpdate_RequestData(pdu, sctpMap, message,
2147 // RANfunctionsAdded_v, RANfunctionsModified_v) != 0) {
2151 string messageName("RICserviceUpdate");
2152 string ieName("RICserviceUpdateIEs");
2153 message.message.messageType = RIC_SERVICE_UPDATE;
2154 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2155 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
2156 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment((double)message.message.asnLength);
2158 // Update E2T instance level metrics
2159 message.peerInfo->sctpParams->e2tCounters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
2160 message.peerInfo->sctpParams->e2tCounters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment((double)message.message.asnLength);
2162 buildAndSendSetupRequest(message, rmrMessageBuffer, pdu);
2166 case ProcedureCode_id_E2nodeConfigurationUpdate: {
2167 if (logLevel >= MDCLOG_DEBUG) {
2168 mdclog_write(MDCLOG_DEBUG, "Got E2nodeConfigurationUpdate %s", message.message.enodbName);
2171 string messageName("RICE2nodeConfigurationUpdate");
2172 string ieName("RICE2nodeConfigurationUpdateIEs");
2173 message.message.messageType = RIC_E2NODE_CONFIG_UPDATE;
2174 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2175 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_E2nodeConfigurationUpdate]->Increment();
2176 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_E2nodeConfigurationUpdate]->Increment((double)message.message.asnLength);
2178 // Update E2T instance level metrics
2179 message.peerInfo->sctpParams->e2tCounters[IN_INITI][MSG_COUNTER][ProcedureCode_id_E2nodeConfigurationUpdate]->Increment();
2180 message.peerInfo->sctpParams->e2tCounters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_E2nodeConfigurationUpdate]->Increment((double)message.message.asnLength);
2182 buildAndSendSetupRequest(message, rmrMessageBuffer, pdu);
2186 case ProcedureCode_id_ErrorIndication: {
2187 if (logLevel >= MDCLOG_DEBUG) {
2188 mdclog_write(MDCLOG_DEBUG, "Got ErrorIndication %s", message.message.enodbName);
2190 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2191 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication]->Increment();
2192 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication]->Increment((double)message.message.asnLength);
2194 // Update E2T instance level metrics
2195 message.peerInfo->sctpParams->e2tCounters[IN_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication]->Increment();
2196 message.peerInfo->sctpParams->e2tCounters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication]->Increment((double)message.message.asnLength);
2198 if (sendRequestToXapp(message, RIC_ERROR_INDICATION, rmrMessageBuffer) != 0) {
2199 mdclog_write(MDCLOG_ERR, "RIC_ERROR_INDICATION failed to send to xAPP");
2203 case ProcedureCode_id_Reset: {
2204 if (logLevel >= MDCLOG_DEBUG) {
2205 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
2207 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2208 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
2209 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment((double)message.message.asnLength);
2211 // Update E2T instance level metrics
2212 message.peerInfo->sctpParams->e2tCounters[IN_INITI][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
2213 message.peerInfo->sctpParams->e2tCounters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment((double)message.message.asnLength);
2215 if (XML_From_PER(message, rmrMessageBuffer) < 0) {
2219 if (sendRequestToXapp(message, RIC_E2_RESET_REQ, rmrMessageBuffer) != 0) {
2220 mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_REQ message failed to send to xAPP");
2224 case ProcedureCode_id_RICindication: {
2225 if (logLevel >= MDCLOG_DEBUG) {
2226 mdclog_write(MDCLOG_DEBUG, "Got RICindication %s", message.message.enodbName);
2228 for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.count; i++) {
2229 auto messageSent = false;
2230 RICindication_IEs_t *ie = pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.array[i];
2231 if (logLevel >= MDCLOG_DEBUG) {
2232 mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
2234 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
2235 if (logLevel >= MDCLOG_DEBUG) {
2236 mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
2238 if (ie->value.present == RICindication_IEs__value_PR_RICrequestID) {
2239 static unsigned char tx[32];
2240 message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_INDICATION;
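// Stamp a fresh transaction id (monotonically increasing transactionCounter)
// and the E2 node name (as the RMR meid) on the outgoing indication.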
2241 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2242 rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
2243 rmr_bytes2meid(rmrMessageBuffer.sendMessage,
2244 (unsigned char *)message.message.enodbName,
2245 strlen(message.message.enodbName));
2246 rmrMessageBuffer.sendMessage->state = 0;
2247 rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
2249 //ie->value.choice.RICrequestID.ricInstanceID;
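// sub_id is set to the RIC instance id so RMR routing, which can key on
// (message type, subscription id), delivers the indication to the xApp that
// owns this subscription.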
2250 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2251 mdclog_write(MDCLOG_DEBUG, "sub id = %d, mtype = %d, ric instance id %ld, requestor id = %ld",
2252 rmrMessageBuffer.sendMessage->sub_id,
2253 rmrMessageBuffer.sendMessage->mtype,
2254 ie->value.choice.RICrequestID.ricInstanceID,
2255 ie->value.choice.RICrequestID.ricRequestorID);
2257 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2258 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICindication]->Increment();
2259 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICindication]->Increment((double)message.message.asnLength);
2261 // Update E2T instance level metrics
2262 message.peerInfo->sctpParams->e2tCounters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICindication]->Increment();
2263 message.peerInfo->sctpParams->e2tCounters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICindication]->Increment((double)message.message.asnLength);
2265 sendRmrMessage(rmrMessageBuffer, message);
mdclog_write(MDCLOG_ERR, "RIC request id missing, illegal request");
2277 case ProcedureCode_id_RICsubscriptionDeleteRequired: {
2278 if (logLevel >= MDCLOG_DEBUG) {
2279 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDeleteRequired %s", message.message.enodbName);
2281 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2282 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDeleteRequired]->Increment();
2283 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDeleteRequired]->Increment((double)message.message.asnLength);
2285 // Update E2T instance level metrics
2286 message.peerInfo->sctpParams->e2tCounters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDeleteRequired]->Increment();
2287 message.peerInfo->sctpParams->e2tCounters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDeleteRequired]->Increment((double)message.message.asnLength);
2289 if (sendRequestToXapp(message, RIC_SUB_DEL_REQUIRED, rmrMessageBuffer) != 0) {
2290 mdclog_write(MDCLOG_ERR, "Subscription Delete Required message failed to send to xAPP");
mdclog_write(MDCLOG_ERR, "Undefined or unsupported procedure code = %ld", procedureCode);
2297 message.message.messageType = 0; // no RMR message type yet
2299 buildJsonMessage(message);
/**
 * Handle an E2AP successful-outcome message received from an E2 node: dispatch
 * on the procedure code, update the counters and forward the acknowledgement
 * (reset response, control/subscription ACKs) to the xApp over RMR.
 *
 * @param pdu decoded E2AP PDU (successful outcome)
 * @param sctpMap map of connected E2 nodes
 * @param message reporting message describing the received data
 * @param rmrMessageBuffer RMR buffers used to forward the message
 */
2312 void asnSuccessfulMsg(E2AP_PDU_t *pdu,
2313 Sctp_Map_t *sctpMap,
2314 ReportingMessages_t &message,
2315 RmrMessagesBuffer_t &rmrMessageBuffer) {
2316 auto procedureCode = pdu->choice.successfulOutcome->procedureCode;
2317 auto logLevel = mdclog_level_get();
2318 if (logLevel >= MDCLOG_INFO) {
2319 mdclog_write(MDCLOG_INFO, "Successful Outcome %ld", procedureCode);
2321 switch (procedureCode) {
2322 case ProcedureCode_id_Reset: {
2323 if (logLevel >= MDCLOG_DEBUG) {
2324 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
2326 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2327 message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
2328 message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment((double)message.message.asnLength);
2330 // Update E2T instance level metrics
2331 message.peerInfo->sctpParams->e2tCounters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
2332 message.peerInfo->sctpParams->e2tCounters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment((double)message.message.asnLength);
2334 if (XML_From_PER(message, rmrMessageBuffer) < 0) {
2337 if (sendRequestToXapp(message, RIC_E2_RESET_RESP, rmrMessageBuffer) != 0) {
2338 mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_RESP message failed to send to xAPP");
2342 case ProcedureCode_id_RICcontrol: {
2343 if (logLevel >= MDCLOG_DEBUG) {
2344 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
2347 i < pdu->choice.successfulOutcome->value.choice.RICcontrolAcknowledge.protocolIEs.list.count; i++) {
2348 auto messageSent = false;
2349 RICcontrolAcknowledge_IEs_t *ie = pdu->choice.successfulOutcome->value.choice.RICcontrolAcknowledge.protocolIEs.list.array[i];
2350 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2351 mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
2353 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
2354 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2355 mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
2357 if (ie->value.present == RICcontrolAcknowledge_IEs__value_PR_RICrequestID) {
2358 message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_ACK;
2359 rmrMessageBuffer.sendMessage->state = 0;
2360 // rmrMessageBuffer.sendMessage->sub_id = (int) ie->value.choice.RICrequestID.ricRequestorID;
2361 rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
2363 static unsigned char tx[32];
2364 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2365 rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
2366 rmr_bytes2meid(rmrMessageBuffer.sendMessage,
2367 (unsigned char *)message.message.enodbName,
2368 strlen(message.message.enodbName));
2369 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2370 message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
2371 message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment((double)message.message.asnLength);
2373 // Update E2T instance level metrics
2374 message.peerInfo->sctpParams->e2tCounters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
2375 message.peerInfo->sctpParams->e2tCounters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment((double)message.message.asnLength);
2377 sendRmrMessage(rmrMessageBuffer, message);
mdclog_write(MDCLOG_ERR, "RIC request id missing, illegal request");
2390 case ProcedureCode_id_RICsubscription: {
2391 if (logLevel >= MDCLOG_DEBUG) {
2392 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
2394 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2395 message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
2396 message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment((double)message.message.asnLength);
2398 // Update E2T instance level metrics
2399 message.peerInfo->sctpParams->e2tCounters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
2400 message.peerInfo->sctpParams->e2tCounters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment((double)message.message.asnLength);
2402 if (sendRequestToXapp(message, RIC_SUB_RESP, rmrMessageBuffer) != 0) {
2403 mdclog_write(MDCLOG_ERR, "Subscription successful message failed to send to xAPP");
2407 case ProcedureCode_id_RICsubscriptionDelete: {
2408 if (logLevel >= MDCLOG_DEBUG) {
2409 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
2411 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2412 message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
2413 message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment((double)message.message.asnLength);
2415 // Update E2T instance level metrics
2416 message.peerInfo->sctpParams->e2tCounters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
2417 message.peerInfo->sctpParams->e2tCounters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment((double)message.message.asnLength);
2419 if (sendRequestToXapp(message, RIC_SUB_DEL_RESP, rmrMessageBuffer) != 0) {
2420 mdclog_write(MDCLOG_ERR, "Subscription delete successful message failed to send to xAPP");
mdclog_write(MDCLOG_WARN, "Undefined or unsupported procedure code = %ld", procedureCode);
2426 message.message.messageType = 0; // no RMR message type yet
2427 buildJsonMessage(message);
/**
 * Handle an E2AP unsuccessful-outcome message received from an E2 node:
 * dispatch on the procedure code, update the counters and forward the failure
 * (control/subscription failures) to the xApp over RMR.
 *
 * @param pdu decoded E2AP PDU (unsuccessful outcome)
 * @param sctpMap map of connected E2 nodes
 * @param message reporting message describing the received data
 * @param rmrMessageBuffer RMR buffers used to forward the message
 */
2440 void asnUnSuccsesfulMsg(E2AP_PDU_t *pdu,
2441 Sctp_Map_t *sctpMap,
2442 ReportingMessages_t &message,
2443 RmrMessagesBuffer_t &rmrMessageBuffer) {
2444 auto procedureCode = pdu->choice.unsuccessfulOutcome->procedureCode;
2445 auto logLevel = mdclog_level_get();
2446 if (logLevel >= MDCLOG_INFO) {
2447 mdclog_write(MDCLOG_INFO, "Unsuccessful Outcome %ld", procedureCode);
2449 switch (procedureCode) {
2450 case ProcedureCode_id_RICcontrol: {
2451 if (logLevel >= MDCLOG_DEBUG) {
2452 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
2455 i < pdu->choice.unsuccessfulOutcome->value.choice.RICcontrolFailure.protocolIEs.list.count; i++) {
2456 auto messageSent = false;
2457 RICcontrolFailure_IEs_t *ie = pdu->choice.unsuccessfulOutcome->value.choice.RICcontrolFailure.protocolIEs.list.array[i];
2458 if (logLevel >= MDCLOG_DEBUG) {
2459 mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
2461 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
2462 if (logLevel >= MDCLOG_DEBUG) {
2463 mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
2465 if (ie->value.present == RICcontrolFailure_IEs__value_PR_RICrequestID) {
2466 message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_FAILURE;
2467 rmrMessageBuffer.sendMessage->state = 0;
2468 // rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricRequestorID;
2469 rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
2470 static unsigned char tx[32];
2471 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2472 rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
2473 rmr_bytes2meid(rmrMessageBuffer.sendMessage, (unsigned char *) message.message.enodbName,
2474 strlen(message.message.enodbName));
2475 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2476 message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
2477 message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment((double)message.message.asnLength);
2479 // Update E2T instance level metrics
2480 message.peerInfo->sctpParams->e2tCounters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
2481 message.peerInfo->sctpParams->e2tCounters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment((double)message.message.asnLength);
2483 sendRmrMessage(rmrMessageBuffer, message);
mdclog_write(MDCLOG_ERR, "RIC request id missing, illegal request");
2495 case ProcedureCode_id_RICsubscription: {
2496 if (logLevel >= MDCLOG_DEBUG) {
2497 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
2499 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2500 message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
2501 message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment((double)message.message.asnLength);
2503 // Update E2T instance level metrics
2504 message.peerInfo->sctpParams->e2tCounters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
2505 message.peerInfo->sctpParams->e2tCounters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment((double)message.message.asnLength);
2507 if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
2508 mdclog_write(MDCLOG_ERR, "Subscription unsuccessful message failed to send to xAPP");
2512 case ProcedureCode_id_RICsubscriptionDelete: {
2513 if (logLevel >= MDCLOG_DEBUG) {
2514 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
2516 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2517 message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
2518 message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment((double)message.message.asnLength);
2520 // Update E2T instance level metrics
2521 message.peerInfo->sctpParams->e2tCounters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
2522 message.peerInfo->sctpParams->e2tCounters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment((double)message.message.asnLength);
2524 if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
2525 mdclog_write(MDCLOG_ERR, "Subscription Delete unsuccessful message failed to send to xAPP");
mdclog_write(MDCLOG_WARN, "Undefined or unsupported procedure code = %ld", procedureCode);
2531 message.message.messageType = 0; // no RMR message type yet
2532 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2533 buildJsonMessage(message);
/**
 * Send a request to the xApp over RMR, using the E2 node name as the meid and
 * the given RMR message type.
 *
 * @param message reporting message (provides the E2 node name and message type)
 * @param requestId RMR message type to send
 * @param rmrMmessageBuffer RMR buffers used for sending
 * @return 0 on success, non-zero on failure (callers check != 0)
 */
2547 int sendRequestToXapp(ReportingMessages_t &message,
2549 RmrMessagesBuffer_t &rmrMmessageBuffer) {
2550 rmr_bytes2meid(rmrMmessageBuffer.sendMessage,
2551 (unsigned char *)message.message.enodbName,
2552 strlen(message.message.enodbName));
2553 message.message.messageType = rmrMmessageBuffer.sendMessage->mtype = requestId;
2554 rmrMmessageBuffer.sendMessage->state = 0;
2555 static unsigned char tx[32];
2556 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2557 rmr_bytes2xact(rmrMmessageBuffer.sendMessage, tx, strlen((const char *) tx));
2559 auto rc = sendRmrMessage(rmrMmessageBuffer, message);
/**
 * Initialize the RMR context, wait until RMR reports ready (routing table
 * loaded) and register the RMR receive file descriptor with the epoll
 * instance. On failure pSctpParams.rmrCtx is left as nullptr.
 *
 * @param pSctpParams SCTP/RMR runtime parameters to populate
 */
2567 void getRmrContext(sctp_params_t &pSctpParams) {
2568 pSctpParams.rmrCtx = nullptr;
2569 pSctpParams.rmrCtx = rmr_init(pSctpParams.rmrAddress, RECEIVE_XAPP_BUFFER_SIZE, RMRFL_NONE);
2570 if (pSctpParams.rmrCtx == nullptr) {
2571 mdclog_write(MDCLOG_ERR, "Failed to initialize RMR");
2575 rmr_set_stimeout(pSctpParams.rmrCtx, 0); // disable retries for any send operation
// wait until the routing table exists and RMR reports ready before we continue
2577 if (mdclog_level_get() >= MDCLOG_INFO) {
mdclog_write(MDCLOG_INFO, "RMR initialized, waiting for RMR ready state");
2583 if ((rmrReady = rmr_ready(pSctpParams.rmrCtx)) == 0) {
2587 if (count % 60 == 0) {
mdclog_write(MDCLOG_INFO, "waiting for RMR ready state for %d seconds", count);
2591 if (mdclog_level_get() >= MDCLOG_INFO) {
2592 mdclog_write(MDCLOG_INFO, "RMR running");
2594 rmr_init_trace(pSctpParams.rmrCtx, 200);
2595 // get the RMR fd for the epoll
2596 pSctpParams.rmrListenFd = rmr_get_rcvfd(pSctpParams.rmrCtx);
2597 struct epoll_event event{};
2598 // add RMR fd to epoll
2599 event.events = (EPOLLIN);
2600 event.data.fd = pSctpParams.rmrListenFd;
2601 // add listening RMR FD to epoll
2602 if (epoll_ctl(pSctpParams.epoll_fd, EPOLL_CTL_ADD, pSctpParams.rmrListenFd, &event)) {
2603 mdclog_write(MDCLOG_ERR, "Failed to add RMR descriptor to epoll");
2604 close(pSctpParams.rmrListenFd);
2605 rmr_close(pSctpParams.rmrCtx);
2606 pSctpParams.rmrCtx = nullptr;
/**
 * Re-encode the XML (XER) payload received from the xApp into aligned PER, in
 * place, so it can be sent to the E2 node. Counterpart of XML_From_PER().
 *
 * @param message reporting message (used for error logging)
 * @param rmrMessageBuffer RMR buffer whose rcvMessage payload and length are rewritten
 * @return 0 on success, non-zero on failure (callers check != 0)
 */
2616 int PER_FromXML(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
2617 E2AP_PDU_t *pdu = nullptr;
2619 if (mdclog_level_get() >= MDCLOG_DEBUG) {
mdclog_write(MDCLOG_DEBUG, "got XML format data from xApp of size %d: %s",
2621 rmrMessageBuffer.rcvMessage->len, rmrMessageBuffer.rcvMessage->payload);
2623 auto rval = asn_decode(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, (void **) &pdu,
2624 rmrMessageBuffer.rcvMessage->payload, rmrMessageBuffer.rcvMessage->len);
2625 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2626 mdclog_write(MDCLOG_DEBUG, "%s After decoding the XML to PDU", __func__ );
2628 if (rval.code != RC_OK) {
mdclog_write(MDCLOG_ERR, "Error %d decoding (unpack) XML message from xApp for %s",
2634 message.message.enodbName);
2635 if (pdu != nullptr) {
2636 ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);
2642 int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
2643 auto er = asn_encode_to_buffer(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, pdu,
2644 rmrMessageBuffer.rcvMessage->payload, buff_size);
2645 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2646 mdclog_write(MDCLOG_DEBUG, "%s After encoding PDU to PER", __func__ );
2648 if (er.encoded == -1) {
2649 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
2650 if (pdu != nullptr) {
2651 ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);
2655 } else if (er.encoded > (ssize_t)buff_size) {
mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, at %s line %d",
2657 (int)rmrMessageBuffer.rcvMessage->len,
2658 asn_DEF_E2AP_PDU.name,
2661 if (pdu != nullptr) {
2662 ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);
2667 rmrMessageBuffer.rcvMessage->len = er.encoded;
2668 if (pdu != nullptr) {
2669 ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);
/**
 * Receive one message from an xApp over RMR and dispatch it by RMR message
 * type: convert XML to PER where needed, update the counters and forward the
 * message to the E2 node over SCTP (keep-alive and health-check requests are
 * answered directly).
 *
 * @param sctpMap map of connected E2 nodes
 * @param rmrMessageBuffer RMR buffers used for receiving and sending
 * @param ts receive timestamp recorded in the reporting message
 */
2682 int receiveXappMessages(Sctp_Map_t *sctpMap,
2683 RmrMessagesBuffer_t &rmrMessageBuffer,
2684 struct timespec &ts) {
2685 int loglevel = mdclog_level_get();
2686 if (rmrMessageBuffer.rcvMessage == nullptr) {
mdclog_write(MDCLOG_ERR, "Failed to allocate RMR message, %s", strerror(errno));
2692 // if (loglevel >= MDCLOG_DEBUG) {
2693 // mdclog_write(MDCLOG_DEBUG, "Call to rmr_rcv_msg");
2695 rmrMessageBuffer.rcvMessage = rmr_rcv_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.rcvMessage);
2696 if (rmrMessageBuffer.rcvMessage == nullptr) {
mdclog_write(MDCLOG_ERR, "rmr_rcv_msg returned a null pointer, reallocating the RMR message buffer");
2698 rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2701 ReportingMessages_t message;
2702 message.message.direction = 'D';
2703 message.message.time.tv_nsec = ts.tv_nsec;
2704 message.message.time.tv_sec = ts.tv_sec;
2706 // get message payload
2707 //auto msgData = msg->payload;
2709 rmrMessageBuffer.rcvMessage->state = 0;
2711 if (rmrMessageBuffer.rcvMessage->state != 0) {
mdclog_write(MDCLOG_ERR, "RMR received message with state = %d", rmrMessageBuffer.rcvMessage->state);
2715 rmr_get_meid(rmrMessageBuffer.rcvMessage, (unsigned char *)message.message.enodbName);
2716 message.peerInfo = (ConnectedCU_t *) sctpMap->find(message.message.enodbName);
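// A missing map entry is fatal for most message types, but clear-all,
// keep-alive and health-check requests are not tied to a specific E2 node and
// are still handled below.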
2717 if (message.peerInfo == nullptr) {
2718 auto type = rmrMessageBuffer.rcvMessage->mtype;
2720 case RIC_SCTP_CLEAR_ALL:
2721 case E2_TERM_KEEP_ALIVE_REQ:
2722 case RIC_HEALTH_CHECK_REQ:
mdclog_write(MDCLOG_ERR, "Failed to send message, no CU entry for %s", message.message.enodbName);
2733 if (rmrMessageBuffer.rcvMessage->mtype != RIC_HEALTH_CHECK_REQ) {
2734 num_of_XAPP_messages.fetch_add(1, std::memory_order_release);
2737 switch (rmrMessageBuffer.rcvMessage->mtype) {
2738 case RIC_E2_SETUP_RESP : {
2739 if (loglevel >= MDCLOG_DEBUG) {
2740 mdclog_write(MDCLOG_DEBUG, "RIC_E2_SETUP_RESP");
2742 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2745 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2746 message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
2747 message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment(rmrMessageBuffer.rcvMessage->len);
2749 // Update E2T instance level metrics
2750 message.peerInfo->sctpParams->e2tCounters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
2751 message.peerInfo->sctpParams->e2tCounters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment(rmrMessageBuffer.rcvMessage->len);
2753 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2754 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_RESP");
2759 case RIC_E2_SETUP_FAILURE : {
2760 if (loglevel >= MDCLOG_DEBUG) {
2761 mdclog_write(MDCLOG_DEBUG, "RIC_E2_SETUP_FAILURE");
2763 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2766 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2767 message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
2768 message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment(rmrMessageBuffer.rcvMessage->len);
2770 // Update E2T instance level metrics
2771 message.peerInfo->sctpParams->e2tCounters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
2772 message.peerInfo->sctpParams->e2tCounters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment(rmrMessageBuffer.rcvMessage->len);
2774 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2775 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_FAILURE");
2781 case RIC_E2NODE_CONFIG_UPDATE_ACK: {
2782 if (loglevel >= MDCLOG_DEBUG) {
2783 mdclog_write(MDCLOG_DEBUG, "RIC_E2NODE_CONFIG_UPDATE_ACK");
2785 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2788 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2789 message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_E2nodeConfigurationUpdate]->Increment();
2790 message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_E2nodeConfigurationUpdate]->Increment(rmrMessageBuffer.rcvMessage->len);
2792 // Update E2T instance level metrics
2793 message.peerInfo->sctpParams->e2tCounters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_E2nodeConfigurationUpdate]->Increment();
2794 message.peerInfo->sctpParams->e2tCounters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_E2nodeConfigurationUpdate]->Increment(rmrMessageBuffer.rcvMessage->len);
2796 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2797 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2NODE_CONFIG_UPDATE_ACK");
2803 case RIC_E2NODE_CONFIG_UPDATE_FAILURE: {
2804 if (loglevel >= MDCLOG_DEBUG) {
2805 mdclog_write(MDCLOG_DEBUG, "RIC_E2NODE_CONFIG_UPDATE_FAILURE");
2807 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2810 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2811 message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_E2nodeConfigurationUpdate]->Increment();
2812 message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_E2nodeConfigurationUpdate]->Increment(rmrMessageBuffer.rcvMessage->len);
2814 // Update E2T instance level metrics
2815 message.peerInfo->sctpParams->e2tCounters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_E2nodeConfigurationUpdate]->Increment();
2816 message.peerInfo->sctpParams->e2tCounters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_E2nodeConfigurationUpdate]->Increment(rmrMessageBuffer.rcvMessage->len);
2818 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2819 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2NODE_CONFIG_UPDATE_FAILURE");
2825 case RIC_ERROR_INDICATION: {
2826 if (loglevel >= MDCLOG_DEBUG) {
2827 mdclog_write(MDCLOG_DEBUG, "RIC_ERROR_INDICATION");
2829 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2830 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication]->Increment();
2831 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication]->Increment(rmrMessageBuffer.rcvMessage->len);
2833 // Update E2T instance level metrics
2834 message.peerInfo->sctpParams->e2tCounters[IN_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication]->Increment();
2835 message.peerInfo->sctpParams->e2tCounters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication]->Increment(rmrMessageBuffer.rcvMessage->len);
2837 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2838 mdclog_write(MDCLOG_ERR, "Failed to send RIC_ERROR_INDICATION");
2844 if (loglevel >= MDCLOG_DEBUG) {
2845 mdclog_write(MDCLOG_DEBUG, "RIC_SUB_REQ");
2847 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2848 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
2849 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment(rmrMessageBuffer.rcvMessage->len);
2851 // Update E2T instance level metrics
2852 message.peerInfo->sctpParams->e2tCounters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
2853 message.peerInfo->sctpParams->e2tCounters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment(rmrMessageBuffer.rcvMessage->len);
2855 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2856 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_REQ");
2861 case RIC_SUB_DEL_REQ: {
2862 if (loglevel >= MDCLOG_DEBUG) {
2863 mdclog_write(MDCLOG_DEBUG, "RIC_SUB_DEL_REQ");
2865 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2866 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
2867 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment(rmrMessageBuffer.rcvMessage->len);
2869 // Update E2T instance level metrics
2870 message.peerInfo->sctpParams->e2tCounters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
2871 message.peerInfo->sctpParams->e2tCounters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment(rmrMessageBuffer.rcvMessage->len);
2873 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2874 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_DEL_REQ");
2879 case RIC_CONTROL_REQ: {
2880 if (loglevel >= MDCLOG_DEBUG) {
2881 mdclog_write(MDCLOG_DEBUG, "RIC_CONTROL_REQ");
2883 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2884 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
2885 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment(rmrMessageBuffer.rcvMessage->len);
2887 // Update E2T instance level metrics
2888 message.peerInfo->sctpParams->e2tCounters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
2889 message.peerInfo->sctpParams->e2tCounters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment(rmrMessageBuffer.rcvMessage->len);
2891 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2892 mdclog_write(MDCLOG_ERR, "Failed to send RIC_CONTROL_REQ");
2897 case RIC_SERVICE_QUERY: {
2898 if (loglevel >= MDCLOG_DEBUG) {
2899 mdclog_write(MDCLOG_DEBUG, "RIC_SERVICE_QUERY");
2901 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2904 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2905 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment();
2906 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment(rmrMessageBuffer.rcvMessage->len);
2908 // Update E2T instance level metrics
2909 message.peerInfo->sctpParams->e2tCounters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment();
2910 message.peerInfo->sctpParams->e2tCounters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment(rmrMessageBuffer.rcvMessage->len);
2912 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2913 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_QUERY");
2918 case RIC_SERVICE_UPDATE_ACK: {
2919 if (loglevel >= MDCLOG_DEBUG) {
2920 mdclog_write(MDCLOG_DEBUG, "RIC_SERVICE_UPDATE_ACK");
2922 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2923 mdclog_write(MDCLOG_ERR, "error in PER_FromXML");
2926 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2927 message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
2928 message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment(rmrMessageBuffer.rcvMessage->len);
2930 // Update E2T instance level metrics
2931 message.peerInfo->sctpParams->e2tCounters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
2932 message.peerInfo->sctpParams->e2tCounters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment(rmrMessageBuffer.rcvMessage->len);
2934 if (loglevel >= MDCLOG_DEBUG) {
2935 mdclog_write(MDCLOG_DEBUG, "Before sending to CU");
2937 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2938 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_ACK");
2943 case RIC_SERVICE_UPDATE_FAILURE: {
2944 if (loglevel >= MDCLOG_DEBUG) {
2945 mdclog_write(MDCLOG_DEBUG, "RIC_SERVICE_UPDATE_FAILURE");
2947 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2950 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2951 message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
2952 message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment(rmrMessageBuffer.rcvMessage->len);
2954 // Update E2T instance level metrics
2955 message.peerInfo->sctpParams->e2tCounters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
2956 message.peerInfo->sctpParams->e2tCounters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment(rmrMessageBuffer.rcvMessage->len);
2958 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2959 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_FAILURE");
2964 case RIC_E2_RESET_REQ: {
2965 if (loglevel >= MDCLOG_DEBUG) {
2966 mdclog_write(MDCLOG_DEBUG, "RIC_E2_RESET_REQ");
2968 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2971 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2972 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
2973 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment(rmrMessageBuffer.rcvMessage->len);
2975 // Update E2T instance level metrics
2976 message.peerInfo->sctpParams->e2tCounters[IN_INITI][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
2977 message.peerInfo->sctpParams->e2tCounters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment(rmrMessageBuffer.rcvMessage->len);
2979 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2980 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET");
2985 case RIC_E2_RESET_RESP: {
2986 if (loglevel >= MDCLOG_DEBUG) {
2987 mdclog_write(MDCLOG_DEBUG, "RIC_E2_RESET_RESP");
2989 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2992 #if !(defined(UNIT_TEST) || defined(MODULE_TEST))
2993 message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
2994 message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment(rmrMessageBuffer.rcvMessage->len);
2996 // Update E2T instance level metrics
2997 message.peerInfo->sctpParams->e2tCounters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
2998 message.peerInfo->sctpParams->e2tCounters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment(rmrMessageBuffer.rcvMessage->len);
3000 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
3001 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET_RESP");
3006 case RIC_SCTP_CLEAR_ALL: {
3007 mdclog_write(MDCLOG_INFO, "RIC_SCTP_CLEAR_ALL");
// Loop over all registered E2 nodes: close each SCTP socket, notify the xApps, then erase the whole map.
3010 sctpMap->getKeys(v);
for (auto const &iter : v) {
3012 if (!boost::starts_with((string) (iter), "host:") && !boost::starts_with((string) (iter), "msg:")) {
3013 auto *peerInfo = (ConnectedCU_t *) sctpMap->find(iter);
3014 if (peerInfo == nullptr) {
3017 close(peerInfo->fileDescriptor);
3018 memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
3019 message.message.direction = 'D';
3020 message.message.time.tv_nsec = ts.tv_nsec;
3021 message.message.time.tv_sec = ts.tv_sec;
3023 message.message.asnLength = rmrMessageBuffer.sendMessage->len =
3024 snprintf((char *)rmrMessageBuffer.sendMessage->payload,
3026 "%s|RIC_SCTP_CLEAR_ALL",
3027 peerInfo->enodbName);
3028 message.message.asndata = rmrMessageBuffer.sendMessage->payload;
3029 mdclog_write(MDCLOG_INFO, "%s", message.message.asndata);
3030 if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
mdclog_write(MDCLOG_ERR, "RIC_SCTP_CONNECTION_FAILURE message failed to send to xAPP");
        case E2_TERM_KEEP_ALIVE_REQ: {
            // send the pre-built keep-alive message back
            rmr_bytes2payload(rmrMessageBuffer.sendMessage,
                              (unsigned char *)rmrMessageBuffer.ka_message,
                              rmrMessageBuffer.ka_message_len);
            rmrMessageBuffer.sendMessage->mtype = E2_TERM_KEEP_ALIVE_RESP;
            rmrMessageBuffer.sendMessage->state = 0;
            static unsigned char tx[32];
            auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
            rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, txLen);
#if !(defined(UNIT_TEST) || defined(MODULE_TEST))
            rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
#endif
            if (rmrMessageBuffer.sendMessage == nullptr) {
                rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
                mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP RMR message returned NULL");
            } else if (rmrMessageBuffer.sendMessage->state != 0) {
                mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP, on RMR state = %d ( %s)",
                             rmrMessageBuffer.sendMessage->state, translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str());
            } else if (loglevel >= MDCLOG_DEBUG) {
                mdclog_write(MDCLOG_DEBUG, "Got Keep Alive Request send : %s", rmrMessageBuffer.ka_message);
            }
            break;
        }
        case RIC_HEALTH_CHECK_REQ: {
            static int counter = 0;
            // answer the health check in place, reusing the received buffer
            rmr_bytes2payload(rmrMessageBuffer.rcvMessage,
                              (unsigned char *)"OK",
                              2);
            rmrMessageBuffer.rcvMessage->mtype = RIC_HEALTH_CHECK_RESP;
            rmrMessageBuffer.rcvMessage->state = 0;
            static unsigned char tx[32];
            auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
            rmr_bytes2xact(rmrMessageBuffer.rcvMessage, tx, txLen);
            rmrMessageBuffer.rcvMessage = rmr_rts_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.rcvMessage);
            if (rmrMessageBuffer.rcvMessage == nullptr) {
                rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
                mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP RMR message returned NULL");
            } else if (rmrMessageBuffer.rcvMessage->state != 0) {
                mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP, on RMR state = %d ( %s)",
                             rmrMessageBuffer.rcvMessage->state, translateRmrErrorMessages(rmrMessageBuffer.rcvMessage->state).c_str());
            } else if (loglevel >= MDCLOG_DEBUG && (++counter % 100 == 0)) {
                mdclog_write(MDCLOG_DEBUG, "Got %d RIC_HEALTH_CHECK_REQ Request send : OK", counter);
            }
            break;
        }
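        // Note on the two reply paths above: the keep-alive answer goes out with rmr_send_msg() on the send
        // buffer, while the health-check answer uses rmr_rts_msg(), which returns the received buffer to its
        // sender. In both cases RMR may hand back a different message pointer, which is why the buffer field is
        // reassigned from the call's return value before it is checked.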
        default:
            mdclog_write(MDCLOG_WARN, "Message Type : %d is not supported", rmrMessageBuffer.rcvMessage->mtype);
            message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
            message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
            message.message.time.tv_nsec = ts.tv_nsec;
            message.message.time.tv_sec = ts.tv_sec;
            message.message.messageType = rmrMessageBuffer.rcvMessage->mtype;

            buildJsonMessage(message);
            break;
    }
    if (mdclog_level_get() >= MDCLOG_DEBUG) {
        mdclog_write(MDCLOG_DEBUG, "EXIT OK from %s", __FUNCTION__);
    }
    return 0;
}
/**
 * Send a message to the CU without waiting for a successful or unsuccessful response
 * @param messageBuffer
 * @param message
 * @param failedMsgId
 * @param sctpMap
 * @return
 */
int sendDirectionalSctpMsg(RmrMessagesBuffer_t &messageBuffer,
                           ReportingMessages_t &message,
                           int failedMsgId,
                           Sctp_Map_t *sctpMap) {
    if (mdclog_level_get() >= MDCLOG_DEBUG) {
        mdclog_write(MDCLOG_DEBUG, "send message: %d to %s address", message.message.messageType, message.message.enodbName);
    }

    getRequestMetaData(message, messageBuffer);
    if (mdclog_level_get() >= MDCLOG_INFO) {
        mdclog_write(MDCLOG_INFO, "send message to %s address", message.message.enodbName);
    }

    auto rc = sendMessagetoCu(sctpMap, messageBuffer, message, failedMsgId);
    return rc;
}
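// Usage sketch (mirrors the calls in the message switch above): forward an xApp-originated E2AP PDU to the CU
// without waiting for an E2-level answer, e.g.
//     if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) { /* log and continue */ }
// The third argument (failedMsgId, 0 above) is simply passed through to sendMessagetoCu().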
/**
 * @param sctpMap
 * @param messageBuffer
 * @param message
 * @param failedMesgId
 * @return
 */
int sendMessagetoCu(Sctp_Map_t *sctpMap,
                    RmrMessagesBuffer_t &messageBuffer,
                    ReportingMessages_t &message,
                    int failedMesgId) {
    message.message.messageType = messageBuffer.rcvMessage->mtype;
    auto rc = sendSctpMsg(message.peerInfo, message, sctpMap);
    return rc;
}
/**
 * Register a peer's SCTP socket with the epoll instance
 * @return 0 on success, -1 on failure
 */
int addToEpoll(int epoll_fd,
               ConnectedCU_t *peerInfo,
               uint32_t events,
               Sctp_Map_t *sctpMap,
               char *enodbName,
               int msgType) {
    // add the peer's socket to the epoll set
    struct epoll_event event{};
    event.data.ptr = peerInfo;
    event.events = events;
    if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, peerInfo->fileDescriptor, &event) < 0) {
#if !(defined(UNIT_TEST) || defined(MODULE_TEST))
        if (mdclog_level_get() >= MDCLOG_DEBUG) {
            mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_ADD (may check not to quit here), %s, %s %d",
                         strerror(errno), __func__, __LINE__);
        }
        close(peerInfo->fileDescriptor);
        if (enodbName != nullptr) {
            cleanHashEntry(peerInfo, sctpMap);
            char key[MAX_ENODB_NAME_SIZE * 2];
            snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", enodbName, msgType);
            if (mdclog_level_get() >= MDCLOG_DEBUG) {
                mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
            }
            auto tmp = sctpMap->find(key);
            if (tmp) {
                free(tmp);
            }
            sctpMap->erase(key);
        } else {
            peerInfo->enodbName[0] = 0;
        }
        mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_ADD (may check not to quit here)");
        return -1;
#endif
    }
    return 0;
}
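// Usage sketch (hypothetical values; the real call sites and event masks live elsewhere in this file):
//     addToEpoll(epoll_fd, peerInfo, (EPOLLIN | EPOLLET), sctpMap, peerInfo->enodbName, 0);
// On epoll_ctl() failure the helper closes the socket and cleans up the related map entries (when an enodbName
// is given), so callers only need to check the return code.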
/**
 * Update the events watched by the epoll instance for a peer's socket
 * @return 0 on success, -1 on failure
 */
int modifyToEpoll(int epoll_fd,
                  ConnectedCU_t *peerInfo,
                  uint32_t events,
                  Sctp_Map_t *sctpMap,
                  char *enodbName,
                  int msgType) {
    // modify the epoll registration of the peer's socket
    struct epoll_event event{};
    event.data.ptr = peerInfo;
    event.events = events;
    if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, peerInfo->fileDescriptor, &event) < 0) {
        if (mdclog_level_get() >= MDCLOG_DEBUG) {
            mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_MOD (may check not to quit here), %s, %s %d",
                         strerror(errno), __func__, __LINE__);
        }
        close(peerInfo->fileDescriptor);
        cleanHashEntry(peerInfo, sctpMap);
        char key[MAX_ENODB_NAME_SIZE * 2];
        snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", enodbName, msgType);
        if (mdclog_level_get() >= MDCLOG_DEBUG) {
            mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
        }
        auto tmp = sctpMap->find(key);
        if (tmp) {
            free(tmp);
        }
        sctpMap->erase(key);
        mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_MOD (may check not to quit here)");
        return -1;
    }
    return 0;
}
int sendRmrMessage(RmrMessagesBuffer_t &rmrMessageBuffer, ReportingMessages_t &message) {
    buildJsonMessage(message);
#if !(defined(UNIT_TEST) || defined(MODULE_TEST))
    rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
#else
    rmrMessageBuffer.sendMessage->state = RMR_ERR_RETRY;
#endif
    if (rmrMessageBuffer.sendMessage == nullptr) {
        rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
        mdclog_write(MDCLOG_ERR, "RMR failed send message returned with NULL pointer");
        return -1;
    }

    if (rmrMessageBuffer.sendMessage->state != 0) {
        char meid[RMR_MAX_MEID]{};
        if (rmrMessageBuffer.sendMessage->state == RMR_ERR_RETRY) {
            usleep(10);   // short back-off before the single retry
            rmrMessageBuffer.sendMessage->state = 0;
            mdclog_write(MDCLOG_INFO, "RETRY sending Message type %d to Xapp from %s",
                         rmrMessageBuffer.sendMessage->mtype,
                         rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
#if !(defined(UNIT_TEST) || defined(MODULE_TEST))
            rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
#endif
            if (rmrMessageBuffer.sendMessage == nullptr) {
                mdclog_write(MDCLOG_ERR, "RMR failed send message returned with NULL pointer");
                rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
                return -1;
            } else if (rmrMessageBuffer.sendMessage->state != 0) {
                mdclog_write(MDCLOG_ERR,
                             "Message state %s while sending request %d to Xapp from %s after retry of 10 microseconds",
                             translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str(),
                             rmrMessageBuffer.sendMessage->mtype,
                             rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
                auto rc = rmrMessageBuffer.sendMessage->state;
                return rc;
            }
        } else {
            mdclog_write(MDCLOG_ERR, "Message state %s while sending request %d to Xapp from %s",
                         translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str(),
                         rmrMessageBuffer.sendMessage->mtype,
                         rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
            return rmrMessageBuffer.sendMessage->state;
        }
    }
    return 0;
}
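// Note: RMR_ERR_RETRY indicates the transport had no room to accept the message (an EAGAIN-like condition),
// so the code above retries exactly once after a short back-off; any other non-zero state is logged and
// returned to the caller unchanged.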
void buildJsonMessage(ReportingMessages_t &message) {
#ifdef UNIT_TEST
    jsonTrace = true;
#endif
    if (jsonTrace) {
        message.outLen = sizeof(message.base64Data);
        base64::encode((const unsigned char *) message.message.asndata,
                       (const int) message.message.asnLength,
                       message.base64Data,
                       message.outLen);
        if (mdclog_level_get() >= MDCLOG_DEBUG) {
            mdclog_write(MDCLOG_DEBUG, "Tracing: ASN length = %d, base64 message length = %d ",
                         (int) message.message.asnLength,
                         (int) message.outLen);
        }

        snprintf(message.buffer, sizeof(message.buffer),
                 "{\"header\": {\"ts\": \"%ld.%09ld\","
                 "\"ranName\": \"%s\","
                 "\"messageType\": %d,"
                 "\"direction\": \"%c\"},"
                 "\"base64Length\": %d,"
                 "\"asnBase64\": \"%s\"}",
                 message.message.time.tv_sec,
                 message.message.time.tv_nsec,
                 message.message.enodbName,
                 message.message.messageType,
                 message.message.direction,
                 (int) message.outLen,
                 message.base64Data);
        static src::logger_mt &lg = my_logger::get();

        BOOST_LOG(lg) << message.buffer;
    }
}
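// Illustrative trace record produced by buildJsonMessage() (all field values below are made up):
//     {"header": {"ts": "1700000000.000000001","ranName": "gnb_208_092_303030","messageType": 12050,
//      "direction": "D"},"base64Length": 8,"asnBase64": "AAECAwQF"}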
/**
 * Translate an RMR error code into a human-readable string
 * @param state
 * @return
 */
string translateRmrErrorMessages(int state) {
    string str = {};
    switch (state) {
        case RMR_OK:
            str = "RMR_OK - state is good";
            break;
        case RMR_ERR_BADARG:
            str = "RMR_ERR_BADARG - argument passed to function was unusable";
            break;
        case RMR_ERR_NOENDPT:
            str = "RMR_ERR_NOENDPT - send/call could not find an endpoint based on msg type";
            break;
        case RMR_ERR_EMPTY:
            str = "RMR_ERR_EMPTY - msg received had no payload; attempt to send an empty message";
            break;
        case RMR_ERR_NOHDR:
            str = "RMR_ERR_NOHDR - message didn't contain a valid header";
            break;
        case RMR_ERR_SENDFAILED:
            str = "RMR_ERR_SENDFAILED - send failed; errno holds the underlying reason";
            break;
        case RMR_ERR_CALLFAILED:
            str = "RMR_ERR_CALLFAILED - unable to send call() message";
            break;
        case RMR_ERR_NOWHOPEN:
            str = "RMR_ERR_NOWHOPEN - no wormholes are open";
            break;
        case RMR_ERR_WHID:
            str = "RMR_ERR_WHID - wormhole id was invalid";
            break;
        case RMR_ERR_OVERFLOW:
            str = "RMR_ERR_OVERFLOW - operation would have busted through a buffer/field size";
            break;
        case RMR_ERR_RETRY:
            str = "RMR_ERR_RETRY - request (send/call/rts) failed, but caller should retry (EAGAIN for wrappers)";
            break;
        case RMR_ERR_RCVFAILED:
            str = "RMR_ERR_RCVFAILED - receive failed (hard error)";
            break;
        case RMR_ERR_TIMEOUT:
            str = "RMR_ERR_TIMEOUT - message processing call timed out";
            break;
        case RMR_ERR_UNSET:
            str = "RMR_ERR_UNSET - the message hasn't been populated with a transport buffer";
            break;
        case RMR_ERR_TRUNC:
            str = "RMR_ERR_TRUNC - received message likely truncated";
            break;
        case RMR_ERR_INITFAILED:
            str = "RMR_ERR_INITFAILED - initialisation of something (probably message) failed";
            break;
        case RMR_ERR_NOTSUPP:
            str = "RMR_ERR_NOTSUPP - the request is not supported, or RMr was not initialised for the request";
            break;
        default:
            char buf[128]{};
            snprintf(buf, sizeof buf, "UNDOCUMENTED RMR_ERR : %d", state);
            str = buf;
            break;
    }
    return str;
}
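// Usage sketch: the helper is intended for log messages, e.g.
//     mdclog_write(MDCLOG_ERR, "state = %d (%s)", msg->state, translateRmrErrorMessages(msg->state).c_str());
// (msg is a placeholder for an rmr_mbuf_t *; see the RMR send/receive error paths above for the real call sites.)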
// Choose the SCTP stream id for an outgoing E2AP message
int fetchStreamId(ConnectedCU_t *peerInfo, ReportingMessages_t &message)
{
    auto loglevel = mdclog_level_get();
    int streamId = INVALID_STREAM_ID;
    if (message.peerInfo->isSingleStream)
    {
        streamId = message.peerInfo->singleStreamId;
        if (loglevel >= MDCLOG_DEBUG) {
            mdclog_write(MDCLOG_DEBUG, "Send SCTP message for SINGLE_STREAM streamId %d, Message Type %d, %s",
                         streamId, message.message.messageType, __FUNCTION__);
        }
        return streamId;
    }

    int msgType = message.message.messageType;
    switch (msgType) {
        // non-UE-associated E2AP procedures go out on stream 0
        case RIC_E2_RESET_REQ:
        case RIC_E2_RESET_RESP:
        case RIC_E2_SETUP_RESP:
        case RIC_E2_SETUP_FAILURE:
        case RIC_ERROR_INDICATION:
        case RIC_SERVICE_QUERY:
        case RIC_SERVICE_UPDATE_ACK:
        case RIC_SERVICE_UPDATE_FAILURE:
            streamId = 0;
            break;
        // UE-associated procedures use a non-zero stream
        case RIC_SUB_REQ:
        case RIC_SUB_DEL_REQ:
        case RIC_CONTROL_REQ:
            streamId = 1;
            break;
        default:
            streamId = 0;
            break;
    }

    if (loglevel >= MDCLOG_DEBUG) {
        mdclog_write(MDCLOG_DEBUG, "Send SCTP message for streamId %d Message Type %d, %s",
                     streamId, message.message.messageType, __FUNCTION__);