1 // Copyright 2019 AT&T Intellectual Property
2 // Copyright 2019 Nokia
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
16 // This source code is part of the near-RT RIC (RAN Intelligent Controller)
17 // platform project (RICP).
19 // TODO: High-level file comment.
23 #include <3rdparty/oranE2/RANfunctions-List.h>
24 #include "sctpThread.h"
25 #include "BuildRunName.h"
27 //#include "3rdparty/oranE2SM/E2SM-gNB-NRT-RANfunction-Definition.h"
28 //#include "BuildXml.h"
29 //#include "pugixml/src/pugixml.hpp"
32 //using namespace std::placeholders;
33 using namespace boost::filesystem;
34 using namespace prometheus;
42 // need to expose without the include of gcov
43 extern "C" void __gcov_flush(void);
45 static void catch_function(int signal) {
51 BOOST_LOG_INLINE_GLOBAL_LOGGER_DEFAULT(my_logger, src::logger_mt)
53 boost::shared_ptr<sinks::synchronous_sink<sinks::text_file_backend>> boostLogger;
54 double cpuClock = 0.0;
55 bool jsonTrace = true;
59 mdclog_attr_init(&attr);
60 mdclog_attr_set_ident(attr, "E2Terminator");
62 mdclog_attr_destroy(attr);
64 auto start_time = std::chrono::high_resolution_clock::now();
65 typedef std::chrono::duration<double, std::ratio<1,1>> seconds_t;
68 return seconds_t(std::chrono::high_resolution_clock::now() - start_time).count();
71 double approx_CPU_MHz(unsigned sleepTime) {
72 using namespace std::chrono_literals;
74 uint64_t cycles_start = rdtscp(aux);
75 double time_start = age();
76 std::this_thread::sleep_for(sleepTime * 1ms);
77 uint64_t elapsed_cycles = rdtscp(aux) - cycles_start;
78 double elapsed_time = age() - time_start;
79 return elapsed_cycles / elapsed_time;
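// Illustrative sketch (an assumption -- the rdtscp() helper is defined elsewhere):
// it is taken here to wrap the x86 RDTSCP instruction via the compiler intrinsic, e.g.
//
//     #include <x86intrin.h>
//     static inline uint64_t rdtscp(uint32_t &aux) {
//         return __rdtscp(&aux);   // returns the TSC; stores the CPU/node id in aux
//     }
//
// Note that elapsed_cycles / elapsed_time is cycles per second (Hz), despite the MHz
// in the function name.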
82 //std::atomic<int64_t> rmrCounter{0};
83 std::atomic<int64_t> num_of_messages{0};
84 std::atomic<int64_t> num_of_XAPP_messages{0};
85 static long transactionCounter = 0;
87 int buildListeningPort(sctp_params_t &sctpParams) {
88 sctpParams.listenFD = socket(AF_INET6, SOCK_STREAM, IPPROTO_SCTP);
89 if (sctpParams.listenFD <= 0) {
90 mdclog_write(MDCLOG_ERR, "Error Opening socket, %s", strerror(errno));
94 struct sockaddr_in6 serverAddress {};
95 serverAddress.sin6_family = AF_INET6;
96 serverAddress.sin6_addr = in6addr_any;
97 serverAddress.sin6_port = htons(sctpParams.sctpPort);
98 if (bind(sctpParams.listenFD, (SA *)&serverAddress, sizeof(serverAddress)) < 0 ) {
99 mdclog_write(MDCLOG_ERR, "Error binding port %d. %s", sctpParams.sctpPort, strerror(errno));
102 if (setSocketNoBlocking(sctpParams.listenFD) == -1) {
103 //mdclog_write(MDCLOG_ERR, "Error binding. %s", strerror(errno));
106 if (mdclog_level_get() >= MDCLOG_DEBUG) {
107 struct sockaddr_in6 clientAddress {};
108 socklen_t len = sizeof(clientAddress);
109 getsockname(sctpParams.listenFD, (SA *)&clientAddress, &len);
111 inet_ntop(AF_INET6, &clientAddress.sin6_addr, buff, sizeof(buff));
112 mdclog_write(MDCLOG_DEBUG, "My address: %s, port %d\n", buff, htons(clientAddress.sin6_port));
115 if (listen(sctpParams.listenFD, SOMAXCONN) < 0) {
116 mdclog_write(MDCLOG_ERR, "Error listening. %s\n", strerror(errno));
119 struct epoll_event event {};
120 event.events = EPOLLIN | EPOLLET;
121 event.data.fd = sctpParams.listenFD;
123 // add listening port to epoll
124 if (epoll_ctl(sctpParams.epoll_fd, EPOLL_CTL_ADD, sctpParams.listenFD, &event)) {
125 printf("Failed to add descriptor to epoll\n");
126 mdclog_write(MDCLOG_ERR, "Failed to add descriptor to epoll. %s\n", strerror(errno));
133 int buildConfiguration(sctp_params_t &sctpParams) {
134 path p = (sctpParams.configFilePath + "/" + sctpParams.configFileName).c_str();
136 const int size = 2048;
137 auto fileSize = file_size(p);
138 if (fileSize > size) {
139 mdclog_write(MDCLOG_ERR, "File %s larger than %d", p.string().c_str(), size);
143 mdclog_write(MDCLOG_ERR, "Configuration file %s does not exist", p.string().c_str());
148 if (conf.openConfigFile(p.string()) == -1) {
149 mdclog_write(MDCLOG_ERR, "Failed to open config file %s, %s",
150 p.string().c_str(), strerror(errno));
153 int rmrPort = conf.getIntValue("nano");
155 mdclog_write(MDCLOG_ERR, "illegal RMR port ");
158 sctpParams.rmrPort = (uint16_t)rmrPort;
159 snprintf(sctpParams.rmrAddress, sizeof(sctpParams.rmrAddress), "%d", (int) (sctpParams.rmrPort));
161 auto tmpStr = conf.getStringValue("loglevel");
162 if (tmpStr.length() == 0) {
163 mdclog_write(MDCLOG_ERR, "illegal loglevel. Set loglevel to MDCLOG_INFO");
166 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
168 if ((tmpStr.compare("debug")) == 0) {
169 sctpParams.logLevel = MDCLOG_DEBUG;
170 } else if ((tmpStr.compare("info")) == 0) {
171 sctpParams.logLevel = MDCLOG_INFO;
172 } else if ((tmpStr.compare("warning")) == 0) {
173 sctpParams.logLevel = MDCLOG_WARN;
174 } else if ((tmpStr.compare("error")) == 0) {
175 sctpParams.logLevel = MDCLOG_ERR;
177 mdclog_write(MDCLOG_ERR, "illegal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
178 sctpParams.logLevel = MDCLOG_INFO;
180 mdclog_level_set(sctpParams.logLevel);
182 tmpStr = conf.getStringValue("volume");
183 if (tmpStr.length() == 0) {
184 mdclog_write(MDCLOG_ERR, "illegal volume.");
188 char tmpLogFilespec[VOLUME_URL_SIZE];
189 tmpLogFilespec[0] = 0;
190 sctpParams.volume[0] = 0;
191 snprintf(sctpParams.volume, VOLUME_URL_SIZE, "%s", tmpStr.c_str());
192 // copy the name to temp file as well
193 snprintf(tmpLogFilespec, VOLUME_URL_SIZE, "%s", tmpStr.c_str());
196 // define the file name in the tmp directory under the volume
197 strcat(tmpLogFilespec,"/tmp/E2Term_%Y-%m-%d_%H-%M-%S.%N.tmpStr");
199 sctpParams.myIP = conf.getStringValue("local-ip");
200 if (sctpParams.myIP.length() == 0) {
201 mdclog_write(MDCLOG_ERR, "illegal local-ip.");
205 int sctpPort = conf.getIntValue("sctp-port");
206 if (sctpPort == -1) {
207 mdclog_write(MDCLOG_ERR, "illegal SCTP port ");
210 sctpParams.sctpPort = (uint16_t)sctpPort;
212 sctpParams.fqdn = conf.getStringValue("external-fqdn");
213 if (sctpParams.fqdn.length() == 0) {
214 mdclog_write(MDCLOG_ERR, "illegal external-fqdn");
218 std::string pod = conf.getStringValue("pod_name");
219 if (pod.length() == 0) {
220 mdclog_write(MDCLOG_ERR, "illegal pod_name in config file");
223 auto *podName = getenv(pod.c_str());
224 if (podName == nullptr) {
225 mdclog_write(MDCLOG_ERR, "illegal pod_name or environment variable does not exist : %s", pod.c_str());
229 sctpParams.podName.assign(podName);
230 if (sctpParams.podName.length() == 0) {
231 mdclog_write(MDCLOG_ERR, "illegal pod_name");
236 tmpStr = conf.getStringValue("trace");
237 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
238 if ((tmpStr.compare("start")) == 0) {
239 mdclog_write(MDCLOG_INFO, "Trace set to: start");
240 sctpParams.trace = true;
241 } else if ((tmpStr.compare("stop")) == 0) {
242 mdclog_write(MDCLOG_INFO, "Trace set to: stop");
243 sctpParams.trace = false;
245 jsonTrace = sctpParams.trace;
247 sctpParams.epollTimeOut = -1;
249 tmpStr = conf.getStringValue("prometheusPort");
250 if (tmpStr.length() != 0) {
251 sctpParams.prometheusPort = tmpStr;
254 sctpParams.ka_message_length = snprintf(sctpParams.ka_message, KA_MESSAGE_SIZE, "{\"address\": \"%s:%d\","
256 "\"pod_name\": \"%s\"}",
257 (const char *)sctpParams.myIP.c_str(),
259 sctpParams.fqdn.c_str(),
260 sctpParams.podName.c_str());
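// Illustrative example (hypothetical values; the middle lines of the format string are
// elided here): the keep-alive JSON sent as E2_TERM_INIT looks roughly like
//     {"address": "10.0.0.1:38000", ..., "pod_name": "e2term-alpha-0"}
// where the address part joins local-ip with the RMR port and the elided fields carry
// the external-fqdn value.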
262 if (mdclog_level_get() >= MDCLOG_INFO) {
263 mdclog_mdc_add("RMR Port", to_string(sctpParams.rmrPort).c_str());
264 mdclog_mdc_add("LogLevel", to_string(sctpParams.logLevel).c_str());
265 mdclog_mdc_add("volume", sctpParams.volume);
266 mdclog_mdc_add("tmpLogFilespec", tmpLogFilespec);
267 mdclog_mdc_add("my ip", sctpParams.myIP.c_str());
268 mdclog_mdc_add("pod name", sctpParams.podName.c_str());
270 mdclog_write(MDCLOG_INFO, "running parameters for instance : %s", sctpParams.ka_message);
274 // Files written to the current working directory
275 boostLogger = logging::add_file_log(
276 keywords::file_name = tmpLogFilespec, // to temp directory
277 keywords::rotation_size = 10 * 1024 * 1024,
278 keywords::time_based_rotation = sinks::file::rotation_at_time_interval(posix_time::hours(1)),
279 keywords::format = "%Message%"
280 //keywords::format = "[%TimeStamp%]: %Message%" // prefix each log record with a time stamp
283 // Set up a destination folder for collecting rotated (closed) files -- rename() can be used since it is on the same volume
284 boostLogger->locked_backend()->set_file_collector(sinks::file::make_collector(
285 keywords::target = sctpParams.volume
288 // Upon restart, scan the directory for files matching the file_name pattern
289 boostLogger->locked_backend()->scan_for_files();
291 // Enable auto-flushing after each log record written
292 if (mdclog_level_get() >= MDCLOG_DEBUG) {
293 boostLogger->locked_backend()->auto_flush(true);
299 void startPrometheus(sctp_params_t &sctpParams) {
300 sctpParams.prometheusFamily = &BuildCounter()
302 .Help("E2T message counter")
303 .Labels({{"POD_NAME", sctpParams.podName}})
304 .Register(*sctpParams.prometheusRegistry);
306 string prometheusPath = sctpParams.prometheusPort + "," + "[::]:" + sctpParams.prometheusPort;
307 if (mdclog_level_get() >= MDCLOG_DEBUG) {
308 mdclog_write(MDCLOG_DEBUG, "Start Prometheus Pull mode on %s", prometheusPath.c_str());
310 sctpParams.prometheusExposer = new Exposer(prometheusPath, 1);
311 sctpParams.prometheusExposer->RegisterCollectable(sctpParams.prometheusRegistry);
314 int main(const int argc, char **argv) {
315 sctp_params_t sctpParams;
318 std::random_device device{};
319 std::mt19937 generator(device());
320 std::uniform_int_distribution<long> distribution(1, (long) 1e12);
321 transactionCounter = distribution(generator);
325 // uint32_t aux1 = 0;
326 // st = rdtscp(aux1);
328 unsigned num_cpus = std::thread::hardware_concurrency();
330 mdclog_level_set(MDCLOG_INFO);
332 if (std::signal(SIGINT, catch_function) == SIG_ERR) {
333 mdclog_write(MDCLOG_ERR, "Error initializing SIGINT");
336 if (std::signal(SIGABRT, catch_function)== SIG_ERR) {
337 mdclog_write(MDCLOG_ERR, "Error initializing SIGABRT");
340 if (std::signal(SIGTERM, catch_function)== SIG_ERR) {
341 mdclog_write(MDCLOG_ERR, "Error initializing SIGTERM");
345 cpuClock = approx_CPU_MHz(100);
347 mdclog_write(MDCLOG_DEBUG, "CPU speed %11.11f", cpuClock);
349 auto result = parse(argc, argv, sctpParams);
351 if (buildConfiguration(sctpParams) != 0) {
355 //auto registry = std::make_shared<Registry>();
356 sctpParams.prometheusRegistry = std::make_shared<Registry>();
358 //sctpParams.prometheusFamily = new Family<Counter>("E2T", "E2T message counter", {{"E", sctpParams.podName}});
360 startPrometheus(sctpParams);
363 sctpParams.epoll_fd = epoll_create1(0);
364 if (sctpParams.epoll_fd == -1) {
365 mdclog_write(MDCLOG_ERR, "failed to open epoll descriptor");
369 getRmrContext(sctpParams);
370 if (sctpParams.rmrCtx == nullptr) {
371 close(sctpParams.epoll_fd);
375 if (buildInotify(sctpParams) == -1) {
376 close(sctpParams.rmrListenFd);
377 rmr_close(sctpParams.rmrCtx);
378 close(sctpParams.epoll_fd);
382 if (buildListeningPort(sctpParams) != 0) {
383 close(sctpParams.rmrListenFd);
384 rmr_close(sctpParams.rmrCtx);
385 close(sctpParams.epoll_fd);
389 sctpParams.sctpMap = new mapWrapper();
391 std::vector<std::thread> threads(num_cpus);
392 // std::vector<std::thread> threads;
395 for (unsigned int i = 0; i < num_cpus; i++) {
396 threads[i] = std::thread(listener, &sctpParams);
401 int rc = pthread_setaffinity_np(threads[i].native_handle(), sizeof(cpu_set_t), &cpuset);
403 mdclog_write(MDCLOG_ERR, "Error calling pthread_setaffinity_np: %d", rc);
408 //loop over term_init until first message from xApp
409 handleTermInit(sctpParams);
411 for (auto &t : threads) {
418 void handleTermInit(sctp_params_t &sctpParams) {
419 sendTermInit(sctpParams);
420 // notify the E2 Manager that the E2 Terminator has initialized
425 auto xappMessages = num_of_XAPP_messages.load(std::memory_order_acquire);
426 if (xappMessages > 0) {
427 if (mdclog_level_get() >= MDCLOG_INFO) {
428 mdclog_write(MDCLOG_INFO, "Got a message from some application, stop sending E2_TERM_INIT");
434 if (count % 1000 == 0) {
435 mdclog_write(MDCLOG_ERR, "Got no messages from any xApp");
436 sendTermInit(sctpParams);
441 void sendTermInit(sctp_params_t &sctpParams) {
442 rmr_mbuf_t *msg = rmr_alloc_msg(sctpParams.rmrCtx, sctpParams.ka_message_length);
445 msg->mtype = E2_TERM_INIT;
447 rmr_bytes2payload(msg, (unsigned char *)sctpParams.ka_message, sctpParams.ka_message_length);
448 static unsigned char tx[32];
449 auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
450 rmr_bytes2xact(msg, tx, txLen);
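// For illustration: "%15ld" renders the transaction counter as a 15-character,
// space-padded decimal string, e.g. a counter value of 42 becomes "             42";
// the same transaction-id scheme is reused for the setup and indication messages below.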
451 msg = rmr_send_msg(sctpParams.rmrCtx, msg);
452 if (msg == nullptr) {
453 msg = rmr_alloc_msg(sctpParams.rmrCtx, sctpParams.ka_message_length);
454 } else if (msg->state == 0) {
456 if (mdclog_level_get() >= MDCLOG_INFO) {
457 mdclog_write(MDCLOG_INFO, "E2_TERM_INIT successfully sent ");
461 if (count % 100 == 0) {
462 mdclog_write(MDCLOG_ERR, "Error sending E2_TERM_INIT cause : %s ", translateRmrErrorMessages(msg->state).c_str());
477 cxxopts::ParseResult parse(int argc, char *argv[], sctp_params_t &sctpParams) {
478 cxxopts::Options options(argv[0], "e2 term help");
479 options.positional_help("[optional args]").show_positional_help();
480 options.allow_unrecognised_options().add_options()
481 ("p,path", "config file path", cxxopts::value<std::string>(sctpParams.configFilePath)->default_value("config"))
482 ("f,file", "config file name", cxxopts::value<std::string>(sctpParams.configFileName)->default_value("config.conf"))
483 ("h,help", "Print help");
485 auto result = options.parse(argc, (const char **&)argv);
487 if (result.count("help")) {
488 std::cout << options.help({""}) << std::endl;
497 * @return -1 on failure, 0 on success
499 int buildInotify(sctp_params_t &sctpParams) {
500 sctpParams.inotifyFD = inotify_init1(IN_NONBLOCK);
501 if (sctpParams.inotifyFD == -1) {
502 mdclog_write(MDCLOG_ERR, "Failed to init inotify (inotify_init1) %s", strerror(errno));
506 sctpParams.inotifyWD = inotify_add_watch(sctpParams.inotifyFD,
507 (const char *)sctpParams.configFilePath.c_str(),
508 (unsigned)IN_OPEN | (unsigned)IN_CLOSE_WRITE | (unsigned)IN_CLOSE_NOWRITE); //IN_CLOSE = (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE)
509 if (sctpParams.inotifyWD == -1) {
510 mdclog_write(MDCLOG_ERR, "Failed to add directory : %s to inotify (inotify_add_watch) %s",
511 sctpParams.configFilePath.c_str(),
513 close(sctpParams.inotifyFD);
517 struct epoll_event event{};
518 event.events = (EPOLLIN);
519 event.data.fd = sctpParams.inotifyFD;
520 // add inotify FD to epoll
521 if (epoll_ctl(sctpParams.epoll_fd, EPOLL_CTL_ADD, sctpParams.inotifyFD, &event)) {
522 mdclog_write(MDCLOG_ERR, "Failed to add inotify FD to epoll");
523 close(sctpParams.inotifyFD);
534 void listener(sctp_params_t *params) {
535 int num_of_SCTP_messages = 0;
536 auto totalTime = 0.0;
538 mdclog_level_set(params->logLevel);
540 std::thread::id this_id = std::this_thread::get_id();
542 streambuf *oldCout = cout.rdbuf();
543 ostringstream memCout;
545 cout.rdbuf(memCout.rdbuf());
547 //return to the normal cout
551 memcpy(tid, memCout.str().c_str(), memCout.str().length() < 32 ? memCout.str().length() : 31);
552 tid[memCout.str().length() < 32 ? memCout.str().length() : 31] = 0;
553 mdclog_mdc_add("thread id", tid);
555 if (mdclog_level_get() >= MDCLOG_DEBUG) {
556 mdclog_write(MDCLOG_DEBUG, "started thread number %s", tid);
559 RmrMessagesBuffer_t rmrMessageBuffer{};
560 //create and init RMR
561 rmrMessageBuffer.rmrCtx = params->rmrCtx;
563 auto *events = (struct epoll_event *) calloc(MAXEVENTS, sizeof(struct epoll_event));
564 struct timespec end{0, 0};
565 struct timespec start{0, 0};
567 rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
568 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
570 memcpy(rmrMessageBuffer.ka_message, params->ka_message, params->ka_message_length);
571 rmrMessageBuffer.ka_message_len = params->ka_message_length;
572 rmrMessageBuffer.ka_message[rmrMessageBuffer.ka_message_len] = 0;
574 if (mdclog_level_get() >= MDCLOG_DEBUG) {
575 mdclog_write(MDCLOG_DEBUG, "keep alive message is : %s", rmrMessageBuffer.ka_message);
578 ReportingMessages_t message {};
580 // for (int i = 0; i < MAX_RMR_BUFF_ARRAY; i++) {
581 // rmrMessageBuffer.rcvBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
582 // rmrMessageBuffer.sendBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
586 if (mdclog_level_get() >= MDCLOG_DEBUG) {
587 mdclog_write(MDCLOG_DEBUG, "Start EPOLL Wait. Timeout = %d", params->epollTimeOut);
589 auto numOfEvents = epoll_wait(params->epoll_fd, events, MAXEVENTS, params->epollTimeOut);
590 if (numOfEvents == 0) { // time out
591 if (mdclog_level_get() >= MDCLOG_DEBUG) {
592 mdclog_write(MDCLOG_DEBUG, "got epoll timeout");
595 } else if (numOfEvents < 0) {
596 if (errno == EINTR) {
597 if (mdclog_level_get() >= MDCLOG_DEBUG) {
598 mdclog_write(MDCLOG_DEBUG, "got EINTR : %s", strerror(errno));
602 mdclog_write(MDCLOG_ERR, "Epoll wait failed, errno = %s", strerror(errno));
605 for (auto i = 0; i < numOfEvents; i++) {
606 if (mdclog_level_get() >= MDCLOG_DEBUG) {
607 mdclog_write(MDCLOG_DEBUG, "handling epoll event %d out of %d", i + 1, numOfEvents);
609 clock_gettime(CLOCK_MONOTONIC, &message.message.time);
610 start.tv_sec = message.message.time.tv_sec;
611 start.tv_nsec = message.message.time.tv_nsec;
614 if ((events[i].events & EPOLLERR) || (events[i].events & EPOLLHUP)) {
615 handlepoll_error(events[i], message, rmrMessageBuffer, params);
616 } else if (events[i].events & EPOLLOUT) {
617 handleEinprogressMessages(events[i], message, rmrMessageBuffer, params);
618 } else if (params->listenFD == events[i].data.fd) {
619 if (mdclog_level_get() >= MDCLOG_INFO) {
620 mdclog_write(MDCLOG_INFO, "New connection request from sctp network\n");
622 // a new connection was requested from the RAN; start building the connection
624 struct sockaddr in_addr {};
626 char hostBuff[NI_MAXHOST];
627 char portBuff[NI_MAXSERV];
629 in_len = sizeof(in_addr);
630 auto *peerInfo = (ConnectedCU_t *)calloc(1, sizeof(ConnectedCU_t));
631 if(peerInfo == nullptr){
632 mdclog_write(MDCLOG_ERR, "calloc failed");
635 peerInfo->sctpParams = params;
636 peerInfo->fileDescriptor = accept(params->listenFD, &in_addr, &in_len);
637 if (peerInfo->fileDescriptor == -1) {
638 if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) {
639 /* We have processed all incoming connections. */
642 mdclog_write(MDCLOG_ERR, "Accept error, errno = %s", strerror(errno));
646 if (setSocketNoBlocking(peerInfo->fileDescriptor) == -1) {
647 mdclog_write(MDCLOG_ERR, "setSocketNoBlocking failed to set new connection %s on port %s\n", hostBuff, portBuff);
648 close(peerInfo->fileDescriptor);
651 auto ans = getnameinfo(&in_addr, in_len,
652 peerInfo->hostName, NI_MAXHOST,
653 peerInfo->portNumber, NI_MAXSERV, (unsigned )((unsigned int)NI_NUMERICHOST | (unsigned int)NI_NUMERICSERV));
655 mdclog_write(MDCLOG_ERR, "Failed to get info on connection request. %s\n", strerror(errno));
656 close(peerInfo->fileDescriptor);
659 if (mdclog_level_get() >= MDCLOG_DEBUG) {
660 mdclog_write(MDCLOG_DEBUG, "Accepted connection on descriptor %d (host=%s, port=%s)\n", peerInfo->fileDescriptor, peerInfo->hostName, peerInfo->portNumber);
662 peerInfo->isConnected = false;
663 peerInfo->gotSetup = false;
664 if (addToEpoll(params->epoll_fd,
667 params->sctpMap, nullptr,
673 } else if (params->rmrListenFd == events[i].data.fd) {
674 // got message from XAPP
675 //num_of_XAPP_messages.fetch_add(1, std::memory_order_release);
676 num_of_messages.fetch_add(1, std::memory_order_release);
677 if (mdclog_level_get() >= MDCLOG_DEBUG) {
678 mdclog_write(MDCLOG_DEBUG, "new RMR message");
680 if (receiveXappMessages(params->sctpMap,
682 message.message.time) != 0) {
683 mdclog_write(MDCLOG_ERR, "Error handling Xapp message");
685 } else if (params->inotifyFD == events[i].data.fd) {
686 mdclog_write(MDCLOG_INFO, "Got event from inotify (configuration update)");
687 handleConfigChange(params);
689 /* We have data on the fd waiting to be read. Read and display it.
690 * We must read whatever data is available completely, as we are running
691 * in edge-triggered mode and won't get a notification again for the same data. */
692 num_of_messages.fetch_add(1, std::memory_order_release);
693 if (mdclog_level_get() >= MDCLOG_DEBUG) {
694 mdclog_write(MDCLOG_DEBUG, "new message from SCTP, epoll flags are : %0x", events[i].events);
696 receiveDataFromSctp(&events[i],
698 num_of_SCTP_messages,
700 message.message.time);
703 clock_gettime(CLOCK_MONOTONIC, &end);
704 if (mdclog_level_get() >= MDCLOG_INFO) {
705 totalTime += ((end.tv_sec + 1.0e-9 * end.tv_nsec) -
706 ((double) start.tv_sec + 1.0e-9 * start.tv_nsec));
708 if (mdclog_level_get() >= MDCLOG_DEBUG) {
709 mdclog_write(MDCLOG_DEBUG, "message handling is %ld seconds %ld nanoseconds",
710 end.tv_sec - start.tv_sec,
711 end.tv_nsec - start.tv_nsec);
721 void handleConfigChange(sctp_params_t *sctpParams) {
722 char buf[4096] __attribute__ ((aligned(__alignof__(struct inotify_event))));
723 const struct inotify_event *event;
726 path p = (sctpParams->configFilePath + "/" + sctpParams->configFileName).c_str();
727 auto endlessLoop = true;
728 while (endlessLoop) {
729 auto len = read(sctpParams->inotifyFD, buf, sizeof buf);
731 if (errno != EAGAIN) {
732 mdclog_write(MDCLOG_ERR, "read %s ", strerror(errno));
742 for (ptr = buf; ptr < buf + len; ptr += sizeof(struct inotify_event) + event->len) {
743 event = (const struct inotify_event *)ptr;
744 if (event->mask & (uint32_t)IN_ISDIR) {
748 // the directory name
749 if (sctpParams->inotifyWD == event->wd) {
753 auto retVal = strcmp(sctpParams->configFileName.c_str(), event->name);
758 // only the file we want
759 if (event->mask & (uint32_t)IN_CLOSE_WRITE) {
760 if (mdclog_level_get() >= MDCLOG_INFO) {
761 mdclog_write(MDCLOG_INFO, "Configuration file changed");
764 const int size = 2048;
765 auto fileSize = file_size(p);
766 if (fileSize > size) {
767 mdclog_write(MDCLOG_ERR, "File %s larger than %d", p.string().c_str(), size);
771 mdclog_write(MDCLOG_ERR, "Configuration file %s does not exist", p.string().c_str());
776 if (conf.openConfigFile(p.string()) == -1) {
777 mdclog_write(MDCLOG_ERR, "Failed to open config file %s, %s",
778 p.string().c_str(), strerror(errno));
782 auto tmpStr = conf.getStringValue("loglevel");
783 if (tmpStr.length() == 0) {
784 mdclog_write(MDCLOG_ERR, "illegal loglevel. Set loglevel to MDCLOG_INFO");
787 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
789 if ((tmpStr.compare("debug")) == 0) {
790 mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_DEBUG");
791 sctpParams->logLevel = MDCLOG_DEBUG;
792 } else if ((tmpStr.compare("info")) == 0) {
793 mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_INFO");
794 sctpParams->logLevel = MDCLOG_INFO;
795 } else if ((tmpStr.compare("warning")) == 0) {
796 mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_WARN");
797 sctpParams->logLevel = MDCLOG_WARN;
798 } else if ((tmpStr.compare("error")) == 0) {
799 mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_ERR");
800 sctpParams->logLevel = MDCLOG_ERR;
802 mdclog_write(MDCLOG_ERR, "illegal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
803 sctpParams->logLevel = MDCLOG_INFO;
805 mdclog_level_set(sctpParams->logLevel);
808 tmpStr = conf.getStringValue("trace");
809 if (tmpStr.length() == 0) {
810 mdclog_write(MDCLOG_ERR, "illegal trace. Set trace to stop");
814 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
815 if ((tmpStr.compare("start")) == 0) {
816 mdclog_write(MDCLOG_INFO, "Trace set to: start");
817 sctpParams->trace = true;
818 } else if ((tmpStr.compare("stop")) == 0) {
819 mdclog_write(MDCLOG_INFO, "Trace set to: stop");
820 sctpParams->trace = false;
822 mdclog_write(MDCLOG_ERR, "Trace was set to wrong value %s, set to stop", tmpStr.c_str());
823 sctpParams->trace = false;
825 jsonTrace = sctpParams->trace;
838 * @param rmrMessageBuffer
841 void handleEinprogressMessages(struct epoll_event &event,
842 ReportingMessages_t &message,
843 RmrMessagesBuffer_t &rmrMessageBuffer,
844 sctp_params_t *params) {
845 auto *peerInfo = (ConnectedCU_t *)event.data.ptr;
846 memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
848 mdclog_write(MDCLOG_INFO, "file descriptor %d got EPOLLOUT", peerInfo->fileDescriptor);
850 socklen_t retValLen = sizeof(int); // getsockopt needs the size of the result buffer on input
851 auto rc = getsockopt(peerInfo->fileDescriptor, SOL_SOCKET, SO_ERROR, &retVal, &retValLen);
852 if (rc != 0 || retVal != 0) {
854 rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
855 "%s|Failed SCTP Connection, after EINPROGRESS the getsockopt%s",
856 peerInfo->enodbName, strerror(errno));
857 } else if (retVal != 0) {
858 rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
859 "%s|Failed SCTP Connection after EINPROGRESS, SO_ERROR",
860 peerInfo->enodbName);
863 message.message.asndata = rmrMessageBuffer.sendMessage->payload;
864 message.message.asnLength = rmrMessageBuffer.sendMessage->len;
865 mdclog_write(MDCLOG_ERR, "%s", rmrMessageBuffer.sendMessage->payload);
866 message.message.direction = 'N';
867 if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
868 mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
870 memset(peerInfo->asnData, 0, peerInfo->asnLength);
871 peerInfo->asnLength = 0;
876 peerInfo->isConnected = true;
878 if (modifyToEpoll(params->epoll_fd, peerInfo, (EPOLLIN | EPOLLET), params->sctpMap, peerInfo->enodbName,
879 peerInfo->mtype) != 0) {
880 mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_MOD");
884 message.message.asndata = (unsigned char *)peerInfo->asnData;
885 message.message.asnLength = peerInfo->asnLength;
886 message.message.messageType = peerInfo->mtype;
887 memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
888 num_of_messages.fetch_add(1, std::memory_order_release);
889 if (mdclog_level_get() >= MDCLOG_DEBUG) {
890 mdclog_write(MDCLOG_DEBUG, "send the delayed SETUP/ENDC SETUP to sctp for %s",
891 message.message.enodbName);
893 if (sendSctpMsg(peerInfo, message, params->sctpMap) != 0) {
894 if (mdclog_level_get() >= MDCLOG_DEBUG) {
895 mdclog_write(MDCLOG_DEBUG, "Error write to SCTP %s %d", __func__, __LINE__);
900 memset(peerInfo->asnData, 0, peerInfo->asnLength);
901 peerInfo->asnLength = 0;
906 void handlepoll_error(struct epoll_event &event,
907 ReportingMessages_t &message,
908 RmrMessagesBuffer_t &rmrMessageBuffer,
909 sctp_params_t *params) {
910 if (event.data.fd != params->rmrListenFd) {
911 auto *peerInfo = (ConnectedCU_t *)event.data.ptr;
912 mdclog_write(MDCLOG_ERR, "epoll error, events %0x on fd %d, RAN NAME : %s",
913 event.events, peerInfo->fileDescriptor, peerInfo->enodbName);
915 rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
916 "%s|Failed SCTP Connection",
917 peerInfo->enodbName);
918 message.message.asndata = rmrMessageBuffer.sendMessage->payload;
919 message.message.asnLength = rmrMessageBuffer.sendMessage->len;
921 memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
922 message.message.direction = 'N';
923 if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
924 mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
927 close(peerInfo->fileDescriptor);
928 params->sctpMap->erase(peerInfo->enodbName);
929 cleanHashEntry((ConnectedCU_t *) event.data.ptr, params->sctpMap);
931 mdclog_write(MDCLOG_ERR, "epoll error, events %0x on RMR FD", event.events);
939 int setSocketNoBlocking(int socket) {
940 auto flags = fcntl(socket, F_GETFL, 0);
943 mdclog_mdc_add("func", "fcntl");
944 mdclog_write(MDCLOG_ERR, "%s, %s", __FUNCTION__, strerror(errno));
949 flags = (unsigned) flags | (unsigned) O_NONBLOCK;
950 if (fcntl(socket, F_SETFL, flags) == -1) {
951 mdclog_mdc_add("func", "fcntl");
952 mdclog_write(MDCLOG_ERR, "%s, %s", __FUNCTION__, strerror(errno));
965 void cleanHashEntry(ConnectedCU_t *val, Sctp_Map_t *m) {
967 auto port = (uint16_t) strtol(val->portNumber, &dummy, 10);
968 char searchBuff[2048]{};
970 snprintf(searchBuff, sizeof searchBuff, "host:%s:%d", val->hostName, port);
971 m->erase(searchBuff);
973 m->erase(val->enodbName);
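// Illustrative example (hypothetical values): for a CU stored with hostName "10.20.30.40",
// portNumber "36422" and enodbName "gnb_208_092_001100", this removes the map entries
// keyed "host:10.20.30.40:36422" and "gnb_208_092_001100".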
979 * @param fd file descriptor
980 * @param data the asn data to send
981 * @param len length of the data
982 * @param enodbName the enodbName as in the map for printing purpose
983 * @param m map host information
984 * @param mtype message number
985 * @return 0 success, a negative number on fail
987 int sendSctpMsg(ConnectedCU_t *peerInfo, ReportingMessages_t &message, Sctp_Map_t *m) {
988 auto loglevel = mdclog_level_get();
989 int fd = peerInfo->fileDescriptor;
990 if (loglevel >= MDCLOG_DEBUG) {
991 mdclog_write(MDCLOG_DEBUG, "Send SCTP message for CU %s, %s",
992 message.message.enodbName, __FUNCTION__);
996 if (send(fd, message.message.asndata, message.message.asnLength, MSG_NOSIGNAL) < 0) {
997 if (errno == EINTR) {
1000 mdclog_write(MDCLOG_ERR, "error writing a message to the CU, %s ", strerror(errno));
1001 if (!peerInfo->isConnected) {
1002 mdclog_write(MDCLOG_ERR, "connection to CU %s is still in progress.", message.message.enodbName);
1005 cleanHashEntry(peerInfo, m);
1007 char key[MAX_ENODB_NAME_SIZE * 2];
1008 snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", message.message.enodbName,
1009 message.message.messageType);
1010 if (loglevel >= MDCLOG_DEBUG) {
1011 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
1013 auto tmp = m->find(key);
1020 message.message.direction = 'D';
1021 // send report.buffer of size
1022 buildJsonMessage(message);
1024 if (loglevel >= MDCLOG_DEBUG) {
1025 mdclog_write(MDCLOG_DEBUG,
1026 "SCTP message for CU %s sent from %s",
1027 message.message.enodbName,
1037 * @param rmrMessageBuffer
1039 void getRequestMetaData(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1040 message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
1041 message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
1043 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1044 mdclog_write(MDCLOG_DEBUG, "Message from Xapp RAN name = %s message length = %ld",
1045 message.message.enodbName, (unsigned long) message.message.asnLength);
1055 * @param numOfMessages
1056 * @param rmrMessageBuffer
1060 int receiveDataFromSctp(struct epoll_event *events,
1061 Sctp_Map_t *sctpMap,
1063 RmrMessagesBuffer_t &rmrMessageBuffer,
1064 struct timespec &ts) {
1065 /* We have data on the fd waiting to be read. Read and display it.
1066 * We must read whatever data is available completely, as we are running
1067 * in edge-triggered mode and won't get a notification again for the same data. */
1068 ReportingMessages_t message {};
1070 auto loglevel = mdclog_level_get();
1072 // get the identity of the interface
1073 message.peerInfo = (ConnectedCU_t *)events->data.ptr;
1075 struct timespec start{0, 0};
1076 struct timespec decodeStart{0, 0};
1077 struct timespec end{0, 0};
1079 E2AP_PDU_t *pdu = nullptr;
1082 if (loglevel >= MDCLOG_DEBUG) {
1083 mdclog_write(MDCLOG_DEBUG, "Start Read from SCTP %d fd", message.peerInfo->fileDescriptor);
1084 clock_gettime(CLOCK_MONOTONIC, &start);
1086 // read the buffer directly to rmr payload
1087 message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1088 message.message.asnLength = rmrMessageBuffer.sendMessage->len =
1089 read(message.peerInfo->fileDescriptor, rmrMessageBuffer.sendMessage->payload, RECEIVE_SCTP_BUFFER_SIZE);
1091 if (loglevel >= MDCLOG_DEBUG) {
1092 mdclog_write(MDCLOG_DEBUG, "Finish Read from SCTP %d fd message length = %ld",
1093 message.peerInfo->fileDescriptor, message.message.asnLength);
1096 memcpy(message.message.enodbName, message.peerInfo->enodbName, sizeof(message.peerInfo->enodbName));
1097 message.message.direction = 'U';
1098 message.message.time.tv_nsec = ts.tv_nsec;
1099 message.message.time.tv_sec = ts.tv_sec;
1101 if (message.message.asnLength < 0) {
1102 if (errno == EINTR) {
1105 /* If errno == EAGAIN, that means we have read all
1106 data. So go back to the main loop. */
1107 if (errno != EAGAIN) {
1108 mdclog_write(MDCLOG_ERR, "Read error, %s ", strerror(errno));
1110 } else if (loglevel >= MDCLOG_DEBUG) {
1111 mdclog_write(MDCLOG_DEBUG, "EAGAIN - descriptor = %d", message.peerInfo->fileDescriptor);
1114 } else if (message.message.asnLength == 0) {
1115 /* End of file. The remote has closed the connection. */
1116 if (loglevel >= MDCLOG_INFO) {
1117 mdclog_write(MDCLOG_INFO, "END of File Closed connection - descriptor = %d",
1118 message.peerInfo->fileDescriptor);
1124 if (loglevel >= MDCLOG_DEBUG) {
1125 char printBuffer[RECEIVE_SCTP_BUFFER_SIZE]{};
1126 char *tmp = printBuffer;
1127 for (size_t i = 0; i < (size_t)message.message.asnLength; ++i) {
1128 snprintf(tmp, 3, "%02x", message.message.asndata[i]);
1131 printBuffer[message.message.asnLength] = 0;
1132 clock_gettime(CLOCK_MONOTONIC, &end);
1133 mdclog_write(MDCLOG_DEBUG, "Before decoding E2AP PDU for : %s, Read time is : %ld seconds, %ld nanoseconds",
1134 message.peerInfo->enodbName, end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
1135 mdclog_write(MDCLOG_DEBUG, "PDU buffer length = %ld, data = : %s", message.message.asnLength,
1137 clock_gettime(CLOCK_MONOTONIC, &decodeStart);
1140 auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1141 message.message.asndata, message.message.asnLength);
1142 if (rval.code != RC_OK) {
1143 mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2AP PDU from RAN : %s", rval.code,
1144 message.peerInfo->enodbName);
1148 if (loglevel >= MDCLOG_DEBUG) {
1149 clock_gettime(CLOCK_MONOTONIC, &end);
1150 mdclog_write(MDCLOG_DEBUG, "After decoding E2AP PDU for : %s, decode time is : %ld seconds, %ld nanoseconds",
1151 message.peerInfo->enodbName, end.tv_sec - decodeStart.tv_sec, end.tv_nsec - decodeStart.tv_nsec);
1154 FILE *stream = open_memstream(&printBuffer, &size);
1155 asn_fprint(stream, &asn_DEF_E2AP_PDU, pdu);
1156 mdclog_write(MDCLOG_DEBUG, "Decoded E2AP PDU : %s", printBuffer);
1157 clock_gettime(CLOCK_MONOTONIC, &decodeStart);
1160 switch (pdu->present) {
1161 case E2AP_PDU_PR_initiatingMessage: {//initiating message
1162 asnInitiatingRequest(pdu, sctpMap, message, rmrMessageBuffer);
1165 case E2AP_PDU_PR_successfulOutcome: { //successful outcome
1166 asnSuccessfulMsg(pdu, sctpMap, message, rmrMessageBuffer);
1169 case E2AP_PDU_PR_unsuccessfulOutcome: { //Unsuccessful Outcome
1170 asnUnSuccsesfulMsg(pdu, sctpMap, message, rmrMessageBuffer);
1174 mdclog_write(MDCLOG_ERR, "Unknown index %d in E2AP PDU", pdu->present);
1177 if (loglevel >= MDCLOG_DEBUG) {
1178 clock_gettime(CLOCK_MONOTONIC, &end);
1179 mdclog_write(MDCLOG_DEBUG,
1180 "After processing message and sent to rmr for : %s, Read time is : %ld seconds, %ld nanoseconds",
1181 message.peerInfo->enodbName, end.tv_sec - decodeStart.tv_sec, end.tv_nsec - decodeStart.tv_nsec);
1184 if (pdu != nullptr) {
1185 ASN_STRUCT_RESET(asn_DEF_E2AP_PDU, pdu);
1186 //ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);
1192 if (loglevel >= MDCLOG_INFO) {
1193 mdclog_write(MDCLOG_INFO, "Closed connection - descriptor = %d", message.peerInfo->fileDescriptor);
1195 message.message.asnLength = rmrMessageBuffer.sendMessage->len =
1196 snprintf((char *)rmrMessageBuffer.sendMessage->payload,
1198 "%s|CU disconnected unexpectedly",
1199 message.peerInfo->enodbName);
1200 message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1202 if (sendRequestToXapp(message,
1203 RIC_SCTP_CONNECTION_FAILURE,
1204 rmrMessageBuffer) != 0) {
1205 mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
1208 /* Closing descriptor make epoll remove it from the set of descriptors which are monitored. */
1209 close(message.peerInfo->fileDescriptor);
1210 cleanHashEntry((ConnectedCU_t *) events->data.ptr, sctpMap);
1212 if (loglevel >= MDCLOG_DEBUG) {
1213 clock_gettime(CLOCK_MONOTONIC, &end);
1214 mdclog_write(MDCLOG_DEBUG, "from receive SCTP to send RMR time is %ld seconds and %ld nanoseconds",
1215 end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
1221 static void buildAndSendSetupRequest(ReportingMessages_t &message,
1222 RmrMessagesBuffer_t &rmrMessageBuffer,
1224 string const &messageName,
1225 string const &ieName,
1226 vector<string> &functionsToAdd_v,
1227 vector<string> &functionsToModified_v*/) {
1228 auto logLevel = mdclog_level_get();
1229 // now we can send the data to e2Mgr
1232 auto buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
1233 unsigned char buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
1235 er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu, buffer, buffer_size);
1236 if (er.encoded == -1) {
1237 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1239 } else if (er.encoded > (ssize_t) buffer_size) {
1240 buffer_size = er.encoded + 128;
1241 mdclog_write(MDCLOG_WARN, "Buffer of size %d is too small for %s. Reallocate buffer of size %d",
1243 asn_DEF_E2AP_PDU.name, buffer_size);
1244 buffer_size = er.encoded + 128;
1248 buffer[er.encoded] = '\0';
1253 string res((char *)buffer);
1254 res.erase(std::remove(res.begin(), res.end(), '\n'), res.end());
1255 res.erase(std::remove(res.begin(), res.end(), '\t'), res.end());
1256 res.erase(std::remove(res.begin(), res.end(), ' '), res.end());
1259 // if (!functionsToAdd_v.empty() || !functionsToModified_v.empty()) {
1260 // res = buildXmlData(messageName, ieName, functionsToAdd_v, functionsToModified_v, buffer, (size_t) er.encoded);
1263 // if (res.length() == 0) {
1264 // rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, buffer_size + 256);
1265 // rmrMsg->len = snprintf((char *) rmrMsg->payload, RECEIVE_SCTP_BUFFER_SIZE * 2, "%s:%d|%s",
1266 // message.peerInfo->sctpParams->myIP.c_str(),
1267 // message.peerInfo->sctpParams->rmrPort,
1270 rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, (int)res.length() + 256);
1271 rmrMsg->len = snprintf((char *) rmrMsg->payload, res.length() + 256, "%s:%d|%s",
1272 message.peerInfo->sctpParams->myIP.c_str(),
1273 message.peerInfo->sctpParams->rmrPort,
1277 if (logLevel >= MDCLOG_DEBUG) {
1278 mdclog_write(MDCLOG_DEBUG, "Setup request of size %d :\n %s\n", rmrMsg->len, rmrMsg->payload);
1281 rmrMsg->mtype = message.message.messageType;
1283 rmr_bytes2meid(rmrMsg, (unsigned char *) message.message.enodbName, strlen(message.message.enodbName));
1285 static unsigned char tx[32];
1286 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1287 rmr_bytes2xact(rmrMsg, tx, strlen((const char *) tx));
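// For illustration (hypothetical values, and assuming the elided argument is the
// whitespace-stripped XER text in res): the payload built above follows the "%s:%d|%s"
// layout, i.e. "<local-ip>:<rmr-port>|<XER-encoded E2AP PDU>", for example
//     10.0.0.1:38000|<E2AP-PDU><initiatingMessage>...</initiatingMessage></E2AP-PDU>
// and the xact id is the 15-character transaction counter written into tx above.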
1289 rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
1290 if (rmrMsg == nullptr) {
1291 mdclog_write(MDCLOG_ERR, "RMR send failed, returned nullptr");
1292 } else if (rmrMsg->state != 0) {
1293 char meid[RMR_MAX_MEID]{};
1294 if (rmrMsg->state == RMR_ERR_RETRY) {
1297 mdclog_write(MDCLOG_INFO, "RETRY sending Message %d to Xapp from %s",
1298 rmrMsg->mtype, rmr_get_meid(rmrMsg, (unsigned char *) meid));
1299 rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
1300 if (rmrMsg == nullptr) {
1301 mdclog_write(MDCLOG_ERR, "RMR retry send failed, returned nullptr");
1302 } else if (rmrMsg->state != 0) {
1303 mdclog_write(MDCLOG_ERR,
1304 "RMR Retry failed %s sending request %d to Xapp from %s",
1305 translateRmrErrorMessages(rmrMsg->state).c_str(),
1307 rmr_get_meid(rmrMsg, (unsigned char *) meid));
1310 mdclog_write(MDCLOG_ERR, "RMR failed: %s. sending request %d to Xapp from %s",
1311 translateRmrErrorMessages(rmrMsg->state).c_str(),
1313 rmr_get_meid(rmrMsg, (unsigned char *) meid));
1316 message.peerInfo->gotSetup = true;
1317 buildJsonMessage(message);
1318 if (rmrMsg != nullptr) {
1319 rmr_free_msg(rmrMsg);
1324 int RAN_Function_list_To_Vector(RANfunctions_List_t& list, vector <string> &runFunXML_v) {
1326 runFunXML_v.clear();
1327 for (auto j = 0; j < list.list.count; j++) {
1328 auto *raNfunctionItemIEs = (RANfunction_ItemIEs_t *)list.list.array[j];
1329 if (raNfunctionItemIEs->id == ProtocolIE_ID_id_RANfunction_Item &&
1330 (raNfunctionItemIEs->value.present == RANfunction_ItemIEs__value_PR_RANfunction_Item)) {
1332 E2SM_gNB_NRT_RANfunction_Definition_t *ranFunDef = nullptr;
1333 auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER,
1334 &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
1335 (void **)&ranFunDef,
1336 raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.buf,
1337 raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.size);
1338 if (rval.code != RC_OK) {
1339 mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2SM message from : %s",
1341 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name);
1345 auto xml_buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
1346 unsigned char xml_buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
1347 memset(xml_buffer, 0, RECEIVE_SCTP_BUFFER_SIZE * 2);
1349 auto er = asn_encode_to_buffer(nullptr,
1351 &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
1355 if (er.encoded == -1) {
1356 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s",
1357 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
1359 } else if (er.encoded > (ssize_t)xml_buffer_size) {
1360 mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, at %s line %d",
1361 (int) xml_buffer_size,
1362 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name, __func__, __LINE__);
1364 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1365 mdclog_write(MDCLOG_DEBUG, "Encoding E2SM %s PDU number %d : %s",
1366 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
1371 string runFuncs = (char *)(xml_buffer);
1372 runFunXML_v.emplace_back(runFuncs);
1379 int collectServiceUpdate_RequestData(E2AP_PDU_t *pdu,
1380 Sctp_Map_t *sctpMap,
1381 ReportingMessages_t &message,
1382 vector <string> &RANfunctionsAdded_v,
1383 vector <string> &RANfunctionsModified_v) {
1384 memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
1385 for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.count; i++) {
1386 auto *ie = pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.array[i];
1387 if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
1388 if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctionsID_List) {
1389 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1390 mdclog_write(MDCLOG_DEBUG, "RAN function list has %d entries",
1391 ie->value.choice.RANfunctions_List.list.count);
1393 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
1397 } else if (ie->id == ProtocolIE_ID_id_RANfunctionsModified) {
1398 if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctions_List) {
1399 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1400 mdclog_write(MDCLOG_DEBUG, "RAN function list has %d entries",
1401 ie->value.choice.RANfunctions_List.list.count);
1403 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsModified_v) != 0 ) {
1409 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1410 mdclog_write(MDCLOG_DEBUG, "RAN function vector has %ld entries",
1411 RANfunctionsAdded_v.size());
1419 void buildPrometheusList(ConnectedCU_t *peerInfo, Family<Counter> *prometheusFamily) {
1420 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Messages"}});
1421 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Bytes"}});
1423 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Messages"}});
1424 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Bytes"}});
1426 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICindication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Messages"}});
1427 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICindication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Bytes"}});
1429 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Messages"}});
1430 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Bytes"}});
1432 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Messages"}});
1433 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Bytes"}});
1434 // ---------------------------------------------
1435 peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Messages"}});
1436 peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Bytes"}});
1438 peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Messages"}});
1439 peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Bytes"}});
1441 peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Messages"}});
1442 peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Bytes"}});
1444 peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Messages"}});
1445 peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Bytes"}});
1446 //-------------------------------------------------------------
1448 peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Messages"}});
1449 peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Bytes"}});
1451 peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Messages"}});
1452 peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Bytes"}});
1454 peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Messages"}});
1455 peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Bytes"}});
1457 //====================================================================================
1458 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Messages"}});
1459 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Bytes"}});
1461 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Messages"}});
1462 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Bytes"}});
1464 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Messages"}});
1465 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Bytes"}});
1467 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceQuery)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Messages"}});
1468 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceQuery)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Bytes"}});
1470 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Messages"}});
1471 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Bytes"}});
1473 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Messages"}});
1474 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Bytes"}});
1475 //---------------------------------------------------------------------------------------------------------
1476 peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Messages"}});
1477 peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Bytes"}});
1479 peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Messages"}});
1480 peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Bytes"}});
1482 peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Messages"}});
1483 peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Bytes"}});
1484 //----------------------------------------------------------------------------------------------------------------
1485 peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Messages"}});
1486 peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Bytes"}});
1488 peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Messages"}});
1489 peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Bytes"}});
1496 * @param RANfunctionsAdded_v
1499 int collectSetupRequestData(E2AP_PDU_t *pdu,
1500 Sctp_Map_t *sctpMap,
1501 ReportingMessages_t &message /*, vector <string> &RANfunctionsAdded_v*/) {
1502 memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
1503 for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.count; i++) {
1504 auto *ie = pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.array[i];
1505 if (ie->id == ProtocolIE_ID_id_GlobalE2node_ID) {
1506 // get the ran name for meid
1507 if (ie->value.present == E2setupRequestIEs__value_PR_GlobalE2node_ID) {
1508 if (buildRanName(message.peerInfo->enodbName, ie) < 0) {
1509 mdclog_write(MDCLOG_ERR, "Bad param in E2setupRequestIEs GlobalE2node_ID.\n");
1510 // no message will be sent
1514 memcpy(message.message.enodbName, message.peerInfo->enodbName, strlen(message.peerInfo->enodbName));
1515 sctpMap->setkey(message.message.enodbName, message.peerInfo);
1517 } /*else if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
1518 if (ie->value.present == E2setupRequestIEs__value_PR_RANfunctions_List) {
1519 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1520 mdclog_write(MDCLOG_DEBUG, "Run function list have %d entries",
1521 ie->value.choice.RANfunctions_List.list.count);
1523 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
1529 // if (mdclog_level_get() >= MDCLOG_DEBUG) {
1530 // mdclog_write(MDCLOG_DEBUG, "Run function vector have %ld entries",
1531 // RANfunctionsAdded_v.size());
1536 int XML_From_PER(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1537 E2AP_PDU_t *pdu = nullptr;
1539 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1540 mdclog_write(MDCLOG_DEBUG, "got PER message of size %d : %s",
1541 rmrMessageBuffer.sendMessage->len, rmrMessageBuffer.sendMessage->payload);
1543 auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1544 rmrMessageBuffer.sendMessage->payload, rmrMessageBuffer.sendMessage->len);
1545 if (rval.code != RC_OK) {
1546 mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) setup response from E2MGR : %s",
1548 message.message.enodbName);
1552 int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
1553 auto er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu,
1554 rmrMessageBuffer.sendMessage->payload, buff_size);
1555 if (er.encoded == -1) {
1556 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1558 } else if (er.encoded > (ssize_t)buff_size) {
1559 mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, at %s line %d",
1560 (int)rmrMessageBuffer.sendMessage->len,
1561 asn_DEF_E2AP_PDU.name,
1566 rmrMessageBuffer.sendMessage->len = er.encoded;
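// At this point the send buffer holds the XER (XML) rendering of the PDU that arrived
// PER-encoded. A minimal usage sketch (an assumption, following the 0-on-success
// convention documented for the other helpers in this file):
//
//     if (XML_From_PER(message, rmrMessageBuffer) == 0) {
//         // sendMessage->payload now carries "<E2AP-PDU>...</E2AP-PDU>" text of
//         // length sendMessage->len, ready to be forwarded to the xApp over RMR
//     }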
1575 * @param rmrMessageBuffer
1577 void asnInitiatingRequest(E2AP_PDU_t *pdu,
1578 Sctp_Map_t *sctpMap,
1579 ReportingMessages_t &message,
1580 RmrMessagesBuffer_t &rmrMessageBuffer) {
1581 auto logLevel = mdclog_level_get();
1582 auto procedureCode = ((InitiatingMessage_t *) pdu->choice.initiatingMessage)->procedureCode;
1583 if (logLevel >= MDCLOG_DEBUG) {
1584 mdclog_write(MDCLOG_DEBUG, "Initiating message %ld\n", procedureCode);
1586 switch (procedureCode) {
1587 case ProcedureCode_id_E2setup: {
1588 if (logLevel >= MDCLOG_DEBUG) {
1589 mdclog_write(MDCLOG_DEBUG, "Got E2setup");
1592 // vector <string> RANfunctionsAdded_v;
1593 // vector <string> RANfunctionsModified_v;
1594 // RANfunctionsAdded_v.clear();
1595 // RANfunctionsModified_v.clear();
1596 if (collectSetupRequestData(pdu, sctpMap, message) != 0) {
1600 buildPrometheusList(message.peerInfo, message.peerInfo->sctpParams->prometheusFamily);
1602 string messageName("E2setupRequest");
1603 string ieName("E2setupRequestIEs");
1604 message.message.messageType = RIC_E2_SETUP_REQ;
1605 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
1606 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment((double)message.message.asnLength);
1607 buildAndSendSetupRequest(message, rmrMessageBuffer, pdu);
1610 case ProcedureCode_id_RICserviceUpdate: {
1611 if (logLevel >= MDCLOG_DEBUG) {
1612 mdclog_write(MDCLOG_DEBUG, "Got RICserviceUpdate %s", message.message.enodbName);
1614 // vector <string> RANfunctionsAdded_v;
1615 // vector <string> RANfunctionsModified_v;
1616 // RANfunctionsAdded_v.clear();
1617 // RANfunctionsModified_v.clear();
1618 // if (collectServiceUpdate_RequestData(pdu, sctpMap, message,
1619 // RANfunctionsAdded_v, RANfunctionsModified_v) != 0) {
1623 string messageName("RICserviceUpdate");
1624 string ieName("RICserviceUpdateIEs");
1625 message.message.messageType = RIC_SERVICE_UPDATE;
1626 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
1627 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment((double)message.message.asnLength);
1629 buildAndSendSetupRequest(message, rmrMessageBuffer, pdu);
1632 case ProcedureCode_id_ErrorIndication: {
1633 if (logLevel >= MDCLOG_DEBUG) {
1634 mdclog_write(MDCLOG_DEBUG, "Got ErrorIndication %s", message.message.enodbName);
1636 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication]->Increment();
1637 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication]->Increment((double)message.message.asnLength);
1638 if (sendRequestToXapp(message, RIC_ERROR_INDICATION, rmrMessageBuffer) != 0) {
1639 mdclog_write(MDCLOG_ERR, "RIC_ERROR_INDICATION failed to send to xAPP");
1643 case ProcedureCode_id_Reset: {
1644 if (logLevel >= MDCLOG_DEBUG) {
1645 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
1648 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
1649 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment((double)message.message.asnLength);
1650 if (XML_From_PER(message, rmrMessageBuffer) < 0) {
1654 if (sendRequestToXapp(message, RIC_E2_RESET_REQ, rmrMessageBuffer) != 0) {
1655 mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_REQ message failed to send to xAPP");
1659 case ProcedureCode_id_RICindication: {
1660 if (logLevel >= MDCLOG_DEBUG) {
1661 mdclog_write(MDCLOG_DEBUG, "Got RICindication %s", message.message.enodbName);
1663 for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.count; i++) {
1664 auto messageSent = false;
1665 RICindication_IEs_t *ie = pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.array[i];
1666 if (logLevel >= MDCLOG_DEBUG) {
1667 mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1669 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1670 if (logLevel >= MDCLOG_DEBUG) {
1671 mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1673 if (ie->value.present == RICindication_IEs__value_PR_RICrequestID) {
1674 static unsigned char tx[32];
1675 message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_INDICATION;
1676 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1677 rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1678 rmr_bytes2meid(rmrMessageBuffer.sendMessage,
1679 (unsigned char *)message.message.enodbName,
1680 strlen(message.message.enodbName));
1681 rmrMessageBuffer.sendMessage->state = 0;
1682 rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1684 //ie->value.choice.RICrequestID.ricInstanceID;
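                            // Note: sub_id is set to the RIC instance (subscription) id,
                            // presumably so RMR can route the indication to the xApp that
                            // owns the subscription.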
1685 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1686 mdclog_write(MDCLOG_DEBUG, "sub id = %d, mtype = %d, ric instance id %ld, requestor id = %ld",
1687 rmrMessageBuffer.sendMessage->sub_id,
1688 rmrMessageBuffer.sendMessage->mtype,
1689 ie->value.choice.RICrequestID.ricInstanceID,
1690 ie->value.choice.RICrequestID.ricRequestorID);
1692 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICindication]->Increment();
1693 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICindication]->Increment((double)message.message.asnLength);
1694 sendRmrMessage(rmrMessageBuffer, message);
1697                     mdclog_write(MDCLOG_ERR, "RIC request id missing; illegal request");
1707 mdclog_write(MDCLOG_ERR, "Undefined or not supported message = %ld", procedureCode);
1708 message.message.messageType = 0; // no RMR message type yet
1710 buildJsonMessage(message);
1721 * @param rmrMessageBuffer
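 *
 * Dispatches an E2AP SuccessfulOutcome by procedure code: Reset responses are
 * converted to XML and forwarded to the xApp, RICcontrol acknowledgements are
 * sent as RIC_CONTROL_ACK keyed by the RIC request id, and subscription and
 * subscription-delete responses go out as RIC_SUB_RESP / RIC_SUB_DEL_RESP.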
1723 void asnSuccessfulMsg(E2AP_PDU_t *pdu,
1724 Sctp_Map_t *sctpMap,
1725 ReportingMessages_t &message,
1726 RmrMessagesBuffer_t &rmrMessageBuffer) {
1727 auto procedureCode = pdu->choice.successfulOutcome->procedureCode;
1728 auto logLevel = mdclog_level_get();
1729 if (logLevel >= MDCLOG_INFO) {
1730 mdclog_write(MDCLOG_INFO, "Successful Outcome %ld", procedureCode);
1732 switch (procedureCode) {
1733 case ProcedureCode_id_Reset: {
1734 if (logLevel >= MDCLOG_DEBUG) {
1735 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
1737 message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
1738 message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment((double)message.message.asnLength);
1739 if (XML_From_PER(message, rmrMessageBuffer) < 0) {
1742 if (sendRequestToXapp(message, RIC_E2_RESET_RESP, rmrMessageBuffer) != 0) {
1743 mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_RESP message failed to send to xAPP");
1747 case ProcedureCode_id_RICcontrol: {
1748 if (logLevel >= MDCLOG_DEBUG) {
1749 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
1752 i < pdu->choice.successfulOutcome->value.choice.RICcontrolAcknowledge.protocolIEs.list.count; i++) {
1753 auto messageSent = false;
1754 RICcontrolAcknowledge_IEs_t *ie = pdu->choice.successfulOutcome->value.choice.RICcontrolAcknowledge.protocolIEs.list.array[i];
1755 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1756 mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1758 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1759 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1760 mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1762 if (ie->value.present == RICcontrolAcknowledge_IEs__value_PR_RICrequestID) {
1763 message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_ACK;
1764 rmrMessageBuffer.sendMessage->state = 0;
1765 // rmrMessageBuffer.sendMessage->sub_id = (int) ie->value.choice.RICrequestID.ricRequestorID;
1766 rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1768 static unsigned char tx[32];
1769 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1770 rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1771 rmr_bytes2meid(rmrMessageBuffer.sendMessage,
1772 (unsigned char *)message.message.enodbName,
1773 strlen(message.message.enodbName));
1775 message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
1776 message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment((double)message.message.asnLength);
1777 sendRmrMessage(rmrMessageBuffer, message);
1780                         mdclog_write(MDCLOG_ERR, "RIC request id missing; illegal request");
1790 case ProcedureCode_id_RICsubscription: {
1791 if (logLevel >= MDCLOG_DEBUG) {
1792 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
1794 message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
1795 message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment((double)message.message.asnLength);
1796 if (sendRequestToXapp(message, RIC_SUB_RESP, rmrMessageBuffer) != 0) {
1797 mdclog_write(MDCLOG_ERR, "Subscription successful message failed to send to xAPP");
1801 case ProcedureCode_id_RICsubscriptionDelete: {
1802 if (logLevel >= MDCLOG_DEBUG) {
1803 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
1805 message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
1806 message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment((double)message.message.asnLength);
1807 if (sendRequestToXapp(message, RIC_SUB_DEL_RESP, rmrMessageBuffer) != 0) {
1808 mdclog_write(MDCLOG_ERR, "Subscription delete successful message failed to send to xAPP");
1813 mdclog_write(MDCLOG_WARN, "Undefined or not supported message = %ld", procedureCode);
1814 message.message.messageType = 0; // no RMR message type yet
1815 buildJsonMessage(message);
1826 * @param rmrMessageBuffer
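 *
 * Dispatches an E2AP UnsuccessfulOutcome: RICcontrol failures are forwarded as
 * RIC_CONTROL_FAILURE, and subscription / subscription-delete failures are
 * forwarded to the xApp as RIC_SUB_FAILURE, with the matching counters updated.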
1828 void asnUnSuccsesfulMsg(E2AP_PDU_t *pdu,
1829 Sctp_Map_t *sctpMap,
1830 ReportingMessages_t &message,
1831 RmrMessagesBuffer_t &rmrMessageBuffer) {
1832 auto procedureCode = pdu->choice.unsuccessfulOutcome->procedureCode;
1833 auto logLevel = mdclog_level_get();
1834 if (logLevel >= MDCLOG_INFO) {
1835 mdclog_write(MDCLOG_INFO, "Unsuccessful Outcome %ld", procedureCode);
1837 switch (procedureCode) {
1838 case ProcedureCode_id_RICcontrol: {
1839 if (logLevel >= MDCLOG_DEBUG) {
1840 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
1843 i < pdu->choice.unsuccessfulOutcome->value.choice.RICcontrolFailure.protocolIEs.list.count; i++) {
1844 auto messageSent = false;
1845 RICcontrolFailure_IEs_t *ie = pdu->choice.unsuccessfulOutcome->value.choice.RICcontrolFailure.protocolIEs.list.array[i];
1846 if (logLevel >= MDCLOG_DEBUG) {
1847 mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1849 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1850 if (logLevel >= MDCLOG_DEBUG) {
1851 mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1853 if (ie->value.present == RICcontrolFailure_IEs__value_PR_RICrequestID) {
1854 message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_FAILURE;
1855 rmrMessageBuffer.sendMessage->state = 0;
1856 // rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricRequestorID;
1857 rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1858 static unsigned char tx[32];
1859 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1860 rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1861 rmr_bytes2meid(rmrMessageBuffer.sendMessage, (unsigned char *) message.message.enodbName,
1862 strlen(message.message.enodbName));
1863 message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
1864 message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment((double)message.message.asnLength);
1865 sendRmrMessage(rmrMessageBuffer, message);
1868                         mdclog_write(MDCLOG_ERR, "RIC request id missing; illegal request");
1877 case ProcedureCode_id_RICsubscription: {
1878 if (logLevel >= MDCLOG_DEBUG) {
1879 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
1881 message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
1882 message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment((double)message.message.asnLength);
1883 if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
1884 mdclog_write(MDCLOG_ERR, "Subscription unsuccessful message failed to send to xAPP");
1888 case ProcedureCode_id_RICsubscriptionDelete: {
1889 if (logLevel >= MDCLOG_DEBUG) {
1890 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
1892 message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
1893 message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment((double)message.message.asnLength);
1894 if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
1895 mdclog_write(MDCLOG_ERR, "Subscription Delete unsuccessful message failed to send to xAPP");
1900 mdclog_write(MDCLOG_WARN, "Undefined or not supported message = %ld", procedureCode);
1901 message.message.messageType = 0; // no RMR message type yet
1903 buildJsonMessage(message);
1914 * @param rmrMmessageBuffer
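 *
 * Stamps the RMR message with the RAN name (meid), the requested RMR message
 * type and a fresh transaction id, then forwards it to the xApp over RMR.
 * Returns the result of sendRmrMessage().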
1917 int sendRequestToXapp(ReportingMessages_t &message,
1919 RmrMessagesBuffer_t &rmrMmessageBuffer) {
1920 rmr_bytes2meid(rmrMmessageBuffer.sendMessage,
1921 (unsigned char *)message.message.enodbName,
1922 strlen(message.message.enodbName));
1923 message.message.messageType = rmrMmessageBuffer.sendMessage->mtype = requestId;
1924 rmrMmessageBuffer.sendMessage->state = 0;
1925 static unsigned char tx[32];
1926 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1927 rmr_bytes2xact(rmrMmessageBuffer.sendMessage, tx, strlen((const char *) tx));
1929 auto rc = sendRmrMessage(rmrMmessageBuffer, message);
1935 * @param pSctpParams
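 *
 * Initializes the RMR context for this E2 terminator instance, waits for the
 * routing table to become ready, enables tracing, and registers the RMR
 * receive file descriptor with the epoll loop. On failure rmrCtx is left null.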
1937 void getRmrContext(sctp_params_t &pSctpParams) {
1938 pSctpParams.rmrCtx = nullptr;
1939 pSctpParams.rmrCtx = rmr_init(pSctpParams.rmrAddress, RECEIVE_XAPP_BUFFER_SIZE, RMRFL_NONE);
1940 if (pSctpParams.rmrCtx == nullptr) {
1941 mdclog_write(MDCLOG_ERR, "Failed to initialize RMR");
1945 rmr_set_stimeout(pSctpParams.rmrCtx, 0); // disable retries for any send operation
1946     // verify that the routing table exists before we start handling traffic
1947 if (mdclog_level_get() >= MDCLOG_INFO) {
1948         mdclog_write(MDCLOG_INFO, "RMR initialized, waiting for rmr_ready");
1953 if ((rmrReady = rmr_ready(pSctpParams.rmrCtx)) == 0) {
1957 if (count % 60 == 0) {
1958             mdclog_write(MDCLOG_INFO, "waiting for RMR ready state for %d seconds", count);
1961 if (mdclog_level_get() >= MDCLOG_INFO) {
1962 mdclog_write(MDCLOG_INFO, "RMR running");
1964 rmr_init_trace(pSctpParams.rmrCtx, 200);
1965 // get the RMR fd for the epoll
1966 pSctpParams.rmrListenFd = rmr_get_rcvfd(pSctpParams.rmrCtx);
1967 struct epoll_event event{};
1968 // add RMR fd to epoll
1969 event.events = (EPOLLIN);
1970 event.data.fd = pSctpParams.rmrListenFd;
1971 // add listening RMR FD to epoll
1972 if (epoll_ctl(pSctpParams.epoll_fd, EPOLL_CTL_ADD, pSctpParams.rmrListenFd, &event)) {
1973 mdclog_write(MDCLOG_ERR, "Failed to add RMR descriptor to epoll");
1974 close(pSctpParams.rmrListenFd);
1975 rmr_close(pSctpParams.rmrCtx);
1976 pSctpParams.rmrCtx = nullptr;
1983 * @param rmrMessageBuffer
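 *
 * Decodes the XER (XML) E2AP PDU received from the xApp in
 * rmrMessageBuffer.rcvMessage and re-encodes it in place as aligned PER,
 * updating the message length, so it can be sent to the E2 node over SCTP.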
1986 int PER_FromXML(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1987 E2AP_PDU_t *pdu = nullptr;
1989 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1990         mdclog_write(MDCLOG_DEBUG, "Got XML format data from xApp of size %d: %s",
1991 rmrMessageBuffer.rcvMessage->len, rmrMessageBuffer.rcvMessage->payload);
1993 auto rval = asn_decode(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1994 rmrMessageBuffer.rcvMessage->payload, rmrMessageBuffer.rcvMessage->len);
1995 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1996 mdclog_write(MDCLOG_DEBUG, "%s After decoding the XML to PDU", __func__ );
1998 if (rval.code != RC_OK) {
1999         mdclog_write(MDCLOG_ERR, "Error %d decoding (unpack) XML E2AP PDU from xApp : %s",
2001 message.message.enodbName);
2005 int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
2006 auto er = asn_encode_to_buffer(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, pdu,
2007 rmrMessageBuffer.rcvMessage->payload, buff_size);
2008 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2009 mdclog_write(MDCLOG_DEBUG, "%s After encoding PDU to PER", __func__ );
2011 if (er.encoded == -1) {
2012 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
2014 } else if (er.encoded > (ssize_t)buff_size) {
2015             mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, at %s line %d",
2016 (int)rmrMessageBuffer.rcvMessage->len,
2017 asn_DEF_E2AP_PDU.name,
2022 rmrMessageBuffer.rcvMessage->len = er.encoded;
2029 * @param rmrMessageBuffer
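 *
 * Receives one RMR message from an xApp, resolves the target E2 node from the
 * message meid, converts XML payloads to PER where required, updates the
 * Prometheus counters and forwards the message to the E2 node over SCTP.
 * Keep-alive and health-check requests are answered directly over RMR.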
2033 int receiveXappMessages(Sctp_Map_t *sctpMap,
2034 RmrMessagesBuffer_t &rmrMessageBuffer,
2035 struct timespec &ts) {
2036 int loglevel = mdclog_level_get();
2037 if (rmrMessageBuffer.rcvMessage == nullptr) {
2039         mdclog_write(MDCLOG_ERR, "Failed to allocate RMR message, %s", strerror(errno));
2043 // if (loglevel >= MDCLOG_DEBUG) {
2044 // mdclog_write(MDCLOG_DEBUG, "Call to rmr_rcv_msg");
2046 rmrMessageBuffer.rcvMessage = rmr_rcv_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.rcvMessage);
2047 if (rmrMessageBuffer.rcvMessage == nullptr) {
2048         mdclog_write(MDCLOG_ERR, "RMR receive returned a null pointer, reallocated RMR message buffer");
2049 rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2052 ReportingMessages_t message;
2053 message.message.direction = 'D';
2054 message.message.time.tv_nsec = ts.tv_nsec;
2055 message.message.time.tv_sec = ts.tv_sec;
2057 // get message payload
2058 //auto msgData = msg->payload;
2059 if (rmrMessageBuffer.rcvMessage->state != 0) {
2060         mdclog_write(MDCLOG_ERR, "RMR received message with state = %d", rmrMessageBuffer.rcvMessage->state);
2063 rmr_get_meid(rmrMessageBuffer.rcvMessage, (unsigned char *)message.message.enodbName);
2064 message.peerInfo = (ConnectedCU_t *) sctpMap->find(message.message.enodbName);
2065 if (message.peerInfo == nullptr) {
2066 auto type = rmrMessageBuffer.rcvMessage->mtype;
2068 case RIC_SCTP_CLEAR_ALL:
2069 case E2_TERM_KEEP_ALIVE_REQ:
2070 case RIC_HEALTH_CHECK_REQ:
2073                 mdclog_write(MDCLOG_ERR, "Failed to send message, no CU entry for %s", message.message.enodbName);
2078 if (rmrMessageBuffer.rcvMessage->mtype != RIC_HEALTH_CHECK_REQ) {
2079 num_of_XAPP_messages.fetch_add(1, std::memory_order_release);
2082 switch (rmrMessageBuffer.rcvMessage->mtype) {
2083 case RIC_E2_SETUP_RESP : {
2084 if (loglevel >= MDCLOG_DEBUG) {
2085 mdclog_write(MDCLOG_DEBUG, "RIC_E2_SETUP_RESP");
2087 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2090 message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
2091 message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment(rmrMessageBuffer.rcvMessage->len);
2092 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2093 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_RESP");
2098 case RIC_E2_SETUP_FAILURE : {
2099 if (loglevel >= MDCLOG_DEBUG) {
2100 mdclog_write(MDCLOG_DEBUG, "RIC_E2_SETUP_FAILURE");
2102 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2105 message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
2106 message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment(rmrMessageBuffer.rcvMessage->len);
2107 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2108 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_FAILURE");
2113 case RIC_ERROR_INDICATION: {
2114 if (loglevel >= MDCLOG_DEBUG) {
2115 mdclog_write(MDCLOG_DEBUG, "RIC_ERROR_INDICATION");
2117 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication]->Increment();
2118 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication]->Increment(rmrMessageBuffer.rcvMessage->len);
2119 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2120 mdclog_write(MDCLOG_ERR, "Failed to send RIC_ERROR_INDICATION");
2126 if (loglevel >= MDCLOG_DEBUG) {
2127 mdclog_write(MDCLOG_DEBUG, "RIC_SUB_REQ");
2129 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
2130 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment(rmrMessageBuffer.rcvMessage->len);
2131 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2132 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_REQ");
2137 case RIC_SUB_DEL_REQ: {
2138 if (loglevel >= MDCLOG_DEBUG) {
2139 mdclog_write(MDCLOG_DEBUG, "RIC_SUB_DEL_REQ");
2141 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
2142 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment(rmrMessageBuffer.rcvMessage->len);
2143 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2144 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_DEL_REQ");
2149 case RIC_CONTROL_REQ: {
2150 if (loglevel >= MDCLOG_DEBUG) {
2151 mdclog_write(MDCLOG_DEBUG, "RIC_CONTROL_REQ");
2153 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
2154 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment(rmrMessageBuffer.rcvMessage->len);
2155 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2156 mdclog_write(MDCLOG_ERR, "Failed to send RIC_CONTROL_REQ");
2161 case RIC_SERVICE_QUERY: {
2162 if (loglevel >= MDCLOG_DEBUG) {
2163 mdclog_write(MDCLOG_DEBUG, "RIC_SERVICE_QUERY");
2165 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2168 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment();
2169 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment(rmrMessageBuffer.rcvMessage->len);
2170 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2171 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_QUERY");
2176 case RIC_SERVICE_UPDATE_ACK: {
2177 if (loglevel >= MDCLOG_DEBUG) {
2178 mdclog_write(MDCLOG_DEBUG, "RIC_SERVICE_UPDATE_ACK");
2180 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2181 mdclog_write(MDCLOG_ERR, "error in PER_FromXML");
2184 message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
2185 message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment(rmrMessageBuffer.rcvMessage->len);
2186 if (loglevel >= MDCLOG_DEBUG) {
2187 mdclog_write(MDCLOG_DEBUG, "Before sending to CU");
2189 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2190 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_ACK");
2195 case RIC_SERVICE_UPDATE_FAILURE: {
2196 if (loglevel >= MDCLOG_DEBUG) {
2197 mdclog_write(MDCLOG_DEBUG, "RIC_SERVICE_UPDATE_FAILURE");
2199 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2202 message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
2203 message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment(rmrMessageBuffer.rcvMessage->len);
2204 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2205 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_FAILURE");
2210 case RIC_E2_RESET_REQ: {
2211 if (loglevel >= MDCLOG_DEBUG) {
2212 mdclog_write(MDCLOG_DEBUG, "RIC_E2_RESET_REQ");
2214 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2217 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
2218 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment(rmrMessageBuffer.rcvMessage->len);
2219 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2220 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET");
2225 case RIC_E2_RESET_RESP: {
2226 if (loglevel >= MDCLOG_DEBUG) {
2227 mdclog_write(MDCLOG_DEBUG, "RIC_E2_RESET_RESP");
2229 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2232 message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
2233 message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment(rmrMessageBuffer.rcvMessage->len);
2234 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2235 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET_RESP");
2240 case RIC_SCTP_CLEAR_ALL: {
2241 mdclog_write(MDCLOG_INFO, "RIC_SCTP_CLEAR_ALL");
2242 // loop on all keys and close socket and then erase all map.
2244 sctpMap->getKeys(v);
2245 for (auto const &iter : v) { //}; iter != sctpMap.end(); iter++) {
2246 if (!boost::starts_with((string) (iter), "host:") && !boost::starts_with((string) (iter), "msg:")) {
2247 auto *peerInfo = (ConnectedCU_t *) sctpMap->find(iter);
2248 if (peerInfo == nullptr) {
2251 close(peerInfo->fileDescriptor);
2252 memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
2253 message.message.direction = 'D';
2254 message.message.time.tv_nsec = ts.tv_nsec;
2255 message.message.time.tv_sec = ts.tv_sec;
2257 message.message.asnLength = rmrMessageBuffer.sendMessage->len =
2258 snprintf((char *)rmrMessageBuffer.sendMessage->payload,
2260 "%s|RIC_SCTP_CLEAR_ALL",
2261 peerInfo->enodbName);
2262 message.message.asndata = rmrMessageBuffer.sendMessage->payload;
2263 mdclog_write(MDCLOG_INFO, "%s", message.message.asndata);
2264 if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
2265 mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
2275 case E2_TERM_KEEP_ALIVE_REQ: {
2276 // send message back
2277 rmr_bytes2payload(rmrMessageBuffer.sendMessage,
2278 (unsigned char *)rmrMessageBuffer.ka_message,
2279 rmrMessageBuffer.ka_message_len);
2280 rmrMessageBuffer.sendMessage->mtype = E2_TERM_KEEP_ALIVE_RESP;
2281 rmrMessageBuffer.sendMessage->state = 0;
2282 static unsigned char tx[32];
2283 auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2284 rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, txLen);
2285 rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2286 if (rmrMessageBuffer.sendMessage == nullptr) {
2287 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2288             mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP, RMR send returned NULL");
2289 } else if (rmrMessageBuffer.sendMessage->state != 0) {
2290 mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP, on RMR state = %d ( %s)",
2291 rmrMessageBuffer.sendMessage->state, translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str());
2292 } else if (loglevel >= MDCLOG_DEBUG) {
2293             mdclog_write(MDCLOG_DEBUG, "Got keep-alive request, sent response: %s", rmrMessageBuffer.ka_message);
2298 case RIC_HEALTH_CHECK_REQ: {
2299 static int counter = 0;
2300 // send message back
2301 rmr_bytes2payload(rmrMessageBuffer.rcvMessage,
2302 (unsigned char *)"OK",
2304 rmrMessageBuffer.rcvMessage->mtype = RIC_HEALTH_CHECK_RESP;
2305 rmrMessageBuffer.rcvMessage->state = 0;
2306 static unsigned char tx[32];
2307 auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2308 rmr_bytes2xact(rmrMessageBuffer.rcvMessage, tx, txLen);
2309 rmrMessageBuffer.rcvMessage = rmr_rts_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.rcvMessage);
2310 //rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2311 if (rmrMessageBuffer.rcvMessage == nullptr) {
2312 rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2313             mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP, RMR send returned NULL");
2314 } else if (rmrMessageBuffer.rcvMessage->state != 0) {
2315 mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP, on RMR state = %d ( %s)",
2316 rmrMessageBuffer.rcvMessage->state, translateRmrErrorMessages(rmrMessageBuffer.rcvMessage->state).c_str());
2317 } else if (loglevel >= MDCLOG_DEBUG && ++counter % 100 == 0) {
2318             mdclog_write(MDCLOG_DEBUG, "Got %d RIC_HEALTH_CHECK_REQ requests, sent: OK", counter);
2325 mdclog_write(MDCLOG_WARN, "Message Type : %d is not supported", rmrMessageBuffer.rcvMessage->mtype);
2326 message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
2327 message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
2328 message.message.time.tv_nsec = ts.tv_nsec;
2329 message.message.time.tv_sec = ts.tv_sec;
2330 message.message.messageType = rmrMessageBuffer.rcvMessage->mtype;
2332 buildJsonMessage(message);
2337 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2338 mdclog_write(MDCLOG_DEBUG, "EXIT OK from %s", __FUNCTION__);
2344  * Send a message to the CU without waiting for a successful or unsuccessful result
2345 * @param messageBuffer
2347 * @param failedMsgId
2351 int sendDirectionalSctpMsg(RmrMessagesBuffer_t &messageBuffer,
2352 ReportingMessages_t &message,
2354 Sctp_Map_t *sctpMap) {
2355 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2356 mdclog_write(MDCLOG_DEBUG, "send message: %d to %s address", message.message.messageType, message.message.enodbName);
2359 getRequestMetaData(message, messageBuffer);
2360 if (mdclog_level_get() >= MDCLOG_INFO) {
2361 mdclog_write(MDCLOG_INFO, "send message to %s address", message.message.enodbName);
2364 auto rc = sendMessagetoCu(sctpMap, messageBuffer, message, failedMsgId);
2371 * @param messageBuffer
2373 * @param failedMesgId
2376 int sendMessagetoCu(Sctp_Map_t *sctpMap,
2377 RmrMessagesBuffer_t &messageBuffer,
2378 ReportingMessages_t &message,
2381 message.message.messageType = messageBuffer.rcvMessage->mtype;
2382 auto rc = sendSctpMsg(message.peerInfo, message, sctpMap);
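/*
 * addToEpoll / modifyToEpoll (below)
 * Register or re-arm the peer's SCTP socket in the epoll set. On epoll_ctl
 * failure the socket is closed, the connection's hash entries are removed from
 * the SCTP map, and an error is reported so the caller can drop the peer.
 */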
2397 int addToEpoll(int epoll_fd,
2398 ConnectedCU_t *peerInfo,
2400 Sctp_Map_t *sctpMap,
2404 struct epoll_event event{};
2405 event.data.ptr = peerInfo;
2406 event.events = events;
2407 if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, peerInfo->fileDescriptor, &event) < 0) {
2408 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2409 mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_ADD (may check not to quit here), %s, %s %d",
2410 strerror(errno), __func__, __LINE__);
2412 close(peerInfo->fileDescriptor);
2413 if (enodbName != nullptr) {
2414 cleanHashEntry(peerInfo, sctpMap);
2415 char key[MAX_ENODB_NAME_SIZE * 2];
2416 snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", enodbName, msgType);
2417 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2418 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
2420 auto tmp = sctpMap->find(key);
2423 sctpMap->erase(key);
2426 peerInfo->enodbName[0] = 0;
2428 mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_ADD (may check not to quit here)");
2444 int modifyToEpoll(int epoll_fd,
2445 ConnectedCU_t *peerInfo,
2447 Sctp_Map_t *sctpMap,
2451 struct epoll_event event{};
2452 event.data.ptr = peerInfo;
2453 event.events = events;
2454 if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, peerInfo->fileDescriptor, &event) < 0) {
2455 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2456 mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_MOD (may check not to quit here), %s, %s %d",
2457 strerror(errno), __func__, __LINE__);
2459 close(peerInfo->fileDescriptor);
2460 cleanHashEntry(peerInfo, sctpMap);
2461 char key[MAX_ENODB_NAME_SIZE * 2];
2462 snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", enodbName, msgType);
2463 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2464 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
2466 auto tmp = sctpMap->find(key);
2470 sctpMap->erase(key);
2471         mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_MOD (may check not to quit here)");
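/*
 * sendRmrMessage
 * Emits a JSON trace record for the message, then sends it over RMR. A single
 * immediate retry is attempted when RMR reports RMR_ERR_RETRY; any other
 * non-zero state is logged and returned to the caller.
 */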
2478 int sendRmrMessage(RmrMessagesBuffer_t &rmrMessageBuffer, ReportingMessages_t &message) {
2479 buildJsonMessage(message);
2481 rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2483 if (rmrMessageBuffer.sendMessage == nullptr) {
2484 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2485         mdclog_write(MDCLOG_ERR, "RMR send failed, returned a NULL pointer");
2489 if (rmrMessageBuffer.sendMessage->state != 0) {
2490 char meid[RMR_MAX_MEID]{};
2491 if (rmrMessageBuffer.sendMessage->state == RMR_ERR_RETRY) {
2493 rmrMessageBuffer.sendMessage->state = 0;
2494 mdclog_write(MDCLOG_INFO, "RETRY sending Message type %d to Xapp from %s",
2495 rmrMessageBuffer.sendMessage->mtype,
2496 rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2497 rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2498 if (rmrMessageBuffer.sendMessage == nullptr) {
2499                 mdclog_write(MDCLOG_ERR, "RMR send failed, returned a NULL pointer");
2500 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2502 } else if (rmrMessageBuffer.sendMessage->state != 0) {
2503 mdclog_write(MDCLOG_ERR,
2504 "Message state %s while sending request %d to Xapp from %s after retry of 10 microseconds",
2505 translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str(),
2506 rmrMessageBuffer.sendMessage->mtype,
2507 rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2508 auto rc = rmrMessageBuffer.sendMessage->state;
2512 mdclog_write(MDCLOG_ERR, "Message state %s while sending request %d to Xapp from %s",
2513 translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str(),
2514 rmrMessageBuffer.sendMessage->mtype,
2515 rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2516 return rmrMessageBuffer.sendMessage->state;
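/*
 * buildJsonMessage
 * Base64-encodes the raw ASN.1 payload and emits a single-line JSON trace
 * record through the Boost.Log sink. Illustrative example (all field values
 * below are made up):
 *   {"header": {"ts": "1617181920.123456789","ranName": "gnb_208_092_303030",
 *    "messageType": 12010,"direction": "D"},"base64Length": 44,
 *    "asnBase64": "AAEC..."}
 */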
2522 void buildJsonMessage(ReportingMessages_t &message) {
2524 message.outLen = sizeof(message.base64Data);
2525 base64::encode((const unsigned char *) message.message.asndata,
2526 (const int) message.message.asnLength,
2529 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2530 mdclog_write(MDCLOG_DEBUG, "Tracing: ASN length = %d, base64 message length = %d ",
2531 (int) message.message.asnLength,
2532 (int) message.outLen);
2535 snprintf(message.buffer, sizeof(message.buffer),
2536 "{\"header\": {\"ts\": \"%ld.%09ld\","
2537 "\"ranName\": \"%s\","
2538 "\"messageType\": %d,"
2539 "\"direction\": \"%c\"},"
2540 "\"base64Length\": %d,"
2541 "\"asnBase64\": \"%s\"}",
2542 message.message.time.tv_sec,
2543 message.message.time.tv_nsec,
2544 message.message.enodbName,
2545 message.message.messageType,
2546 message.message.direction,
2547 (int) message.outLen,
2548 message.base64Data);
2549 static src::logger_mt &lg = my_logger::get();
2551 BOOST_LOG(lg) << message.buffer;
2557  * Translate an RMR error code to a human-readable string
2561 string translateRmrErrorMessages(int state) {
2565 str = "RMR_OK - state is good";
2567 case RMR_ERR_BADARG:
2568 str = "RMR_ERR_BADARG - argument passed to function was unusable";
2570 case RMR_ERR_NOENDPT:
2571             str = "RMR_ERR_NOENDPT - send/call could not find an endpoint based on msg type";
2574 str = "RMR_ERR_EMPTY - msg received had no payload; attempt to send an empty message";
2577 str = "RMR_ERR_NOHDR - message didn't contain a valid header";
2579 case RMR_ERR_SENDFAILED:
2580 str = "RMR_ERR_SENDFAILED - send failed; errno has nano reason";
2582 case RMR_ERR_CALLFAILED:
2583 str = "RMR_ERR_CALLFAILED - unable to send call() message";
2585 case RMR_ERR_NOWHOPEN:
2586 str = "RMR_ERR_NOWHOPEN - no wormholes are open";
2589 str = "RMR_ERR_WHID - wormhole id was invalid";
2591 case RMR_ERR_OVERFLOW:
2592 str = "RMR_ERR_OVERFLOW - operation would have busted through a buffer/field size";
2595 str = "RMR_ERR_RETRY - request (send/call/rts) failed, but caller should retry (EAGAIN for wrappers)";
2597 case RMR_ERR_RCVFAILED:
2598 str = "RMR_ERR_RCVFAILED - receive failed (hard error)";
2600 case RMR_ERR_TIMEOUT:
2601 str = "RMR_ERR_TIMEOUT - message processing call timed out";
2604 str = "RMR_ERR_UNSET - the message hasn't been populated with a transport buffer";
2607 str = "RMR_ERR_TRUNC - received message likely truncated";
2609 case RMR_ERR_INITFAILED:
2610 str = "RMR_ERR_INITFAILED - initialisation of something (probably message) failed";
2612 case RMR_ERR_NOTSUPP:
2613 str = "RMR_ERR_NOTSUPP - the request is not supported, or RMr was not initialised for the request";
2617 snprintf(buf, sizeof buf, "UNDOCUMENTED RMR_ERR : %d", state);