1 // Copyright 2019 AT&T Intellectual Property
2 // Copyright 2019 Nokia
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
16 // This source code is part of the near-RT RIC (RAN Intelligent Controller)
17 // platform project (RICP).
19 // TODO: High-level file comment.
23 #include <3rdparty/oranE2/RANfunctions-List.h>
24 #include "sctpThread.h"
25 #include "BuildRunName.h"
27 #include "3rdparty/oranE2SM/E2SM-gNB-NRT-RANfunction-Definition.h"
29 #include "pugixml/src/pugixml.hpp"
32 //using namespace std::placeholders;
33 using namespace boost::filesystem;
34 using namespace prometheus;
42 // need to expose without the include of gcov
43 extern "C" void __gcov_flush(void);
// Process signal handler registered in main() for SIGINT/SIGABRT/SIGTERM.
// Body is truncated in this view; the __gcov_flush declaration above suggests
// it flushes coverage data before terminating — TODO confirm.
static void catch_function(int signal) {
// Process-wide Boost.Log logger used for the rotating message-trace file.
BOOST_LOG_INLINE_GLOBAL_LOGGER_DEFAULT(my_logger, src::logger_mt)
boost::shared_ptr<sinks::synchronous_sink<sinks::text_file_backend>> boostLogger;
double cpuClock = 0.0;   // approximate CPU frequency, set in main() from approx_CPU_MHz()
bool jsonTrace = true;   // mirrors sctp_params_t::trace (assigned in buildConfiguration/handleConfigChange)
    // mdclog setup fragment: tag every log line with the "E2Terminator" identity,
    // then release the attribute object.
    mdclog_attr_init(&attr);
    mdclog_attr_set_ident(attr, "E2Terminator");
    mdclog_attr_destroy(attr);
// Process start timestamp used by age(); seconds_t expresses durations as double seconds.
auto start_time = std::chrono::high_resolution_clock::now();
typedef std::chrono::duration<double, std::ratio<1,1>> seconds_t;
    // age() body: seconds elapsed since process start.
    return seconds_t(std::chrono::high_resolution_clock::now() - start_time).count();
// Estimates CPU clock speed by sampling the TSC (rdtscp) before and after a
// sleep of `sleeptime` milliseconds and dividing elapsed cycles by elapsed
// wall-clock seconds (from age()).
// NOTE(review): despite the name, elapsed_cycles / elapsed_time(seconds)
// yields cycles per second (Hz), not MHz — confirm against callers.
// NOTE(review): the declaration of `aux` is not visible in this view.
double approx_CPU_MHz(unsigned sleeptime) {
    using namespace std::chrono_literals;
    uint64_t cycles_start = rdtscp(aux);
    double time_start = age();
    std::this_thread::sleep_for(sleeptime * 1ms);
    uint64_t elapsed_cycles = rdtscp(aux) - cycles_start;
    double elapsed_time = age() - time_start;
    return elapsed_cycles / elapsed_time;
//std::atomic<int64_t> rmrCounter{0};
std::atomic<int64_t> num_of_messages{0};        // total messages handled (incremented for RMR and SCTP events in listener())
std::atomic<int64_t> num_of_XAPP_messages{0};   // messages received from xApps via RMR; polled by handleTermInit()
// Transaction-id counter for RMR xact fields; seeded with a random value in main().
static long transactionCounter = 0;
// Creates the SCTP listening socket (IPv6 one-to-one style: AF_INET6 /
// SOCK_STREAM / IPPROTO_SCTP), binds it to sctpParams.sctpPort on all local
// addresses, switches it to non-blocking mode, starts listening and registers
// the fd (edge-triggered) with the shared epoll instance.
// NOTE(review): error-path return statements and closing braces appear to be
// elided in this view of the file.
int buildListeningPort(sctp_params_t &sctpParams) {
    sctpParams.listenFD = socket (AF_INET6, SOCK_STREAM, IPPROTO_SCTP);
    struct sockaddr_in6 servaddr {};
    servaddr.sin6_family = AF_INET6;
    servaddr.sin6_addr = in6addr_any;              // accept on every local interface
    servaddr.sin6_port = htons(sctpParams.sctpPort);
    if (bind(sctpParams.listenFD, (SA *)&servaddr, sizeof(servaddr)) < 0 ) {
        mdclog_write(MDCLOG_ERR, "Error binding. %s\n", strerror(errno));
    if (setSocketNoBlocking(sctpParams.listenFD) == -1) {
        //mdclog_write(MDCLOG_ERR, "Error binding. %s", strerror(errno));
    // Debug aid: report the address/port the socket actually bound to.
    if (mdclog_level_get() >= MDCLOG_DEBUG) {
        struct sockaddr_in6 cliaddr {};
        socklen_t len = sizeof(cliaddr);
        getsockname(sctpParams.listenFD, (SA *)&cliaddr, &len);
        inet_ntop(AF_INET6, &cliaddr.sin6_addr, buff, sizeof(buff));
        mdclog_write(MDCLOG_DEBUG, "My address: %s, port %d\n", buff, htons(cliaddr.sin6_port));
    if (listen(sctpParams.listenFD, SOMAXCONN) < 0) {
        mdclog_write(MDCLOG_ERR, "Error listening. %s\n", strerror(errno));
    struct epoll_event event {};
    event.events = EPOLLIN | EPOLLET;   // edge-triggered: listener() must accept() until EAGAIN
    event.data.fd = sctpParams.listenFD;
    // add listening port to epoll
    if (epoll_ctl(sctpParams.epoll_fd, EPOLL_CTL_ADD, sctpParams.listenFD, &event)) {
        printf("Failed to add descriptor to epoll\n");
        mdclog_write(MDCLOG_ERR, "Failed to add descriptor to epoll. %s\n", strerror(errno));
// Reads the e2term configuration file and populates `sctpParams`: RMR port,
// log level, trace-log volume, local IP, SCTP port, external FQDN, pod name,
// trace flag and Prometheus settings; finally wires up the Boost.Log rotating
// trace file under <volume>/tmp.  main() treats any non-zero return as fatal.
// NOTE(review): many error-return lines and closing braces are elided in this
// view of the file.
int buildConfiguration(sctp_params_t &sctpParams) {
    path p = (sctpParams.configFilePath + "/" + sctpParams.configFileName).c_str();
    const int size = 2048;              // hard upper bound on accepted config file size
    auto fileSize = file_size(p);
    if (fileSize > size) {
        mdclog_write(MDCLOG_ERR, "File %s larger than %d", p.string().c_str(), size);
        mdclog_write(MDCLOG_ERR, "Configuration File %s not exists", p.string().c_str());
    if (conf.openConfigFile(p.string()) == -1) {
        mdclog_write(MDCLOG_ERR, "Filed to open config file %s, %s",
                     p.string().c_str(), strerror(errno));
    // RMR listening port; the config key is literally "nano" (historical naming).
    int rmrPort = conf.getIntValue("nano");
        mdclog_write(MDCLOG_ERR, "illigal RMR port ");
    sctpParams.rmrPort = (uint16_t)rmrPort;
    snprintf(sctpParams.rmrAddress, sizeof(sctpParams.rmrAddress), "%d", (int) (sctpParams.rmrPort));

    auto tmpStr = conf.getStringValue("loglevel");
    if (tmpStr.length() == 0) {
        mdclog_write(MDCLOG_ERR, "illigal loglevel. Set loglevel to MDCLOG_INFO");
    transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
    // Map the (lower-cased) config string onto an mdclog level; anything
    // unrecognized falls back to MDCLOG_INFO.
    if ((tmpStr.compare("debug")) == 0) {
        sctpParams.logLevel = MDCLOG_DEBUG;
    } else if ((tmpStr.compare("info")) == 0) {
        sctpParams.logLevel = MDCLOG_INFO;
    } else if ((tmpStr.compare("warning")) == 0) {
        sctpParams.logLevel = MDCLOG_WARN;
    } else if ((tmpStr.compare("error")) == 0) {
        sctpParams.logLevel = MDCLOG_ERR;
        mdclog_write(MDCLOG_ERR, "illigal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
        sctpParams.logLevel = MDCLOG_INFO;
    mdclog_level_set(sctpParams.logLevel);

    tmpStr = conf.getStringValue("volume");
    if (tmpStr.length() == 0) {
        mdclog_write(MDCLOG_ERR, "illigal volume.");

    char tmpLogFilespec[VOLUME_URL_SIZE];
    tmpLogFilespec[0] = 0;
    sctpParams.volume[0] = 0;
    snprintf(sctpParams.volume, VOLUME_URL_SIZE, "%s", tmpStr.c_str());
    // copy the name to temp file as well
    snprintf(tmpLogFilespec, VOLUME_URL_SIZE, "%s", tmpStr.c_str());

    // define the file name in the tmp directory under the volume
    strcat(tmpLogFilespec,"/tmp/E2Term_%Y-%m-%d_%H-%M-%S.%N.tmpStr");

    sctpParams.myIP = conf.getStringValue("local-ip");
    if (sctpParams.myIP.length() == 0) {
        mdclog_write(MDCLOG_ERR, "illigal local-ip.");

    int sctpPort = conf.getIntValue("sctp-port");
    if (sctpPort == -1) {
        mdclog_write(MDCLOG_ERR, "illigal SCTP port ");
    sctpParams.sctpPort = (uint16_t)sctpPort;

    sctpParams.fqdn = conf.getStringValue("external-fqdn");
    if (sctpParams.fqdn.length() == 0) {
        mdclog_write(MDCLOG_ERR, "illigal external-fqdn");

    // "pod_name" stores the NAME of an environment variable that in turn
    // holds the actual pod name.
    std::string pod = conf.getStringValue("pod_name");
    if (pod.length() == 0) {
        mdclog_write(MDCLOG_ERR, "illigal pod_name in config file");
    auto *podName = getenv(pod.c_str());
    if (podName == nullptr) {
        mdclog_write(MDCLOG_ERR, "illigal pod_name or environment varible not exists : %s", pod.c_str());

    sctpParams.podName.assign(podName);
    if (sctpParams.podName.length() == 0) {
        mdclog_write(MDCLOG_ERR, "illigal pod_name");

    tmpStr = conf.getStringValue("trace");
    transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
    if ((tmpStr.compare("start")) == 0) {
        mdclog_write(MDCLOG_INFO, "Trace set to: start");
        sctpParams.trace = true;
    } else if ((tmpStr.compare("stop")) == 0) {
        mdclog_write(MDCLOG_INFO, "Trace set to: stop");
        sctpParams.trace = false;
    jsonTrace = sctpParams.trace;   // publish to the file-scope trace flag

    sctpParams.epollTimeOut = -1;   // default: epoll_wait blocks indefinitely
    tmpStr = conf.getStringValue("prometheusMode");
    transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
    if (tmpStr.length() != 0) {
        if (tmpStr.compare("push") == 0) {
            // NOTE(review): this assigns the MODE string ("push") to
            // prometheusPushAddress, while below prometheusMode receives the
            // "prometheusPushAddr" config value — these two assignments look
            // swapped; confirm against the struct definition and fix together.
            sctpParams.prometheusPushAddress = tmpStr;
            auto timeout = conf.getIntValue("prometheusPushTimeOut");
            if (timeout >= 5 && timeout <= 300) {
                sctpParams.epollTimeOut = timeout * 1000;   // epoll timeout drives the push period (ms)
                sctpParams.epollTimeOut = 10 * 1000;        // fallback: 10 seconds

    tmpStr = conf.getStringValue("prometheusPushAddr");
    if (tmpStr.length() != 0) {
        sctpParams.prometheusMode = tmpStr;   // NOTE(review): see swapped-assignment note above
    tmpStr = conf.getStringValue("prometheusPort");
    if (tmpStr.length() != 0) {
        sctpParams.prometheusPort = tmpStr;

    // Pre-render the keep-alive JSON sent with E2_TERM_INIT.
    sctpParams.ka_message_length = snprintf(sctpParams.ka_message, KA_MESSAGE_SIZE, "{\"address\": \"%s:%d\","
                                            "\"pod_name\": \"%s\"}",
                                            (const char *)sctpParams.myIP.c_str(),
                                            sctpParams.fqdn.c_str(),
                                            sctpParams.podName.c_str());

    if (mdclog_level_get() >= MDCLOG_INFO) {
        mdclog_mdc_add("RMR Port", to_string(sctpParams.rmrPort).c_str());
        mdclog_mdc_add("LogLevel", to_string(sctpParams.logLevel).c_str());
        mdclog_mdc_add("volume", sctpParams.volume);
        mdclog_mdc_add("tmpLogFilespec", tmpLogFilespec);
        mdclog_mdc_add("my ip", sctpParams.myIP.c_str());
        mdclog_mdc_add("pod name", sctpParams.podName.c_str());
        mdclog_write(MDCLOG_INFO, "running parameters for instance : %s", sctpParams.ka_message);

    // Files written to the current working directory
    boostLogger = logging::add_file_log(
            keywords::file_name = tmpLogFilespec, // to temp directory
            keywords::rotation_size = 10 * 1024 * 1024,
            keywords::time_based_rotation = sinks::file::rotation_at_time_interval(posix_time::hours(1)),
            keywords::format = "%Message%"
            //keywords::format = "[%TimeStamp%]: %Message%" // use each record with time stamp

    // Setup a destination folder for collecting rotated (closed) files -- since the same volume can use rename()
    boostLogger->locked_backend()->set_file_collector(sinks::file::make_collector(
            keywords::target = sctpParams.volume

    // Upon restart, scan the directory for files matching the file_name pattern
    boostLogger->locked_backend()->scan_for_files();

    // Enable auto-flushing after each log record written
    if (mdclog_level_get() >= MDCLOG_DEBUG) {
        boostLogger->locked_backend()->auto_flush(true);
// Returns this host's name via gethostname(); used to label the Prometheus
// push-gateway instance.  Body truncated in this view.
static std::string GetHostName() {
    if (::gethostname(hostname, sizeof(hostname))) {
/**
 * e2term entry point: seeds the RMR transaction counter, installs signal
 * handlers, reads configuration, builds the Prometheus registry/counter
 * family, the epoll instance, RMR context, inotify watch and the SCTP
 * listening socket, then starts one listener() worker thread per hardware
 * core (pinned via pthread_setaffinity_np) and loops in handleTermInit().
 * NOTE(review): error-path returns and several closing braces are elided in
 * this view of the file.
 */
int main(const int argc, char **argv) {
    sctp_params_t sctpParams;

    // Random seed for the RMR transaction-id counter.
    std::random_device device{};
    std::mt19937 generator(device());
    std::uniform_int_distribution<long> distribution(1, (long) 1e12);
    transactionCounter = distribution(generator);

//    uint32_t aux1 = 0;
//    st = rdtscp(aux1);

    unsigned num_cpus = std::thread::hardware_concurrency();
    mdclog_level_set(MDCLOG_INFO);

    if (std::signal(SIGINT, catch_function) == SIG_ERR) {
        mdclog_write(MDCLOG_ERR, "Error initializing SIGINT");
    if (std::signal(SIGABRT, catch_function)== SIG_ERR) {
        mdclog_write(MDCLOG_ERR, "Error initializing SIGABRT");
    if (std::signal(SIGTERM, catch_function)== SIG_ERR) {
        mdclog_write(MDCLOG_ERR, "Error initializing SIGTERM");

    // Calibrate the TSC-based clock estimate (100 ms sample).
    cpuClock = approx_CPU_MHz(100);
    mdclog_write(MDCLOG_DEBUG, "CPU speed %11.11f", cpuClock);

    auto result = parse(argc, argv, sctpParams);

    if (buildConfiguration(sctpParams) != 0) {

    //auto registry = std::make_shared<Registry>();
    sctpParams.prometheusRegistry = std::make_shared<Registry>();
    //sctpParams.promtheusFamily = new Family<Counter>("E2T", "E2T message counter", {{"E", sctpParams.podName}});
    sctpParams.prometheusFamily = &BuildCounter()
            .Help("E2T message counter")
            .Labels({{"E", sctpParams.podName}})
            .Register(*sctpParams.prometheusRegistry);

    sctpParams.epoll_fd = epoll_create1(0);
    if (sctpParams.epoll_fd == -1) {
        mdclog_write(MDCLOG_ERR, "failed to open epoll descriptor");

    getRmrContext(sctpParams);
    if (sctpParams.rmrCtx == nullptr) {
        close(sctpParams.epoll_fd);

    if (buildInotify(sctpParams) == -1) {
        close(sctpParams.rmrListenFd);
        rmr_close(sctpParams.rmrCtx);
        close(sctpParams.epoll_fd);

    if (buildListeningPort(sctpParams) != 0) {
        close(sctpParams.rmrListenFd);
        rmr_close(sctpParams.rmrCtx);
        close(sctpParams.epoll_fd);

    sctpParams.sctpMap = new mapWrapper();   // shared (fd/name -> peer) map used by all worker threads

    std::vector<std::thread> threads(num_cpus);
//    std::vector<std::thread> threads;

    // Prometheus wiring: "pull" exposes an HTTP scrape endpoint; "push"
    // registers a push gateway built from "<address>:<port>".
    if (sctpParams.prometheusMode.compare("pull") == 0) {
        sctpParams.prometheusExposer = new Exposer(sctpParams.myIP + ":" + sctpParams.prometheusPort, 1);
        sctpParams.prometheusExposer->RegisterCollectable(sctpParams.prometheusRegistry);
    } else if (sctpParams.prometheusMode.compare("push") == 0) {
        const auto labels = Gateway::GetInstanceLabel(GetHostName());
        auto found = sctpParams.prometheusPushAddress.find_last_of(ch);
        // If string doesn't have
        // character ch present in it
        if (found != string::npos) {
            address = sctpParams.prometheusPushAddress.substr(0,found);
            port = sctpParams.prometheusPushAddress.substr(found + 1);
            sctpParams.prometheusGateway = new Gateway(address, port, "E2T", labels);
            sctpParams.prometheusGateway->RegisterCollectable(sctpParams.prometheusRegistry);
            mdclog_write(MDCLOG_ERR, "failed to build Prometheus gateway no stats will be sent");

    // One worker per core, each pinned to its CPU.
    for (unsigned int i = 0; i < num_cpus; i++) {
        threads[i] = std::thread(listener, &sctpParams);
        int rc = pthread_setaffinity_np(threads[i].native_handle(), sizeof(cpu_set_t), &cpuset);
            mdclog_write(MDCLOG_ERR, "Error calling pthread_setaffinity_np: %d", rc);

    //loop over term_init until first message from xApp
    handleTermInit(sctpParams);

    for (auto &t : threads) {
/**
 * Announces E2_TERM_INIT to the E2 Manager and keeps re-sending it (every
 * 1000 loop iterations, judging by the count test) until the first message
 * from any xApp is observed via num_of_XAPP_messages.
 * NOTE(review): the surrounding loop and `count` declaration are elided in
 * this view.
 */
void handleTermInit(sctp_params_t &sctpParams) {
    sendTermInit(sctpParams);
    //send to e2 manager init of e2 term
        auto xappMessages = num_of_XAPP_messages.load(std::memory_order_acquire);
        if (xappMessages > 0) {
            if (mdclog_level_get() >= MDCLOG_INFO) {
                mdclog_write(MDCLOG_INFO, "Got a message from some appliction, stop sending E2_TERM_INIT");
        if (count % 1000 == 0) {
            mdclog_write(MDCLOG_ERR, "GOT No messages from any xApp");
            sendTermInit(sctpParams);
/**
 * Builds and sends one E2_TERM_INIT keep-alive message over RMR: payload is
 * the pre-rendered ka_message JSON, the xact field is a fresh transaction id.
 * A nullptr return from rmr_send_msg means RMR consumed the buffer, so a new
 * one is allocated for the retry path; state==0 means the send succeeded.
 * NOTE(review): the retry loop and `count` declaration are elided in this view.
 */
void sendTermInit(sctp_params_t &sctpParams) {
    rmr_mbuf_t *msg = rmr_alloc_msg(sctpParams.rmrCtx, sctpParams.ka_message_length);
    msg->mtype = E2_TERM_INIT;
    rmr_bytes2payload(msg, (unsigned char *)sctpParams.ka_message, sctpParams.ka_message_length);
    static unsigned char tx[32];
    auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
    rmr_bytes2xact(msg, tx, txLen);
    msg = rmr_send_msg(sctpParams.rmrCtx, msg);
    if (msg == nullptr) {
        // RMR took ownership of the buffer; allocate a fresh one.
        msg = rmr_alloc_msg(sctpParams.rmrCtx, sctpParams.ka_message_length);
    } else if (msg->state == 0) {
        if (mdclog_level_get() >= MDCLOG_INFO) {
            mdclog_write(MDCLOG_INFO, "E2_TERM_INIT succsesfuly sent ");
    if (count % 100 == 0) {
        mdclog_write(MDCLOG_ERR, "Error sending E2_TERM_INIT cause : %s ", translateRmrErrorMessages(msg->state).c_str());
/**
 * Command-line parsing via cxxopts.  Options write straight into sctpParams:
 *   -p/--path  config directory (default "config")
 *   -f/--file  config file name (default "config.conf")
 *   -h/--help  print usage and (per the visible flow) exit.
 */
cxxopts::ParseResult parse(int argc, char *argv[], sctp_params_t &sctpParams) {
    cxxopts::Options options(argv[0], "e2 term help");
    options.positional_help("[optional args]").show_positional_help();
    options.allow_unrecognised_options().add_options()
            ("p,path", "config file path", cxxopts::value<std::string>(sctpParams.configFilePath)->default_value("config"))
            ("f,file", "config file name", cxxopts::value<std::string>(sctpParams.configFileName)->default_value("config.conf"))
            ("h,help", "Print help");
    auto result = options.parse(argc, argv);
    if (result.count("help")) {
        std::cout << options.help({""}) << std::endl;
/**
 * Creates a non-blocking inotify instance watching the configuration
 * directory for IN_OPEN | IN_CLOSE_WRITE | IN_CLOSE_NOWRITE, and registers
 * its fd (level-triggered EPOLLIN) with the shared epoll loop so that
 * configuration edits are picked up at runtime by handleConfigChange().
 * On failure it tears down the RMR fd, RMR context and epoll fd.
 * @return -1 failed 0 success
 */
int buildInotify(sctp_params_t &sctpParams) {
    sctpParams.inotifyFD = inotify_init1(IN_NONBLOCK);
    if (sctpParams.inotifyFD == -1) {
        mdclog_write(MDCLOG_ERR, "Failed to init inotify (inotify_init1) %s", strerror(errno));
        close(sctpParams.rmrListenFd);
        rmr_close(sctpParams.rmrCtx);
        close(sctpParams.epoll_fd);

    sctpParams.inotifyWD = inotify_add_watch(sctpParams.inotifyFD,
                                             (const char *)sctpParams.configFilePath.c_str(),
                                             (unsigned)IN_OPEN | (unsigned)IN_CLOSE_WRITE | (unsigned)IN_CLOSE_NOWRITE); //IN_CLOSE = (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE)
    if (sctpParams.inotifyWD == -1) {
        mdclog_write(MDCLOG_ERR, "Failed to add directory : %s to inotify (inotify_add_watch) %s",
                     sctpParams.configFilePath.c_str(),
        close(sctpParams.inotifyFD);

    struct epoll_event event{};
    event.events = (EPOLLIN);
    event.data.fd = sctpParams.inotifyFD;
    // add the inotify FD to epoll
    if (epoll_ctl(sctpParams.epoll_fd, EPOLL_CTL_ADD, sctpParams.inotifyFD, &event)) {
        mdclog_write(MDCLOG_ERR, "Failed to add inotify FD to epoll");
        close(sctpParams.inotifyFD);
/**
 * Worker-thread main loop (one instance per hardware core).  Each thread
 * owns its RMR receive/send buffers and services the shared epoll set:
 *  - EPOLLERR/EPOLLHUP      -> handlepoll_error()
 *  - EPOLLOUT               -> handleEinprogressMessages() (connect completion)
 *  - listenFD               -> accept() new SCTP peers and add them to epoll
 *  - rmrListenFd            -> receiveXappMessages()
 *  - inotifyFD              -> handleConfigChange()
 *  - anything else          -> receiveDataFromSctp()
 * On an epoll timeout in Prometheus push mode, stats are pushed async.
 * NOTE(review): the outer while-loop header, several declarations (tid,
 * gateWay, in_len, address/port) and many closing braces are elided in this
 * view of the file.
 */
void listener(sctp_params_t *params) {
    int num_of_SCTP_messages = 0;
    auto totalTime = 0.0;
    mdclog_level_set(params->logLevel);

    // Capture this thread's id as text (streamed through a redirected cout)
    // so it can be attached to the logging MDC.
    std::thread::id this_id = std::this_thread::get_id();
    streambuf *oldCout = cout.rdbuf();
    ostringstream memCout;
    cout.rdbuf(memCout.rdbuf());
    //return to the normal cout
    memcpy(tid, memCout.str().c_str(), memCout.str().length() < 32 ? memCout.str().length() : 31);
    tid[memCout.str().length()] = 0;
    mdclog_mdc_add("thread id", tid);

    if (mdclog_level_get() >= MDCLOG_DEBUG) {
        mdclog_write(MDCLOG_DEBUG, "started thread number %s", tid);

    RmrMessagesBuffer_t rmrMessageBuffer{};
    //create and init RMR
    rmrMessageBuffer.rmrCtx = params->rmrCtx;

    auto *events = (struct epoll_event *) calloc(MAXEVENTS, sizeof(struct epoll_event));
    struct timespec end{0, 0};
    struct timespec start{0, 0};

    rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
    rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);

    // Thread-local copy of the keep-alive JSON (NUL-terminated for logging).
    memcpy(rmrMessageBuffer.ka_message, params->ka_message, params->ka_message_length);
    rmrMessageBuffer.ka_message_len = params->ka_message_length;
    rmrMessageBuffer.ka_message[rmrMessageBuffer.ka_message_len] = 0;

    if (mdclog_level_get() >= MDCLOG_DEBUG) {
        mdclog_write(MDCLOG_DEBUG, "keep alive message is : %s", rmrMessageBuffer.ka_message);

    ReportingMessages_t message {};

//    for (int i = 0; i < MAX_RMR_BUFF_ARRY; i++) {
//        rmrMessageBuffer.rcvBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
//        rmrMessageBuffer.sendBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);

    bool gatewayflag = false;
        if (mdclog_level_get() >= MDCLOG_DEBUG) {
            mdclog_write(MDCLOG_DEBUG, "Start EPOLL Wait. Timeout = %d", params->epollTimeOut);
        auto numOfEvents = epoll_wait(params->epoll_fd, events, MAXEVENTS, params->epollTimeOut);
        if (numOfEvents == 0) {
            // Timeout: in push mode kick off an async Prometheus push.
            if (params->prometheusGateway != nullptr) {
                gateWay = params->prometheusGateway->AsyncPush();
        } else if (numOfEvents < 0) {
            if (errno == EINTR) {
                if (mdclog_level_get() >= MDCLOG_DEBUG) {
                    mdclog_write(MDCLOG_DEBUG, "got EINTR : %s", strerror(errno));
            mdclog_write(MDCLOG_ERR, "Epoll wait failed, errno = %s", strerror(errno));
            // Harvest the result of a previously started async push.
            auto rc = gateWay.get();
                mdclog_write(MDCLOG_ERR, "Async Send to Promethues faild with Return Code %d", rc);
            } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
                mdclog_write(MDCLOG_DEBUG, "Stats sent to Prometheus");
        for (auto i = 0; i < numOfEvents; i++) {
            if (mdclog_level_get() >= MDCLOG_DEBUG) {
                mdclog_write(MDCLOG_DEBUG, "handling epoll event %d out of %d", i + 1, numOfEvents);
            // Timestamp the event both for reporting and latency accounting.
            clock_gettime(CLOCK_MONOTONIC, &message.message.time);
            start.tv_sec = message.message.time.tv_sec;
            start.tv_nsec = message.message.time.tv_nsec;

            if ((events[i].events & EPOLLERR) || (events[i].events & EPOLLHUP)) {
                handlepoll_error(events[i], message, rmrMessageBuffer, params);
            } else if (events[i].events & EPOLLOUT) {
                handleEinprogressMessages(events[i], message, rmrMessageBuffer, params);
            } else if (params->listenFD == events[i].data.fd) {
                if (mdclog_level_get() >= MDCLOG_INFO) {
                    mdclog_write(MDCLOG_INFO, "New connection request from sctp network\n");
                // new connection is requested from RAN start build connection
                struct sockaddr in_addr {};
                char hostBuff[NI_MAXHOST];
                char portBuff[NI_MAXSERV];

                in_len = sizeof(in_addr);
                // NOTE(review): peerInfo is calloc'ed here but the visible
                // error paths below close the fd without free()ing it — check
                // for a leak once the elided lines are restored.
                auto *peerInfo = (ConnectedCU_t *)calloc(1, sizeof(ConnectedCU_t));
                peerInfo->sctpParams = params;
                peerInfo->fileDescriptor = accept(params->listenFD, &in_addr, &in_len);
                if (peerInfo->fileDescriptor == -1) {
                    if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) {
                        /* We have processed all incoming connections. */
                        mdclog_write(MDCLOG_ERR, "Accept error, errno = %s", strerror(errno));

                if (setSocketNoBlocking(peerInfo->fileDescriptor) == -1) {
                    mdclog_write(MDCLOG_ERR, "setSocketNoBlocking failed to set new connection %s on port %s\n", hostBuff, portBuff);
                    close(peerInfo->fileDescriptor);

                auto ans = getnameinfo(&in_addr, in_len,
                                       peerInfo->hostName, NI_MAXHOST,
                                       peerInfo->portNumber, NI_MAXSERV, (unsigned )((unsigned int)NI_NUMERICHOST | (unsigned int)NI_NUMERICSERV));
                    mdclog_write(MDCLOG_ERR, "Failed to get info on connection request. %s\n", strerror(errno));
                    close(peerInfo->fileDescriptor);

                if (mdclog_level_get() >= MDCLOG_DEBUG) {
                    mdclog_write(MDCLOG_DEBUG, "Accepted connection on descriptor %d (host=%s, port=%s)\n", peerInfo->fileDescriptor, peerInfo->hostName, peerInfo->portNumber);
                peerInfo->isConnected = false;
                peerInfo->gotSetup = false;
                if (addToEpoll(params->epoll_fd,
                               params->sctpMap, nullptr,
            } else if (params->rmrListenFd == events[i].data.fd) {
                // got message from XAPP
                num_of_XAPP_messages.fetch_add(1, std::memory_order_release);
                num_of_messages.fetch_add(1, std::memory_order_release);
                if (mdclog_level_get() >= MDCLOG_DEBUG) {
                    mdclog_write(MDCLOG_DEBUG, "new message from RMR");
                if (receiveXappMessages(params->sctpMap,
                                        message.message.time) != 0) {
                    mdclog_write(MDCLOG_ERR, "Error handling Xapp message");
            } else if (params->inotifyFD == events[i].data.fd) {
                mdclog_write(MDCLOG_INFO, "Got event from inotify (configuration update)");
                handleConfigChange(params);
                /* We RMR_ERR_RETRY have data on the fd waiting to be read. Read and display it.
                 * We must read whatever data is available completely, as we are running
                 * in edge-triggered mode and won't get a notification again for the same data. */
                num_of_messages.fetch_add(1, std::memory_order_release);
                if (mdclog_level_get() >= MDCLOG_DEBUG) {
                    mdclog_write(MDCLOG_DEBUG, "new message from SCTP, epoll flags are : %0x", events[i].events);
                receiveDataFromSctp(&events[i],
                                    num_of_SCTP_messages,
                                    message.message.time);
            // Per-event latency accounting (INFO level and up).
            clock_gettime(CLOCK_MONOTONIC, &end);
            if (mdclog_level_get() >= MDCLOG_INFO) {
                totalTime += ((end.tv_sec + 1.0e-9 * end.tv_nsec) -
                              ((double) start.tv_sec + 1.0e-9 * start.tv_nsec));
            if (mdclog_level_get() >= MDCLOG_DEBUG) {
                mdclog_write(MDCLOG_DEBUG, "message handling is %ld seconds %ld nanoseconds",
                             end.tv_sec - start.tv_sec,
                             end.tv_nsec - start.tv_nsec);
/**
 * Processes pending inotify events on the configuration directory.  When the
 * watched config file is closed after a write (IN_CLOSE_WRITE), re-reads it
 * and applies the settings that may change at runtime: log level, trace flag
 * and (in Prometheus push mode) the push timeout / epoll timeout.
 * NOTE(review): `ptr`/`conf` declarations, loop-exit statements and many
 * closing braces are elided in this view of the file.
 */
void handleConfigChange(sctp_params_t *sctpParams) {
    char buf[4096] __attribute__ ((aligned(__alignof__(struct inotify_event))));
    const struct inotify_event *event;
    path p = (sctpParams->configFilePath + "/" + sctpParams->configFileName).c_str();
    auto endlessLoop = true;
    while (endlessLoop) {
        auto len = read(sctpParams->inotifyFD, buf, sizeof buf);
        if (errno != EAGAIN) {
            mdclog_write(MDCLOG_ERR, "read %s ", strerror(errno));

        // Walk the variable-length inotify records packed into buf.
        for (ptr = buf; ptr < buf + len; ptr += sizeof(struct inotify_event) + event->len) {
            event = (const struct inotify_event *)ptr;
            if (event->mask & (uint32_t)IN_ISDIR) {

            // the directory name
            if (sctpParams->inotifyWD == event->wd) {
            auto retVal = strcmp(sctpParams->configFileName.c_str(), event->name);
            // only the file we want
            if (event->mask & (uint32_t)IN_CLOSE_WRITE) {
                if (mdclog_level_get() >= MDCLOG_INFO) {
                    mdclog_write(MDCLOG_INFO, "Configuration file changed");
                const int size = 2048;   // same size guard as buildConfiguration()
                auto fileSize = file_size(p);
                if (fileSize > size) {
                    mdclog_write(MDCLOG_ERR, "File %s larger than %d", p.string().c_str(), size);
                    mdclog_write(MDCLOG_ERR, "Configuration File %s not exists", p.string().c_str());
                if (conf.openConfigFile(p.string()) == -1) {
                    mdclog_write(MDCLOG_ERR, "Filed to open config file %s, %s",
                                 p.string().c_str(), strerror(errno));
                auto tmpStr = conf.getStringValue("loglevel");
                if (tmpStr.length() == 0) {
                    mdclog_write(MDCLOG_ERR, "illigal loglevel. Set loglevel to MDCLOG_INFO");
                transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
                // Same loglevel mapping as buildConfiguration(), with an
                // extra INFO log per change.
                if ((tmpStr.compare("debug")) == 0) {
                    mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_DEBUG");
                    sctpParams->logLevel = MDCLOG_DEBUG;
                } else if ((tmpStr.compare("info")) == 0) {
                    mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_INFO");
                    sctpParams->logLevel = MDCLOG_INFO;
                } else if ((tmpStr.compare("warning")) == 0) {
                    mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_WARN");
                    sctpParams->logLevel = MDCLOG_WARN;
                } else if ((tmpStr.compare("error")) == 0) {
                    mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_ERR");
                    sctpParams->logLevel = MDCLOG_ERR;
                    mdclog_write(MDCLOG_ERR, "illigal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
                    sctpParams->logLevel = MDCLOG_INFO;
                mdclog_level_set(sctpParams->logLevel);

                tmpStr = conf.getStringValue("trace");
                if (tmpStr.length() == 0) {
                    mdclog_write(MDCLOG_ERR, "illigal trace. Set trace to stop");
                transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
                if ((tmpStr.compare("start")) == 0) {
                    mdclog_write(MDCLOG_INFO, "Trace set to: start");
                    sctpParams->trace = true;
                } else if ((tmpStr.compare("stop")) == 0) {
                    mdclog_write(MDCLOG_INFO, "Trace set to: stop");
                    sctpParams->trace = false;
                    mdclog_write(MDCLOG_ERR, "Trace was set to wrong value %s, set to stop", tmpStr.c_str());
                    sctpParams->trace = false;
                jsonTrace = sctpParams->trace;

                if (sctpParams->prometheusMode.compare("push") == 0) {
                    auto timeout = conf.getIntValue("prometheusPushTimeOut");
                    if (timeout >= 5 && timeout <= 300) {
                        sctpParams->epollTimeOut = timeout * 1000;
                        mdclog_write(MDCLOG_ERR, "prometheusPushTimeOut set wrong value %d, values are [5..300]",
/**
 * Completes a non-blocking SCTP connect().  Invoked when EPOLLOUT fires on a
 * socket whose connect returned EINPROGRESS: checks SO_ERROR; on failure it
 * reports RIC_SCTP_CONNECTION_FAILURE to the xApp and clears the buffered
 * request; on success it marks the peer connected, re-arms the fd for
 * EPOLLIN|EPOLLET and flushes the buffered SETUP/ENDC-SETUP message.
 * @param event epoll event carrying the ConnectedCU_t in data.ptr
 * @param message scratch reporting-message structure
 * @param rmrMessageBuffer per-thread RMR buffers
 * @param params global sctp parameters
 */
void handleEinprogressMessages(struct epoll_event &event,
                               ReportingMessages_t &message,
                               RmrMessagesBuffer_t &rmrMessageBuffer,
                               sctp_params_t *params) {
    auto *peerInfo = (ConnectedCU_t *)event.data.ptr;
    memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
    mdclog_write(MDCLOG_INFO, "file descriptor %d got EPOLLOUT", peerInfo->fileDescriptor);
    // NOTE(review): POSIX getsockopt() expects *optlen initialized to the
    // buffer size; passing retValLen == 0 means SO_ERROR cannot actually be
    // read into retVal — confirm and initialize to sizeof(retVal).
    socklen_t retValLen = 0;
    auto rc = getsockopt(peerInfo->fileDescriptor, SOL_SOCKET, SO_ERROR, &retVal, &retValLen);
    // NOTE(review): the first condition already covers retVal != 0, so the
    // `else if (retVal != 0)` branch below is unreachable dead code; the
    // first test was presumably meant to be `rc != 0` alone — confirm.
    if (rc != 0 || retVal != 0) {
        rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
                                                     "%s|Failed SCTP Connection, after EINPROGRESS the getsockopt%s",
                                                     peerInfo->enodbName, strerror(errno));
    } else if (retVal != 0) {
        rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
                                                     "%s|Failed SCTP Connection after EINPROGRESS, SO_ERROR",
                                                     peerInfo->enodbName);

        // Failure path: report to the xApp and discard the buffered request.
        message.message.asndata = rmrMessageBuffer.sendMessage->payload;
        message.message.asnLength = rmrMessageBuffer.sendMessage->len;
        mdclog_write(MDCLOG_ERR, "%s", rmrMessageBuffer.sendMessage->payload);
        message.message.direction = 'N';
        if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
            mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
        memset(peerInfo->asnData, 0, peerInfo->asnLength);
        peerInfo->asnLength = 0;

    peerInfo->isConnected = true;

    if (modifyToEpoll(params->epoll_fd, peerInfo, (EPOLLIN | EPOLLET), params->sctpMap, peerInfo->enodbName,
                      peerInfo->mtype) != 0) {
        mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_MOD");

    // Success path: send the request that was buffered while connecting.
    message.message.asndata = (unsigned char *)peerInfo->asnData;
    message.message.asnLength = peerInfo->asnLength;
    message.message.messageType = peerInfo->mtype;
    memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
    num_of_messages.fetch_add(1, std::memory_order_release);
    if (mdclog_level_get() >= MDCLOG_DEBUG) {
        mdclog_write(MDCLOG_DEBUG, "send the delayed SETUP/ENDC SETUP to sctp for %s",
                     message.message.enodbName);
    if (sendSctpMsg(peerInfo, message, params->sctpMap) != 0) {
        if (mdclog_level_get() >= MDCLOG_DEBUG) {
            mdclog_write(MDCLOG_DEBUG, "Error write to SCTP %s %d", __func__, __LINE__);

    memset(peerInfo->asnData, 0, peerInfo->asnLength);
    peerInfo->asnLength = 0;
/**
 * Handles EPOLLERR / EPOLLHUP.  For SCTP peer sockets (anything other than
 * the RMR fd) it notifies the xApp via RIC_SCTP_CONNECTION_FAILURE, closes
 * the socket and removes the peer from the shared map; an error on the RMR
 * fd is only logged.
 */
void handlepoll_error(struct epoll_event &event,
                      ReportingMessages_t &message,
                      RmrMessagesBuffer_t &rmrMessageBuffer,
                      sctp_params_t *params) {
    if (event.data.fd != params->rmrListenFd) {
        auto *peerInfo = (ConnectedCU_t *)event.data.ptr;
        mdclog_write(MDCLOG_ERR, "epoll error, events %0x on fd %d, RAN NAME : %s",
                     event.events, peerInfo->fileDescriptor, peerInfo->enodbName);

        rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
                                                     "%s|Failed SCTP Connection",
                                                     peerInfo->enodbName);
        message.message.asndata = rmrMessageBuffer.sendMessage->payload;
        message.message.asnLength = rmrMessageBuffer.sendMessage->len;

        memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
        message.message.direction = 'N';
        if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
            mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");

        close(peerInfo->fileDescriptor);
        params->sctpMap->erase(peerInfo->enodbName);
        cleanHashEntry((ConnectedCU_t *) event.data.ptr, params->sctpMap);
        mdclog_write(MDCLOG_ERR, "epoll error, events %0x on RMR FD", event.events);
/**
 * Switches `socket` to non-blocking mode via fcntl(F_GETFL / F_SETFL),
 * logging any fcntl failure with the function name in the MDC.
 * NOTE(review): the F_GETFL error check and the return statements are
 * elided in this view of the file.
 */
int setSocketNoBlocking(int socket) {
    auto flags = fcntl(socket, F_GETFL, 0);
        mdclog_mdc_add("func", "fcntl");
        mdclog_write(MDCLOG_ERR, "%s, %s", __FUNCTION__, strerror(errno));

    flags = (unsigned) flags | (unsigned) O_NONBLOCK;
    if (fcntl(socket, F_SETFL, flags) == -1) {
        mdclog_mdc_add("func", "fcntl");
        mdclog_write(MDCLOG_ERR, "%s, %s", __FUNCTION__, strerror(errno));
/**
 * Removes both map entries associated with a peer: the "host:<name>:<port>"
 * lookup key and the eNodeB-name key.
 * NOTE(review): the `dummy` declaration for strtol is elided in this view.
 */
void cleanHashEntry(ConnectedCU_t *val, Sctp_Map_t *m) {
    auto port = (uint16_t) strtol(val->portNumber, &dummy, 10);
    char searchBuff[2048]{};

    snprintf(searchBuff, sizeof searchBuff, "host:%s:%d", val->hostName, port);
    m->erase(searchBuff);

    m->erase(val->enodbName);
/**
 * Sends the ASN.1 payload in `message` to the peer CU over its SCTP socket
 * and (on the visible path) records the message as a 'D' (downstream) JSON
 * report.  On an unrecoverable send error the peer's hash entries are removed.
 * @param fd file descriptor
 * @param data the asn data to send
 * @param len length of the data
 * @param enodbName the enodbName as in the map for printing purpose
 * @param m map host information
 * @param mtype message number
 * @return 0 success, a negative number on fail
 */
int sendSctpMsg(ConnectedCU_t *peerInfo, ReportingMessages_t &message, Sctp_Map_t *m) {
    auto loglevel = mdclog_level_get();
    int fd = peerInfo->fileDescriptor;
    if (loglevel >= MDCLOG_DEBUG) {
        mdclog_write(MDCLOG_DEBUG, "Send SCTP message for CU %s, %s",
                     message.message.enodbName, __FUNCTION__);

    // NOTE(review): send() may transfer fewer bytes than requested; partial
    // writes are not handled here — confirm payloads stay below the socket
    // send-buffer size.  EINTR is retried (loop structure elided in this view).
    if (send(fd,message.message.asndata, message.message.asnLength,MSG_NOSIGNAL) < 0) {
        if (errno == EINTR) {
        mdclog_write(MDCLOG_ERR, "error writing to CU a message, %s ", strerror(errno));
        if (!peerInfo->isConnected) {
            mdclog_write(MDCLOG_ERR, "connection to CU %s is still in progress.", message.message.enodbName);
        cleanHashEntry(peerInfo, m);
        // Also drop any pending "msg:<enb>|<mtype>" entry for this peer.
        char key[MAX_ENODB_NAME_SIZE * 2];
        snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", message.message.enodbName,
                 message.message.messageType);
        if (loglevel >= MDCLOG_DEBUG) {
            mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
        auto tmp = m->find(key);

    message.message.direction = 'D';
    // send report.buffer of size
    buildJsonMessage(message);

    if (loglevel >= MDCLOG_DEBUG) {
        mdclog_write(MDCLOG_DEBUG,
                     "SCTP message for CU %s sent from %s",
                     message.message.enodbName,
1093 * @param rmrMessageBuffer
// Point the reporting message's ASN.1 data/length at the payload of the RMR
// message just received from an xApp (no copy — asndata aliases the RMR buffer).
1095 void getRequestMetaData(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1096 message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
1097 message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
1099 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1100 mdclog_write(MDCLOG_DEBUG, "Message from Xapp RAN name = %s message length = %ld",
1101 message.message.enodbName, (unsigned long) message.message.asnLength);
1111 * @param numOfMessages
1112 * @param rmrMessageBuffer
// Handle one epoll read event on an SCTP socket connected to a RAN node:
// read the raw bytes straight into the RMR send buffer, decode them as an
// E2AP PDU, and dispatch by PDU type (initiating / successful / unsuccessful)
// to the routine that forwards the message to the proper xApp via RMR.
// A zero-length read (peer closed) triggers a RIC_SCTP_CONNECTION_FAILURE
// notification to the xApp and teardown of the connection's map entries.
1116 int receiveDataFromSctp(struct epoll_event *events,
1117 Sctp_Map_t *sctpMap,
1119 RmrMessagesBuffer_t &rmrMessageBuffer,
1120 struct timespec &ts) {
1121 /* We have data on the fd waiting to be read. Read and display it.
1122 * We must read whatever data is available completely, as we are running
1123 * in edge-triggered mode and won't get a notification again for the same data. */
1124 ReportingMessages_t message {};
1126 auto loglevel = mdclog_level_get();
1128 // The epoll user-data pointer identifies which connection fired.
1129 message.peerInfo = (ConnectedCU_t *)events->data.ptr;
// Timestamps used only for DEBUG-level latency reporting.
1131 struct timespec start{0, 0};
1132 struct timespec decodestart{0, 0};
1133 struct timespec end{0, 0};
1135 E2AP_PDU_t *pdu = nullptr;
1138 if (loglevel >= MDCLOG_DEBUG) {
1139 mdclog_write(MDCLOG_DEBUG, "Start Read from SCTP %d fd", message.peerInfo->fileDescriptor);
1140 clock_gettime(CLOCK_MONOTONIC, &start);
1142 // Read directly into the RMR payload to avoid an extra copy.
1143 message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1144 message.message.asnLength = rmrMessageBuffer.sendMessage->len =
1145 read(message.peerInfo->fileDescriptor, rmrMessageBuffer.sendMessage->payload, RECEIVE_SCTP_BUFFER_SIZE);
1147 if (loglevel >= MDCLOG_DEBUG) {
1148 mdclog_write(MDCLOG_DEBUG, "Finish Read from SCTP %d fd message length = %ld",
1149 message.peerInfo->fileDescriptor, message.message.asnLength);
// 'U' = upstream (RAN -> RIC); stamp the event time supplied by the caller.
1152 memcpy(message.message.enodbName, message.peerInfo->enodbName, sizeof(message.peerInfo->enodbName));
1153 message.message.direction = 'U';
1154 message.message.time.tv_nsec = ts.tv_nsec;
1155 message.message.time.tv_sec = ts.tv_sec;
// read() < 0: distinguish EINTR (retry), EAGAIN (drained), real errors.
1157 if (message.message.asnLength < 0) {
1158 if (errno == EINTR) {
1161 /* If errno == EAGAIN, that means we have read all
1162 available data, so go back to the main loop. */
1163 if (errno != EAGAIN) {
1164 mdclog_write(MDCLOG_ERR, "Read error, %s ", strerror(errno));
1166 } else if (loglevel >= MDCLOG_DEBUG) {
1167 mdclog_write(MDCLOG_DEBUG, "EAGAIN - descriptor = %d", message.peerInfo->fileDescriptor);
1170 } else if (message.message.asnLength == 0) {
1171 /* End of file. The remote has closed the connection. */
1172 if (loglevel >= MDCLOG_INFO) {
1173 mdclog_write(MDCLOG_INFO, "END of File Closed connection - descriptor = %d",
1174 message.peerInfo->fileDescriptor);
// DEBUG only: hex-dump the received bytes before decoding.
1180 if (loglevel >= MDCLOG_DEBUG) {
1181 char printBuffer[4096]{};
1182 char *tmp = printBuffer;
1183 for (size_t i = 0; i < (size_t)message.message.asnLength; ++i) {
1184 snprintf(tmp, 3, "%02x", message.message.asndata[i]);
// NOTE(review): terminator is placed at asnLength, not 2*asnLength —
// looks inconsistent with the two-hex-chars-per-byte dump above; confirm
// against the elided pointer-advance line.
1187 printBuffer[message.message.asnLength] = 0;
1188 clock_gettime(CLOCK_MONOTONIC, &end);
1189 mdclog_write(MDCLOG_DEBUG, "Before Encoding E2AP PDU for : %s, Read time is : %ld seconds, %ld nanoseconds",
1190 message.peerInfo->enodbName, end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
1191 mdclog_write(MDCLOG_DEBUG, "PDU buffer length = %ld, data = : %s", message.message.asnLength,
1193 clock_gettime(CLOCK_MONOTONIC, &decodestart);
// Decode the wire bytes (aligned PER) into an E2AP PDU structure.
1196 auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1197 message.message.asndata, message.message.asnLength);
1198 if (rval.code != RC_OK) {
1199 mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2AP PDU from RAN : %s", rval.code,
1200 message.peerInfo->enodbName);
1204 if (loglevel >= MDCLOG_DEBUG) {
1205 clock_gettime(CLOCK_MONOTONIC, &end);
1206 mdclog_write(MDCLOG_DEBUG, "After Encoding E2AP PDU for : %s, Read time is : %ld seconds, %ld nanoseconds",
1207 message.peerInfo->enodbName, end.tv_sec - decodestart.tv_sec, end.tv_nsec - decodestart.tv_nsec);
// Pretty-print the decoded PDU into a malloc'd memstream for the debug log.
1210 FILE *stream = open_memstream(&printBuffer, &size);
1211 asn_fprint(stream, &asn_DEF_E2AP_PDU, pdu);
1212 mdclog_write(MDCLOG_DEBUG, "Encoding E2AP PDU past : %s", printBuffer);
1213 clock_gettime(CLOCK_MONOTONIC, &decodestart);
// Dispatch by E2AP message class.
1216 switch (pdu->present) {
1217 case E2AP_PDU_PR_initiatingMessage: {//initiating message
1218 asnInitiatingRequest(pdu, sctpMap,message, rmrMessageBuffer);
1221 case E2AP_PDU_PR_successfulOutcome: { //successful outcome
1222 asnSuccsesfulMsg(pdu, sctpMap, message, rmrMessageBuffer);
1225 case E2AP_PDU_PR_unsuccessfulOutcome: { //Unsuccessful Outcome
1226 asnUnSuccsesfulMsg(pdu, sctpMap, message, rmrMessageBuffer);
1230 mdclog_write(MDCLOG_ERR, "Unknown index %d in E2AP PDU", pdu->present);
1233 if (loglevel >= MDCLOG_DEBUG) {
1234 clock_gettime(CLOCK_MONOTONIC, &end);
1235 mdclog_write(MDCLOG_DEBUG,
1236 "After processing message and sent to rmr for : %s, Read time is : %ld seconds, %ld nanoseconds",
1237 message.peerInfo->enodbName, end.tv_sec - decodestart.tv_sec, end.tv_nsec - decodestart.tv_nsec);
// Reset (not free) the PDU so the structure can be reused on the next loop
// iteration; the commented-out FREE suggests this was a deliberate choice.
1240 if (pdu != nullptr) {
1241 ASN_STRUCT_RESET(asn_DEF_E2AP_PDU, pdu);
1242 //ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);
// Connection-closed path: tell the xApp, then tear the connection down.
1248 if (loglevel >= MDCLOG_INFO) {
1249 mdclog_write(MDCLOG_INFO, "Closed connection - descriptor = %d", message.peerInfo->fileDescriptor);
1251 message.message.asnLength = rmrMessageBuffer.sendMessage->len =
1252 snprintf((char *)rmrMessageBuffer.sendMessage->payload,
1254 "%s|CU disconnected unexpectedly",
1255 message.peerInfo->enodbName);
1256 message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1258 if (sendRequestToXapp(message,
1259 RIC_SCTP_CONNECTION_FAILURE,
1260 rmrMessageBuffer) != 0) {
1261 mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
1264 /* Closing descriptor make epoll remove it from the set of descriptors which are monitored. */
1265 close(message.peerInfo->fileDescriptor);
1266 cleanHashEntry((ConnectedCU_t *) events->data.ptr, sctpMap);
1268 if (loglevel >= MDCLOG_DEBUG) {
1269 clock_gettime(CLOCK_MONOTONIC, &end);
1270 mdclog_write(MDCLOG_DEBUG, "from receive SCTP to send RMR time is %ld seconds and %ld nanoseconds",
1271 end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
// Re-encode the decoded E2AP setup/service-update PDU as XER (XML), optionally
// splice the per-RAN-function XML fragments into it (buildXmlData), wrap the
// result in an RMR message as "<myIP>:<rmrPort>|<xml>", and send it to the
// E2 Manager, retrying once on RMR_ERR_RETRY.  Finally the send is recorded
// in the JSON trace and gotSetup is latched on the peer.
1277 static void buildAndsendSetupRequest(ReportingMessages_t &message,
1278 RmrMessagesBuffer_t &rmrMessageBuffer,
1280 string const &messageName,
1281 string const &ieName,
1282 vector<string> &functionsToAdd_v,
1283 vector<string> &functionsToModified_v) {
1284 auto logLevel = mdclog_level_get();
1285 // now we can send the data to e2Mgr
1288 auto buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
1289 unsigned char *buffer;
// NOTE(review): malloc'd buffer — the matching free() is on a line elided
// from this view; verify no leak on the error paths.
1291 buffer = (unsigned char *)malloc(buffer_size);
1292 er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu, buffer, buffer_size);
1293 if (er.encoded == -1) {
1294 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
// Encoder reported it needed more room: grow and (on elided lines) re-encode.
1296 } else if (er.encoded > (ssize_t) buffer_size) {
1297 buffer_size = er.encoded + 128;
1298 mdclog_write(MDCLOG_WARN, "Buffer of size %d is to small for %s. Reallocate buffer of size %d",
1300 asn_DEF_E2AP_PDU.name, buffer_size);
// NOTE(review): buffer_size is assigned the same value twice (here and
// above at 1297) — likely redundant; confirm against the elided realloc.
1301 buffer_size = er.encoded + 128;
// NUL-terminate so the XER output can be handled as a C string.
1305 buffer[er.encoded] = '\0';
// Merge the RAN-function definition XML fragments into the setup XML.
1311 if (!functionsToAdd_v.empty() || !functionsToModified_v.empty()) {
1312 res = buildXmlData(messageName, ieName, functionsToAdd_v, functionsToModified_v, buffer, (size_t) er.encoded);
// Empty res => send the plain XER buffer; otherwise send the merged XML.
1315 if (res.length() == 0) {
1316 rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, buffer_size + 256);
1317 rmrMsg->len = snprintf((char *) rmrMsg->payload, RECEIVE_SCTP_BUFFER_SIZE * 2, "%s:%d|%s",
1318 message.peerInfo->sctpParams->myIP.c_str(),
1319 message.peerInfo->sctpParams->rmrPort,
1322 rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, (int)res.length() + 256);
1323 rmrMsg->len = snprintf((char *) rmrMsg->payload, res.length() + 256, "%s:%d|%s",
1324 message.peerInfo->sctpParams->myIP.c_str(),
1325 message.peerInfo->sctpParams->rmrPort,
1329 if (logLevel >= MDCLOG_DEBUG) {
1330 mdclog_write(MDCLOG_DEBUG, "Setup request of size %d :\n %s\n", rmrMsg->len, rmrMsg->payload);
// meid (managed-entity id) carries the RAN name for RMR routing.
1333 rmrMsg->mtype = message.message.messageType;
1335 rmr_bytes2meid(rmrMsg, (unsigned char *) message.message.enodbName, strlen(message.message.enodbName));
// Fresh transaction id per send (static buffer: single-threaded use assumed).
1337 static unsigned char tx[32];
1338 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1339 rmr_bytes2xact(rmrMsg, tx, strlen((const char *) tx));
1341 rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
1342 if (rmrMsg == nullptr) {
1343 mdclog_write(MDCLOG_ERR, "RMR failed to send returned nullptr");
1344 } else if (rmrMsg->state != 0) {
1345 char meid[RMR_MAX_MEID]{};
// RMR_ERR_RETRY is transient: retry the send exactly once.
1346 if (rmrMsg->state == RMR_ERR_RETRY) {
1349 mdclog_write(MDCLOG_INFO, "RETRY sending Message %d to Xapp from %s",
1350 rmrMsg->mtype, rmr_get_meid(rmrMsg, (unsigned char *) meid));
1351 rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
1352 if (rmrMsg == nullptr) {
1353 mdclog_write(MDCLOG_ERR, "RMR failed send returned nullptr");
1354 } else if (rmrMsg->state != 0) {
1355 mdclog_write(MDCLOG_ERR,
1356 "RMR Retry failed %s sending request %d to Xapp from %s",
1357 translateRmrErrorMessages(rmrMsg->state).c_str(),
1359 rmr_get_meid(rmrMsg, (unsigned char *) meid));
1362 mdclog_write(MDCLOG_ERR, "RMR failed: %s. sending request %d to Xapp from %s",
1363 translateRmrErrorMessages(rmrMsg->state).c_str(),
1365 rmr_get_meid(rmrMsg, (unsigned char *) meid));
// Mark setup complete for this peer and trace the message; rmr_send_msg
// returns a (possibly new) message object that we must free ourselves.
1368 message.peerInfo->gotSetup = true;
1369 buildJsonMessage(message);
1370 if (rmrMsg != nullptr) {
1371 rmr_free_msg(rmrMsg);
// Convert an E2AP RANfunctions_List into a vector of XML strings: each
// RANfunction item's opaque ranFunctionDefinition (PER-encoded
// E2SM-gNB-NRT-RANfunction-Definition) is decoded and re-encoded as XER,
// and the resulting XML text is appended to runFunXML_v.
// The output vector is cleared first; items that fail to decode are skipped
// (handling on elided lines).
1376 int RAN_Function_list_To_Vector(RANfunctions_List_t& list, vector <string> &runFunXML_v) {
1378 runFunXML_v.clear();
1379 for (auto j = 0; j < list.list.count; j++) {
1380 auto *raNfunctionItemIEs = (RANfunction_ItemIEs_t *)list.list.array[j];
// Only process well-formed RANfunction_Item IEs.
1381 if (raNfunctionItemIEs->id == ProtocolIE_ID_id_RANfunction_Item &&
1382 (raNfunctionItemIEs->value.present == RANfunction_ItemIEs__value_PR_RANfunction_Item)) {
1384 E2SM_gNB_NRT_RANfunction_Definition_t *ranFunDef = nullptr;
// Unpack the nested E2SM definition from its aligned-PER octet string.
1385 auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER,
1386 &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
1387 (void **)&ranFunDef,
1388 raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.buf,
1389 raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.size);
1390 if (rval.code != RC_OK) {
1391 mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2SM message from : %s",
1393 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name);
// Stack buffer for the XER output; zeroed so the result is NUL-terminated.
1397 auto xml_buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
1398 unsigned char xml_buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
1399 memset(xml_buffer, 0, RECEIVE_SCTP_BUFFER_SIZE * 2);
1401 auto er = asn_encode_to_buffer(nullptr,
1403 &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
1407 if (er.encoded == -1) {
1408 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s",
1409 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
1411 } else if (er.encoded > (ssize_t)xml_buffer_size) {
1412 mdclog_write(MDCLOG_ERR, "Buffer of size %d is to small for %s, at %s line %d",
1413 (int) xml_buffer_size,
1414 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name, __func__, __LINE__);
1416 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1417 mdclog_write(MDCLOG_DEBUG, "Encoding E2SM %s PDU number %d : %s",
1418 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
// Copy the NUL-terminated XML into the output vector.
1423 string runFuncs = (char *)(xml_buffer);
1424 runFunXML_v.emplace_back(runFuncs);
// Walk the protocolIEs of a RICserviceUpdate initiating message and collect
// the XML renderings of the added and modified RAN-function lists into the
// two output vectors (via RAN_Function_list_To_Vector).
// Also clears message.peerInfo->enodbName before processing.
1431 int collectServiceUpdate_RequestData(E2AP_PDU_t *pdu,
1432 Sctp_Map_t *sctpMap,
1433 ReportingMessages_t &message,
1434 vector <string> &RANfunctionsAdded_v,
1435 vector <string> &RANfunctionsModified_v) {
1436 memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
1437 for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.count; i++) {
1438 auto *ie = pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.array[i];
1439 if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
// NOTE(review): the present-check here is RANfunctionsID_List, yet the
// branch below reads choice.RANfunctions_List — the 'Modified' branch
// checks RANfunctions_List instead.  Looks inconsistent; confirm against
// the RICserviceUpdate_IEs union definition.
1440 if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctionsID_List) {
1441 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1442 mdclog_write(MDCLOG_DEBUG, "Run function list have %d entries",
1443 ie->value.choice.RANfunctions_List.list.count);
1445 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
1449 } else if (ie->id == ProtocolIE_ID_id_RANfunctionsModified) {
1450 if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctions_List) {
1451 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1452 mdclog_write(MDCLOG_DEBUG, "Run function list have %d entries",
1453 ie->value.choice.RANfunctions_List.list.count);
1455 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsModified_v) != 0 ) {
1461 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1462 mdclog_write(MDCLOG_DEBUG, "Run function vector have %ld entries",
1463 RANfunctionsAdded_v.size());
// Pre-register one Prometheus counter per (direction, procedure, unit) for
// this peer so the message handlers can Increment() without lookups.
// Layout: counters[direction][MSG_COUNTER|BYTES_COUNTER][ProcedureCode - 1],
// where direction is IN/OUT x INITI/SUCC/UN_SUCC.
// NOTE(review): the label pairs use the RAN name (enodbName) as the label
// *name* and "IN"/"OUT" as its value — unusual for Prometheus label schemas,
// but kept as-is; confirm against the dashboard queries before changing.
1470 void buildPrometheuslist(ConnectedCU_t *peerInfo, Family<Counter> *prometheusFamily) {
// --- incoming initiating messages (RAN -> RIC) ---
1471 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Messages"}});
1472 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Bytes"}});
1474 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Messages"}});
1475 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Bytes"}});
1477 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICindication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Messages"}});
1478 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICindication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Bytes"}});
1480 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Messages"}});
1481 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Bytes"}});
1483 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Messages"}});
1484 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Bytes"}});
1485 // --- incoming successful outcomes (RAN -> RIC) ---
1486 peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Messages"}});
1487 peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Bytes"}});
1489 peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Messages"}});
1490 peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Bytes"}});
1492 peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Messages"}});
1493 peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Bytes"}});
1495 peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Messages"}});
1496 peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Bytes"}});
1497 // --- incoming unsuccessful outcomes (RAN -> RIC) ---
1499 peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Messages"}});
1500 peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Bytes"}});
1502 peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Messages"}});
1503 peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Bytes"}});
1505 peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Messages"}});
1506 peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Bytes"}});
1508 // --- outgoing initiating messages (RIC -> RAN) ---
1509 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Messages"}});
1510 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Bytes"}});
1512 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Messages"}});
1513 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Bytes"}});
1515 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Messages"}});
1516 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Bytes"}});
1518 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceQuery - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Messages"}});
1519 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceQuery - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Bytes"}});
1521 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Messages"}});
1522 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Bytes"}});
1524 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Messages"}});
1525 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Bytes"}});
1526 // --- outgoing successful outcomes (RIC -> RAN) ---
1527 peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Messages"}});
1528 peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Bytes"}});
1530 peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Messages"}});
1531 peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Bytes"}});
1533 peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Messages"}});
1534 peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Bytes"}});
1535 // --- outgoing unsuccessful outcomes (RIC -> RAN) ---
1536 peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Messages"}});
1537 peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Bytes"}});
1539 peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Messages"}});
1540 peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Bytes"}});
1547 * @param RANfunctionsAdded_v
// Extract the data needed from an E2setupRequest: derive the RAN name (meid)
// from the GlobalE2node_ID IE, register the connection in the map under that
// name, and collect the added RAN-function XML fragments.
// Returns non-zero (on elided lines) when the request is malformed.
1550 int collectSetupRequestData(E2AP_PDU_t *pdu,
1551 Sctp_Map_t *sctpMap,
1552 ReportingMessages_t &message,
1553 vector <string> &RANfunctionsAdded_v) {
1554 memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
1555 for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.count; i++) {
1556 auto *ie = pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.array[i];
1557 if (ie->id == ProtocolIE_ID_id_GlobalE2node_ID) {
1558 // get the ran name for meid
1559 if (ie->value.present == E2setupRequestIEs__value_PR_GlobalE2node_ID) {
1560 if (buildRanName(message.peerInfo->enodbName, ie) < 0) {
1561 mdclog_write(MDCLOG_ERR, "Bad param in E2setupRequestIEs GlobalE2node_ID.\n");
1562 // no message will be sent
// Index the live connection by its RAN name for later lookups.
1566 memcpy(message.message.enodbName, message.peerInfo->enodbName, strlen(message.peerInfo->enodbName));
1567 sctpMap->setkey(message.message.enodbName, message.peerInfo);
1569 } else if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
1570 if (ie->value.present == E2setupRequestIEs__value_PR_RANfunctions_List) {
1571 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1572 mdclog_write(MDCLOG_DEBUG, "Run function list have %d entries",
1573 ie->value.choice.RANfunctions_List.list.count);
// Convert each RAN-function definition to XML; bail out on failure.
1575 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
1581 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1582 mdclog_write(MDCLOG_DEBUG, "Run function vector have %ld entries",
1583 RANfunctionsAdded_v.size());
// Convert the aligned-PER payload sitting in rmrMessageBuffer.sendMessage into
// its XER (XML) representation, in place: decode to an E2AP PDU, then
// re-encode as XER into the same RMR payload and update the length.
// Returns a negative value (on elided lines) on decode/encode failure.
1588 int XML_From_PER(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1589 E2AP_PDU_t *pdu = nullptr;
1591 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1592 mdclog_write(MDCLOG_DEBUG, "got PER message of size %d is:%s",
1593 rmrMessageBuffer.sendMessage->len, rmrMessageBuffer.sendMessage->payload);
1595 auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1596 rmrMessageBuffer.sendMessage->payload, rmrMessageBuffer.sendMessage->len);
1597 if (rval.code != RC_OK) {
1598 mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) setup response from E2MGR : %s",
1600 message.message.enodbName);
// Re-encode as XER directly into the RMR payload buffer.
1604 int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
1605 auto er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu,
1606 rmrMessageBuffer.sendMessage->payload, buff_size);
1607 if (er.encoded == -1) {
1608 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1610 } else if (er.encoded > (ssize_t)buff_size) {
1611 mdclog_write(MDCLOG_ERR, "Buffer of size %d is to small for %s, at %s line %d",
1612 (int)rmrMessageBuffer.sendMessage->len,
1613 asn_DEF_E2AP_PDU.name,
// Success: the payload now holds XML of er.encoded bytes.
1618 rmrMessageBuffer.sendMessage->len = er.encoded;
1627 * @param rmrMessageBuffer
// Dispatch an E2AP *initiating* message received from the RAN by procedure
// code: E2setup and RICserviceUpdate are re-encoded as XML and sent to the
// E2 Manager; ErrorIndication/Reset/RICindication are forwarded to the xApp
// over RMR.  Per-procedure Prometheus message/byte counters are incremented.
// Unknown procedures fall through to an error log and a JSON trace record.
1629 void asnInitiatingRequest(E2AP_PDU_t *pdu,
1630 Sctp_Map_t *sctpMap,
1631 ReportingMessages_t &message,
1632 RmrMessagesBuffer_t &rmrMessageBuffer)  {
1633 auto logLevel = mdclog_level_get();
1634 auto procedureCode = ((InitiatingMessage_t *) pdu->choice.initiatingMessage)->procedureCode;
1635 if (logLevel >= MDCLOG_DEBUG) {
1636 mdclog_write(MDCLOG_DEBUG, "Initiating message %ld\n", procedureCode);
1638 switch (procedureCode) {
1639 case ProcedureCode_id_E2setup: {
1640 if (logLevel >= MDCLOG_DEBUG) {
1641 mdclog_write(MDCLOG_DEBUG, "Got E2setup");
1644 vector <string> RANfunctionsAdded_v;
1645 vector <string> RANfunctionsModified_v;
1646 RANfunctionsAdded_v.clear();
1647 RANfunctionsModified_v.clear();
// Extract RAN name + function list; abort (elided) on malformed request.
1648 if (collectSetupRequestData(pdu, sctpMap, message, RANfunctionsAdded_v) != 0) {
// First contact with this RAN: create its Prometheus counters.
1652 buildPrometheuslist(message.peerInfo, message.peerInfo->sctpParams->prometheusFamily);
1654 string messageName("E2setupRequest");
1655 string ieName("E2setupRequestIEs");
1656 message.message.messageType = RIC_E2_SETUP_REQ;
1657 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_E2setup - 1]->Increment();
1658 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_E2setup - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1659 buildAndsendSetupRequest(message, rmrMessageBuffer, pdu, messageName, ieName, RANfunctionsAdded_v, RANfunctionsModified_v);
1662 case ProcedureCode_id_RICserviceUpdate: {
1663 if (logLevel >= MDCLOG_DEBUG) {
1664 mdclog_write(MDCLOG_DEBUG, "Got RICserviceUpdate %s", message.message.enodbName);
1666 vector <string> RANfunctionsAdded_v;
1667 vector <string> RANfunctionsModified_v;
1668 RANfunctionsAdded_v.clear();
1669 RANfunctionsModified_v.clear();
1670 if (collectServiceUpdate_RequestData(pdu, sctpMap, message,
1671 RANfunctionsAdded_v, RANfunctionsModified_v) != 0) {
1675 string messageName("RICserviceUpdate");
1676 string ieName("RICserviceUpdateIEs");
1677 message.message.messageType = RIC_SERVICE_UPDATE;
1678 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate - 1]->Increment();
1679 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
// Same XML build/send path as E2setup, with update-specific names.
1681 buildAndsendSetupRequest(message, rmrMessageBuffer, pdu, messageName, ieName, RANfunctionsAdded_v, RANfunctionsModified_v);
1684 case ProcedureCode_id_ErrorIndication: {
1685 if (logLevel >= MDCLOG_DEBUG) {
1686 mdclog_write(MDCLOG_DEBUG, "Got ErrorIndication %s", message.message.enodbName);
1688 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication - 1]->Increment();
1689 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1690 if (sendRequestToXapp(message, RIC_ERROR_INDICATION, rmrMessageBuffer) != 0) {
1691 mdclog_write(MDCLOG_ERR, "RIC_ERROR_INDICATION failed to send to xAPP");
1695 case ProcedureCode_id_Reset: {
1696 if (logLevel >= MDCLOG_DEBUG) {
1697 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
1700 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_Reset - 1]->Increment();
1701 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_Reset - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
// Reset is forwarded to the xApp as XML, so convert PER -> XER first.
1702 if (XML_From_PER(message, rmrMessageBuffer) < 0) {
1706 if (sendRequestToXapp(message, RIC_E2_RESET_REQ, rmrMessageBuffer) != 0) {
1707 mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_REQ message failed to send to xAPP");
1711 case ProcedureCode_id_RICindication: {
1712 if (logLevel >= MDCLOG_DEBUG) {
1713 mdclog_write(MDCLOG_DEBUG, "Got RICindication %s", message.message.enodbName);
// Find the RICrequestID IE: its ricInstanceID becomes the RMR sub_id so
// the indication is routed to the subscribing xApp instance.
1715 for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.count; i++) {
1716 auto messageSent = false;
1717 RICindication_IEs_t *ie = pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.array[i];
1718 if (logLevel >= MDCLOG_DEBUG) {
1719 mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1721 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1722 if (logLevel >= MDCLOG_DEBUG) {
1723 mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1725 if (ie->value.present == RICindication_IEs__value_PR_RICrequestID) {
1726 static unsigned char tx[32];
1727 message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_INDICATION;
1728 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1729 rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1730 rmr_bytes2meid(rmrMessageBuffer.sendMessage,
1731 (unsigned char *)message.message.enodbName,
1732 strlen(message.message.enodbName));
1733 rmrMessageBuffer.sendMessage->state = 0;
1734 rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1736 //ie->value.choice.RICrequestID.ricInstanceID;
1737 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1738 mdclog_write(MDCLOG_DEBUG, "sub id = %d, mtype = %d, ric instance id %ld, requestor id = %ld",
1739 rmrMessageBuffer.sendMessage->sub_id,
1740 rmrMessageBuffer.sendMessage->mtype,
1741 ie->value.choice.RICrequestID.ricInstanceID,
1742 ie->value.choice.RICrequestID.ricRequestorID);
1744 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICindication - 1]->Increment();
1745 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICindication - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1746 sendRmrMessage(rmrMessageBuffer, message);
1749 mdclog_write(MDCLOG_ERR, "RIC request id missing illigal request");
// Default: unknown/unsupported procedure — trace it with no RMR type.
1759 mdclog_write(MDCLOG_ERR, "Undefined or not supported message = %ld", procedureCode);
1760 message.message.messageType = 0; // no RMR message type yet
1762 buildJsonMessage(message);
1773 * @param rmrMessageBuffer
// Handle an E2AP successfulOutcome PDU received from the RAN side:
// increment the per-procedure IN_SUCC Prometheus counters and forward the
// outcome to the xApp over RMR.
// NOTE(review): this extract is missing lines (closing braces, `break;`,
// the `for (...)` loop header of the RICcontrol case); comments describe
// only the code that is visible here.
// @param pdu              decoded E2AP PDU, successfulOutcome branch
// @param sctpMap          connected-CU map (not used by the visible lines)
// @param message          reporting/trace context for this message
// @param rmrMessageBuffer RMR receive/send buffers for this worker
1775 void asnSuccsesfulMsg(E2AP_PDU_t *pdu,
1776 Sctp_Map_t *sctpMap,
1777 ReportingMessages_t &message,
1778 RmrMessagesBuffer_t &rmrMessageBuffer) {
1779 auto procedureCode = pdu->choice.successfulOutcome->procedureCode;
1780 auto logLevel = mdclog_level_get();
1781 if (logLevel >= MDCLOG_INFO) {
1782 mdclog_write(MDCLOG_INFO, "Successful Outcome %ld", procedureCode);
1784 switch (procedureCode) {
// Reset acknowledge: count it, convert the payload (XML_From_PER) and
// forward to the xApp as RIC_E2_RESET_RESP.
1785 case ProcedureCode_id_Reset: {
1786 if (logLevel >= MDCLOG_DEBUG) {
1787 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
1789 message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_Reset - 1]->Increment();
1790 message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1791 if (XML_From_PER(message, rmrMessageBuffer) < 0) {
1794 if (sendRequestToXapp(message, RIC_E2_RESET_RESP, rmrMessageBuffer) != 0) {
1795 mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_RESP message failed to send to xAPP");
// RIC control acknowledge: scan the protocol IEs for the RICrequestID,
// then send the message to the xApp as RIC_CONTROL_ACK with sub_id taken
// from the RIC instance id.
1799 case ProcedureCode_id_RICcontrol: {
1800 if (logLevel >= MDCLOG_DEBUG) {
1801 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
1804 i < pdu->choice.successfulOutcome->value.choice.RICcontrolAcknowledge.protocolIEs.list.count; i++) {
1805 auto messageSent = false;
1806 RICcontrolAcknowledge_IEs_t *ie = pdu->choice.successfulOutcome->value.choice.RICcontrolAcknowledge.protocolIEs.list.array[i];
1807 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1808 mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1810 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1811 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1812 mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1814 if (ie->value.present == RICcontrolAcknowledge_IEs__value_PR_RICrequestID) {
1815 message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_ACK;
1816 rmrMessageBuffer.sendMessage->state = 0;
// ricInstanceID (not ricRequestorID) is deliberately used as the RMR
// subscription id — the requestor-id variant is kept commented out.
1817 // rmrMessageBuffer.sendMessage->sub_id = (int) ie->value.choice.RICrequestID.ricRequestorID;
1818 rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
// NOTE(review): `tx` is function-static, so concurrent callers would race
// on it — confirm this path is single-threaded.
1820 static unsigned char tx[32];
1821 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1822 rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
// Route the reply by the RAN name carried as the RMR meid.
1823 rmr_bytes2meid(rmrMessageBuffer.sendMessage,
1824 (unsigned char *)message.message.enodbName,
1825 strlen(message.message.enodbName));
1827 message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment();
1828 message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1829 sendRmrMessage(rmrMessageBuffer, message);
// Reached when no RICrequestID IE was found in the acknowledge.
1832 mdclog_write(MDCLOG_ERR, "RIC request id missing illigal request");
// Subscription response: forward to the xApp as RIC_SUB_RESP.
1842 case ProcedureCode_id_RICsubscription: {
1843 if (logLevel >= MDCLOG_DEBUG) {
1844 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
1846 message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment();
1847 message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1848 if (sendRequestToXapp(message, RIC_SUB_RESP, rmrMessageBuffer) != 0) {
1849 mdclog_write(MDCLOG_ERR, "Subscription successful message failed to send to xAPP");
// Subscription-delete response: forward to the xApp as RIC_SUB_DEL_RESP.
1853 case ProcedureCode_id_RICsubscriptionDelete: {
1854 if (logLevel >= MDCLOG_DEBUG) {
1855 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
1857 message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment();
1858 message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1859 if (sendRequestToXapp(message, RIC_SUB_DEL_RESP, rmrMessageBuffer) != 0) {
1860 mdclog_write(MDCLOG_ERR, "Subscription delete successful message failed to send to xAPP");
// Default: unknown procedure — no RMR forward, only a JSON trace record.
1865 mdclog_write(MDCLOG_WARN, "Undefined or not supported message = %ld", procedureCode);
1866 message.message.messageType = 0; // no RMR message type yet
1867 buildJsonMessage(message);
1878 * @param rmrMessageBuffer
// Handle an E2AP unsuccessfulOutcome PDU received from the RAN side:
// increment the per-procedure IN_UN_SUCC counters and forward the failure
// to the xApp over RMR.  Mirrors asnSuccsesfulMsg for the failure branch.
// NOTE(review): this extract is missing lines (loop headers, braces,
// `break;`); comments describe only the visible code.
// @param pdu              decoded E2AP PDU, unsuccessfulOutcome branch
// @param sctpMap          connected-CU map (not used by the visible lines)
// @param message          reporting/trace context for this message
// @param rmrMessageBuffer RMR receive/send buffers for this worker
1880 void asnUnSuccsesfulMsg(E2AP_PDU_t *pdu,
1881 Sctp_Map_t *sctpMap,
1882 ReportingMessages_t &message,
1883 RmrMessagesBuffer_t &rmrMessageBuffer) {
1884 auto procedureCode = pdu->choice.unsuccessfulOutcome->procedureCode;
1885 auto logLevel = mdclog_level_get();
1886 if (logLevel >= MDCLOG_INFO) {
1887 mdclog_write(MDCLOG_INFO, "Unsuccessful Outcome %ld", procedureCode);
1889 switch (procedureCode) {
// RIC control failure: locate the RICrequestID IE and forward as
// RIC_CONTROL_FAILURE, sub_id taken from the RIC instance id.
1890 case ProcedureCode_id_RICcontrol: {
1891 if (logLevel >= MDCLOG_DEBUG) {
1892 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
1895 i < pdu->choice.unsuccessfulOutcome->value.choice.RICcontrolFailure.protocolIEs.list.count; i++) {
1896 auto messageSent = false;
1897 RICcontrolFailure_IEs_t *ie = pdu->choice.unsuccessfulOutcome->value.choice.RICcontrolFailure.protocolIEs.list.array[i];
1898 if (logLevel >= MDCLOG_DEBUG) {
1899 mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1901 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1902 if (logLevel >= MDCLOG_DEBUG) {
1903 mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1905 if (ie->value.present == RICcontrolFailure_IEs__value_PR_RICrequestID) {
1906 message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_FAILURE;
1907 rmrMessageBuffer.sendMessage->state = 0;
// ricInstanceID deliberately chosen over ricRequestorID (kept commented).
1908 // rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricRequestorID;
1909 rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
// NOTE(review): function-static transaction buffer — confirm this path
// is not entered concurrently from multiple threads.
1910 static unsigned char tx[32];
1911 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1912 rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1913 rmr_bytes2meid(rmrMessageBuffer.sendMessage, (unsigned char *) message.message.enodbName,
1914 strlen(message.message.enodbName));
1915 message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment();
1916 message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1917 sendRmrMessage(rmrMessageBuffer, message);
// Reached when no RICrequestID IE was found in the failure PDU.
1920 mdclog_write(MDCLOG_ERR, "RIC request id missing illigal request");
// Subscription failure: forward to the xApp as RIC_SUB_FAILURE.
1929 case ProcedureCode_id_RICsubscription: {
1930 if (logLevel >= MDCLOG_DEBUG) {
1931 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
1933 message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment();
1934 message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
1935 if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
1936 mdclog_write(MDCLOG_ERR, "Subscription unsuccessful message failed to send to xAPP");
// Subscription-delete failure branch.
1940 case ProcedureCode_id_RICsubscriptionDelete: {
1941 if (logLevel >= MDCLOG_DEBUG) {
1942 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
1944 message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment();
1945 message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
// NOTE(review): this sends RIC_SUB_FAILURE for a subscription *delete*
// failure — looks like a copy-paste from the case above; the upstream
// code uses RIC_SUB_DEL_FAILURE here. Confirm the intended message type.
1946 if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
1947 mdclog_write(MDCLOG_ERR, "Subscription Delete unsuccessful message failed to send to xAPP");
// Default: unknown procedure — record a JSON trace only.
1952 mdclog_write(MDCLOG_WARN, "Undefined or not supported message = %ld", procedureCode);
1953 message.message.messageType = 0; // no RMR message type yet
1955 buildJsonMessage(message);
1966 * @param rmrMmessageBuffer
// Send the current message to the xApp over RMR with a caller-supplied
// RMR message type.  Sets the meid to the RAN name, stamps a fresh
// transaction id, and delegates the actual send to sendRmrMessage.
// NOTE(review): the parameter line declaring `requestId` (the RMR mtype)
// is missing from this extract, as is the trailing `return rc;` — the
// body clearly uses both.
// @param message           reporting context; enodbName is used as meid
// @param rmrMmessageBuffer RMR buffers whose sendMessage is populated
// @return result of sendRmrMessage (0 on success per call sites)
1969 int sendRequestToXapp(ReportingMessages_t &message,
1971 RmrMessagesBuffer_t &rmrMmessageBuffer) {
1972 rmr_bytes2meid(rmrMmessageBuffer.sendMessage,
1973 (unsigned char *)message.message.enodbName,
1974 strlen(message.message.enodbName));
1975 message.message.messageType = rmrMmessageBuffer.sendMessage->mtype = requestId;
1976 rmrMmessageBuffer.sendMessage->state = 0;
// NOTE(review): static transaction buffer shared across calls — confirm
// single-threaded use.
1977 static unsigned char tx[32];
1978 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1979 rmr_bytes2xact(rmrMmessageBuffer.sendMessage, tx, strlen((const char *) tx));
1981 auto rc = sendRmrMessage(rmrMmessageBuffer, message);
1987 * @param pSctpParams
// Initialize the RMR context for this process: rmr_init, busy-wait until
// the routing table is ready, enable tracing, and register the RMR
// receive fd with the existing epoll instance.  On any failure the rmr
// context is closed and pSctpParams.rmrCtx is left as nullptr, which is
// the error signal callers check.
// NOTE(review): the wait loop header and sleep between rmr_ready polls
// are missing from this extract.
// @param pSctpParams holds rmrAddress/epoll_fd in, rmrCtx/rmrListenFd out
1989 void getRmrContext(sctp_params_t &pSctpParams) {
1990 pSctpParams.rmrCtx = nullptr;
1991 pSctpParams.rmrCtx = rmr_init(pSctpParams.rmrAddress, RECEIVE_XAPP_BUFFER_SIZE, RMRFL_NONE);
1992 if (pSctpParams.rmrCtx == nullptr) {
1993 mdclog_write(MDCLOG_ERR, "Failed to initialize RMR");
1997 rmr_set_stimeout(pSctpParams.rmrCtx, 0); // disable retries for any send operation
1998 // we need to find that routing table exist and we can run
1999 if (mdclog_level_get() >= MDCLOG_INFO) {
2000 mdclog_write(MDCLOG_INFO, "We are after RMR INIT wait for RMR_Ready");
// Poll rmr_ready until the route table arrives; log progress once a minute.
2005 if ((rmrReady = rmr_ready(pSctpParams.rmrCtx)) == 0) {
2009 if (count % 60 == 0) {
2010 mdclog_write(MDCLOG_INFO, "waiting to RMR ready state for %d seconds", count);
2013 if (mdclog_level_get() >= MDCLOG_INFO) {
2014 mdclog_write(MDCLOG_INFO, "RMR running");
2016 rmr_init_trace(pSctpParams.rmrCtx, 200);
2017 // get the RMR fd for the epoll
2018 pSctpParams.rmrListenFd = rmr_get_rcvfd(pSctpParams.rmrCtx);
2019 struct epoll_event event{};
2020 // add RMR fd to epoll
2021 event.events = (EPOLLIN);
2022 event.data.fd = pSctpParams.rmrListenFd;
2023 // add listening RMR FD to epoll
2024 if (epoll_ctl(pSctpParams.epoll_fd, EPOLL_CTL_ADD, pSctpParams.rmrListenFd, &event)) {
2025 mdclog_write(MDCLOG_ERR, "Failed to add RMR descriptor to epoll");
// Roll back on epoll failure: close fd and context, signal via nullptr.
2026 close(pSctpParams.rmrListenFd);
2027 rmr_close(pSctpParams.rmrCtx);
2028 pSctpParams.rmrCtx = nullptr;
2035 * @param rmrMessageBuffer
// Convert an XML (XER) encoded E2AP PDU received from an xApp into aligned
// basic PER, re-encoding in place into the same RMR receive buffer and
// updating its length.  Returns non-zero on decode/encode failure (the
// error-return lines are missing from this extract; call sites treat
// non-zero as failure).
// @param message          reporting context (enodbName used in error logs)
// @param rmrMessageBuffer rcvMessage payload is both input and output
2038 int PER_FromXML(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
2039 E2AP_PDU_t *pdu = nullptr;
2041 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2042 mdclog_write(MDCLOG_DEBUG, "got xml Format data from xApp of size %d is:%s",
2043 rmrMessageBuffer.rcvMessage->len, rmrMessageBuffer.rcvMessage->payload);
// Decode the XER text into an asn1c E2AP_PDU structure.
2045 auto rval = asn_decode(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, (void **) &pdu,
2046 rmrMessageBuffer.rcvMessage->payload, rmrMessageBuffer.rcvMessage->len);
2047 if (rval.code != RC_OK) {
2048 mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) setup response from E2MGR : %s",
2050 message.message.enodbName);
// Re-encode as aligned PER directly over the receive payload buffer.
2054 int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
2055 auto er = asn_encode_to_buffer(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, pdu,
2056 rmrMessageBuffer.rcvMessage->payload, buff_size);
2057 if (er.encoded == -1) {
2058 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
2060 } else if (er.encoded > (ssize_t)buff_size) {
2061 mdclog_write(MDCLOG_ERR, "Buffer of size %d is to small for %s, at %s line %d",
2062 (int)rmrMessageBuffer.rcvMessage->len,
2063 asn_DEF_E2AP_PDU.name,
// Success: the buffer now holds PER bytes; record the new length.
2068 rmrMessageBuffer.rcvMessage->len = er.encoded;
2075 * @param rmrMessageBuffer
// Receive one RMR message from an xApp and dispatch it by RMR message type:
// most types are converted (PER_FromXML where the xApp sends XML), counted,
// and sent to the target RAN over SCTP; RIC_SCTP_CLEAR_ALL tears down all
// peer connections; keep-alive and health-check requests are answered
// directly over RMR.
// NOTE(review): many lines (braces, `break;`, early `return -1;` paths)
// are missing from this extract; comments describe only visible code.
// @param sctpMap          map of connected CUs keyed by RAN name
// @param rmrMessageBuffer RMR context plus receive/send buffers
// @param ts               receive timestamp copied into the trace record
// @return 0 on the visible success path (exact error returns not visible)
2079 int receiveXappMessages(Sctp_Map_t *sctpMap,
2080 RmrMessagesBuffer_t &rmrMessageBuffer,
2081 struct timespec &ts) {
2082 if (rmrMessageBuffer.rcvMessage == nullptr) {
2084 mdclog_write(MDCLOG_ERR, "RMR Allocation message, %s", strerror(errno));
2088 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2089 mdclog_write(MDCLOG_DEBUG, "Call to rmr_rcv_msg");
2091 rmrMessageBuffer.rcvMessage = rmr_rcv_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.rcvMessage);
2092 if (rmrMessageBuffer.rcvMessage == nullptr) {
// rmr_rcv_msg consumed the buffer; reallocate so the next call can receive.
2093 mdclog_write(MDCLOG_ERR, "RMR Receving message with null pointer, Realloc rmr mesage buffer");
2094 rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
// Build the trace record: direction 'D' = toward the RAN (downstream).
2097 ReportingMessages_t message;
2098 message.message.direction = 'D';
2099 message.message.time.tv_nsec = ts.tv_nsec;
2100 message.message.time.tv_sec = ts.tv_sec;
2102 // get message payload
2103 //auto msgData = msg->payload;
2104 if (rmrMessageBuffer.rcvMessage->state != 0) {
2105 mdclog_write(MDCLOG_ERR, "RMR Receving message with stat = %d", rmrMessageBuffer.rcvMessage->state);
// The RMR meid carries the target RAN name; look up its SCTP peer entry.
2108 rmr_get_meid(rmrMessageBuffer.rcvMessage, (unsigned char *)message.message.enodbName);
2109 message.peerInfo = (ConnectedCU_t *) sctpMap->find(message.message.enodbName);
2110 if (message.peerInfo == nullptr) {
// No peer for this RAN name: only peer-less message types may proceed.
2111 auto type = rmrMessageBuffer.rcvMessage->mtype;
2113 case RIC_SCTP_CLEAR_ALL:
2114 case E2_TERM_KEEP_ALIVE_REQ:
2115 case RIC_HEALTH_CHECK_REQ:
2118 mdclog_write(MDCLOG_ERR, "Failed to send message no CU entry %s", message.message.enodbName);
2123 switch (rmrMessageBuffer.rcvMessage->mtype) {
// E2 Setup response (XML from E2MGR): convert to PER, count, send to CU.
2124 case RIC_E2_SETUP_RESP : {
2125 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2128 message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup - 1]->Increment();
2129 message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2130 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2131 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_RESP");
2136 case RIC_E2_SETUP_FAILURE : {
2137 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2140 message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup - 1]->Increment();
2141 message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2142 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2143 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_FAILURE");
// Already-PER payloads below are forwarded without conversion.
2148 case RIC_ERROR_INDICATION: {
2149 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication - 1]->Increment();
2150 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2151 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2152 mdclog_write(MDCLOG_ERR, "Failed to send RIC_ERROR_INDICATION");
// RIC_SUB_REQ case label is among the missing lines of this extract.
2158 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment();
2159 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2160 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2161 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_REQ");
2166 case RIC_SUB_DEL_REQ: {
2167 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment();
2168 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2169 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2170 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_DEL_REQ");
2175 case RIC_CONTROL_REQ: {
2176 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment();
2177 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2178 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2179 mdclog_write(MDCLOG_ERR, "Failed to send RIC_CONTROL_REQ");
2184 case RIC_SERVICE_QUERY: {
2185 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2188 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceQuery - 1]->Increment();
2189 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2190 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2191 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_QUERY");
2196 case RIC_SERVICE_UPDATE_ACK: {
2197 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2200 message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate - 1]->Increment();
// NOTE(review): byte counter uses ProcedureCode_id_RICserviceQuery while
// the msg counter uses RICserviceUpdate — likely a copy-paste slip; confirm.
2201 message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2202 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2203 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_ACK");
2208 case RIC_SERVICE_UPDATE_FAILURE: {
2209 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2212 message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate - 1]->Increment();
// NOTE(review): same RICserviceQuery/RICserviceUpdate mismatch as above.
2213 message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2214 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2215 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_FAILURE");
2220 case RIC_E2_RESET_REQ: {
2221 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2224 message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_Reset - 1]->Increment();
2225 message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_Reset - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2226 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2227 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET");
2232 case RIC_E2_RESET_RESP: {
2233 if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2236 message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_Reset - 1]->Increment();
2237 message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
2238 if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2239 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET_RESP");
// Administrative teardown: close every CU socket and notify the xApp of
// each lost connection with RIC_SCTP_CONNECTION_FAILURE.
2244 case RIC_SCTP_CLEAR_ALL: {
2245 mdclog_write(MDCLOG_INFO, "RIC_SCTP_CLEAR_ALL");
2246 // loop on all keys and close socket and then erase all map.
2248 sctpMap->getKeys(v);
2249 for (auto const &iter : v) { //}; iter != sctpMap.end(); iter++) {
// Skip auxiliary "host:"/"msg:" keys; only RAN-name entries hold peers.
2250 if (!boost::starts_with((string) (iter), "host:") && !boost::starts_with((string) (iter), "msg:")) {
2251 auto *peerInfo = (ConnectedCU_t *) sctpMap->find(iter);
2252 if (peerInfo == nullptr) {
2255 close(peerInfo->fileDescriptor);
2256 memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
2257 message.message.direction = 'D';
2258 message.message.time.tv_nsec = ts.tv_nsec;
2259 message.message.time.tv_sec = ts.tv_sec;
2261 message.message.asnLength = rmrMessageBuffer.sendMessage->len =
2262 snprintf((char *)rmrMessageBuffer.sendMessage->payload,
2264 "%s|RIC_SCTP_CLEAR_ALL",
2265 peerInfo->enodbName);
2266 message.message.asndata = rmrMessageBuffer.sendMessage->payload;
2267 mdclog_write(MDCLOG_INFO, "%s", message.message.asndata);
2268 if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
2269 mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
// Keep-alive: echo the preformatted ka_message back to the sender.
2279 case E2_TERM_KEEP_ALIVE_REQ: {
2280 // send message back
2281 rmr_bytes2payload(rmrMessageBuffer.sendMessage,
2282 (unsigned char *)rmrMessageBuffer.ka_message,
2283 rmrMessageBuffer.ka_message_len);
2284 rmrMessageBuffer.sendMessage->mtype = E2_TERM_KEEP_ALIVE_RESP;
2285 rmrMessageBuffer.sendMessage->state = 0;
2286 static unsigned char tx[32];
2287 auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2288 rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, txLen);
2289 rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2290 if (rmrMessageBuffer.sendMessage == nullptr) {
2291 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2292 mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP RMR message returned NULL");
2293 } else if (rmrMessageBuffer.sendMessage->state != 0) {
2294 mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP, on RMR state = %d ( %s)",
2295 rmrMessageBuffer.sendMessage->state, translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str());
2296 } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
2297 mdclog_write(MDCLOG_DEBUG, "Got Keep Alive Request send : %s", rmrMessageBuffer.ka_message);
// Health check: reply "OK" over RMR; same send/realloc pattern as above.
2302 case RIC_HEALTH_CHECK_REQ: {
2303 // send message back
2304 rmr_bytes2payload(rmrMessageBuffer.sendMessage,
2305 (unsigned char *)"OK",
2307 rmrMessageBuffer.sendMessage->mtype = RIC_HEALTH_CHECK_RESP;
2308 rmrMessageBuffer.sendMessage->state = 0;
2309 static unsigned char tx[32];
2310 auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2311 rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, txLen);
2312 rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2313 if (rmrMessageBuffer.sendMessage == nullptr) {
2314 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2315 mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP RMR message returned NULL");
2316 } else if (rmrMessageBuffer.sendMessage->state != 0) {
2317 mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP, on RMR state = %d ( %s)",
2318 rmrMessageBuffer.sendMessage->state, translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str());
2319 } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
2320 mdclog_write(MDCLOG_DEBUG, "Got RIC_HEALTH_CHECK_REQ Request send : OK");
// Default: unsupported RMR type — record a JSON trace only.
2327 mdclog_write(MDCLOG_WARN, "Message Type : %d is not seported", rmrMessageBuffer.rcvMessage->mtype);
2328 message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
2329 message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
2330 message.message.time.tv_nsec = ts.tv_nsec;
2331 message.message.time.tv_sec = ts.tv_sec;
2332 message.message.messageType = rmrMessageBuffer.rcvMessage->mtype;
2334 buildJsonMessage(message);
2339 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2340 mdclog_write(MDCLOG_DEBUG, "EXIT OK from %s", __FUNCTION__);
2346  * Send a message to the CU without waiting for a successful or unsuccessful outcome
2347 * @param messageBuffer
2349 * @param failedMsgId
// Thin wrapper that extracts request metadata from the RMR buffer and
// forwards the message to the target CU over SCTP via sendMessagetoCu.
// NOTE(review): the `failedMsgId` parameter line and trailing `return rc;`
// are missing from this extract; call sites pass 0 for that argument.
// @param messageBuffer RMR buffers holding the received payload
// @param message       reporting context (enodbName filled by metadata)
// @param sctpMap       connected-CU map used for the SCTP send
// @return result of sendMessagetoCu
2353 int sendDirectionalSctpMsg(RmrMessagesBuffer_t &messageBuffer,
2354 ReportingMessages_t &message,
2356 Sctp_Map_t *sctpMap) {
2358 getRequestMetaData(message, messageBuffer);
2359 if (mdclog_level_get() >= MDCLOG_INFO) {
2360 mdclog_write(MDCLOG_INFO, "send message to %s address", message.message.enodbName);
2363 auto rc = sendMessagetoCu(sctpMap, messageBuffer, message, failedMsgId);
2370 * @param messageBuffer
2372 * @param failedMesgId
// Copy the RMR message type into the trace record and push the payload to
// the CU over the already-established SCTP association (sendSctpMsg).
// NOTE(review): the `failedMesgId` parameter line and `return rc;` are
// missing from this extract.
// @param sctpMap       connected-CU map (passed through to sendSctpMsg)
// @param messageBuffer source of the RMR message type
// @param message       reporting context; peerInfo identifies the CU
// @return result of sendSctpMsg
2375 int sendMessagetoCu(Sctp_Map_t *sctpMap,
2376 RmrMessagesBuffer_t &messageBuffer,
2377 ReportingMessages_t &message,
2380 message.message.messageType = messageBuffer.rcvMessage->mtype;
2381 auto rc = sendSctpMsg(message.peerInfo, message, sctpMap);
2387 * @param rmrCtx the rmr context to send and receive
2388  * @param msg the msg we got from xApp
2389 * @param metaData data from xApp in ordered struct
2390 * @param failedMesgId the return message type error
// Report a failed CU send back to the xApp: fill the RMR send payload with
// a human-readable "gNb/eNode name ... not found" text, set the caller's
// failure message type, stamp a transaction id, and send over RMR.
// NOTE(review): the return-type line of this definition is missing from
// the extract (only the signature tail is visible).
// @param rmrMessageBuffer RMR buffers whose sendMessage is (re)used
// @param message          reporting context naming the missing RAN
// @param failedMesgId     RMR message type to use for the failure reply
2393 sendFailedSendingMessagetoXapp(RmrMessagesBuffer_t &rmrMessageBuffer, ReportingMessages_t &message, int failedMesgId) {
2394 rmr_mbuf_t *msg = rmrMessageBuffer.sendMessage;
2395 msg->len = snprintf((char *) msg->payload, 200, "the gNb/eNode name %s not found",
2396 message.message.enodbName);
2397 if (mdclog_level_get() >= MDCLOG_INFO) {
2398 mdclog_write(MDCLOG_INFO, "%s", msg->payload);
2400 msg->mtype = failedMesgId;
// NOTE(review): static transaction buffer shared across calls — confirm
// single-threaded use.
2403 static unsigned char tx[32];
2404 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2405 rmr_bytes2xact(msg, tx, strlen((const char *) tx));
2407 sendRmrMessage(rmrMessageBuffer, message);
// Register a CU's SCTP socket with the epoll instance (EPOLL_CTL_ADD),
// storing the peer pointer in event.data.ptr for dispatch.  On failure the
// socket is closed and, when an enodbName is supplied, the peer's hash
// entries (including its pending "msg:<name>|<type>" key) are cleaned up.
// NOTE(review): return statements and some closing braces are missing from
// this extract; the `events`/`enodbName`/`msgType` parameter lines are
// also among the missing lines, though the body clearly uses them.
// @param epoll_fd  epoll instance to register with
// @param peerInfo  connected CU whose fileDescriptor is added
// @param sctpMap   map for cleanup on failure
2422 int addToEpoll(int epoll_fd,
2423 ConnectedCU_t *peerInfo,
2425 Sctp_Map_t *sctpMap,
2429 struct epoll_event event{};
2430 event.data.ptr = peerInfo;
2431 event.events = events;
2432 if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, peerInfo->fileDescriptor, &event) < 0) {
2433 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2434 mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_ADD (may chack not to quit here), %s, %s %d",
2435 strerror(errno), __func__, __LINE__);
// Registration failed: release the socket and purge map entries so the
// stale peer cannot be looked up again.
2437 close(peerInfo->fileDescriptor);
2438 if (enodbName != nullptr) {
2439 cleanHashEntry(peerInfo, sctpMap);
2440 char key[MAX_ENODB_NAME_SIZE * 2];
2441 snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", enodbName, msgType);
2442 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2443 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
2445 auto tmp = sctpMap->find(key);
2448 sctpMap->erase(key);
// No name known yet: just clear the (partial) name on the peer record.
2451 peerInfo->enodbName[0] = 0;
2453 mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_ADD (may chack not to quit here)");
// Modify an existing epoll registration for a CU's SCTP socket
// (EPOLL_CTL_MOD), e.g. to change the watched event mask.  On failure the
// socket is closed and the peer's hash entries are cleaned up, mirroring
// addToEpoll's failure path.
// NOTE(review): the `events`/`enodbName`/`msgType` parameter lines and the
// return statements are missing from this extract.
// @param epoll_fd  epoll instance holding the registration
// @param peerInfo  connected CU whose fileDescriptor is modified
// @param sctpMap   map for cleanup on failure
2469 int modifyToEpoll(int epoll_fd,
2470 ConnectedCU_t *peerInfo,
2472 Sctp_Map_t *sctpMap,
2476 struct epoll_event event{};
2477 event.data.ptr = peerInfo;
2478 event.events = events;
2479 if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, peerInfo->fileDescriptor, &event) < 0) {
2480 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2481 mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_MOD (may chack not to quit here), %s, %s %d",
2482 strerror(errno), __func__, __LINE__);
2484 close(peerInfo->fileDescriptor);
2485 cleanHashEntry(peerInfo, sctpMap);
2486 char key[MAX_ENODB_NAME_SIZE * 2];
2487 snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", enodbName, msgType);
2488 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2489 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
2491 auto tmp = sctpMap->find(key);
2495 sctpMap->erase(key);
// NOTE(review): this error string says EPOLL_CTL_ADD but the failing
// operation above is EPOLL_CTL_MOD — misleading log text; flag for fix.
2496 mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_ADD (may chack not to quit here)");
// Send the prepared sendMessage over RMR, emitting a JSON trace record
// first.  A nullptr return from rmr_send_msg (buffer consumed) triggers a
// buffer reallocation; RMR_ERR_RETRY is retried exactly once; any other
// non-zero state is logged and returned to the caller.
// NOTE(review): the success `return 0;`, the sleep before retry, and some
// closing braces are missing from this extract.
// @param rmrMessageBuffer RMR context plus the populated sendMessage
// @param message          trace context passed to buildJsonMessage
// @return 0 on success (per call sites), RMR state code on send failure
2503 int sendRmrMessage(RmrMessagesBuffer_t &rmrMessageBuffer, ReportingMessages_t &message) {
2504 buildJsonMessage(message);
2506 rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2508 if (rmrMessageBuffer.sendMessage == nullptr) {
// RMR consumed the buffer; allocate a fresh one so later sends can proceed.
2509 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2510 mdclog_write(MDCLOG_ERR, "RMR failed send message returned with NULL pointer");
2514 if (rmrMessageBuffer.sendMessage->state != 0) {
2515 char meid[RMR_MAX_MEID]{};
2516 if (rmrMessageBuffer.sendMessage->state == RMR_ERR_RETRY) {
// Single retry on the transient RETRY state; state reset before resend.
2518 rmrMessageBuffer.sendMessage->state = 0;
2519 mdclog_write(MDCLOG_INFO, "RETRY sending Message type %d to Xapp from %s",
2520 rmrMessageBuffer.sendMessage->mtype,
2521 rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2522 rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2523 if (rmrMessageBuffer.sendMessage == nullptr) {
2524 mdclog_write(MDCLOG_ERR, "RMR failed send message returned with NULL pointer");
2525 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2527 } else if (rmrMessageBuffer.sendMessage->state != 0) {
2528 mdclog_write(MDCLOG_ERR,
2529 "Message state %s while sending request %d to Xapp from %s after retry of 10 microseconds",
2530 translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str(),
2531 rmrMessageBuffer.sendMessage->mtype,
2532 rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2533 auto rc = rmrMessageBuffer.sendMessage->state;
// Non-RETRY failure: log and surface the RMR state code.
2537 mdclog_write(MDCLOG_ERR, "Message state %s while sending request %d to Xapp from %s",
2538 translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str(),
2539 rmrMessageBuffer.sendMessage->mtype,
2540 rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2541 return rmrMessageBuffer.sendMessage->state;
// Build a JSON trace record for the current message — timestamp, RAN name,
// message type, direction, and the ASN.1 payload base64-encoded — and emit
// it through the Boost.Log file sink.
// NOTE(review): a `if (jsonTrace)` guard around this body appears to be
// among the missing lines of this extract (the global flag exists at the
// top of the file); confirm against the full source.
// @param message reporting context whose asndata/asnLength are encoded
2547 void buildJsonMessage(ReportingMessages_t &message) {
// outLen is in/out for the encoder: capacity on entry, bytes written after.
2549 message.outLen = sizeof(message.base64Data);
2550 base64::encode((const unsigned char *) message.message.asndata,
2551 (const int) message.message.asnLength,
2554 if (mdclog_level_get() >= MDCLOG_DEBUG) {
2555 mdclog_write(MDCLOG_DEBUG, "Tracing: ASN length = %d, base64 message length = %d ",
2556 (int) message.message.asnLength,
2557 (int) message.outLen);
// Assemble the final JSON record; snprintf bounds it to message.buffer.
2560 snprintf(message.buffer, sizeof(message.buffer),
2561 "{\"header\": {\"ts\": \"%ld.%09ld\","
2562 "\"ranName\": \"%s\","
2563 "\"messageType\": %d,"
2564 "\"direction\": \"%c\"},"
2565 "\"base64Length\": %d,"
2566 "\"asnBase64\": \"%s\"}",
2567 message.message.time.tv_sec,
2568 message.message.time.tv_nsec,
2569 message.message.enodbName,
2570 message.message.messageType,
2571 message.message.direction,
2572 (int) message.outLen,
2573 message.base64Data);
// Reuse one multithread-safe Boost logger instance across calls.
2574 static src::logger_mt &lg = my_logger::get();
2576 BOOST_LOG(lg) << message.buffer;
2582 * take RMR error code to string
2586 string translateRmrErrorMessages(int state) {
2590 str = "RMR_OK - state is good";
2592 case RMR_ERR_BADARG:
2593 str = "RMR_ERR_BADARG - argument passd to function was unusable";
2595 case RMR_ERR_NOENDPT:
2596 str = "RMR_ERR_NOENDPT - send//call could not find an endpoint based on msg type";
2599 str = "RMR_ERR_EMPTY - msg received had no payload; attempt to send an empty message";
2602 str = "RMR_ERR_NOHDR - message didn't contain a valid header";
2604 case RMR_ERR_SENDFAILED:
2605 str = "RMR_ERR_SENDFAILED - send failed; errno has nano reason";
2607 case RMR_ERR_CALLFAILED:
2608 str = "RMR_ERR_CALLFAILED - unable to send call() message";
2610 case RMR_ERR_NOWHOPEN:
2611 str = "RMR_ERR_NOWHOPEN - no wormholes are open";
2614 str = "RMR_ERR_WHID - wormhole id was invalid";
2616 case RMR_ERR_OVERFLOW:
2617 str = "RMR_ERR_OVERFLOW - operation would have busted through a buffer/field size";
2620 str = "RMR_ERR_RETRY - request (send/call/rts) failed, but caller should retry (EAGAIN for wrappers)";
2622 case RMR_ERR_RCVFAILED:
2623 str = "RMR_ERR_RCVFAILED - receive failed (hard error)";
2625 case RMR_ERR_TIMEOUT:
2626 str = "RMR_ERR_TIMEOUT - message processing call timed out";
2629 str = "RMR_ERR_UNSET - the message hasn't been populated with a transport buffer";
2632 str = "RMR_ERR_TRUNC - received message likely truncated";
2634 case RMR_ERR_INITFAILED:
2635 str = "RMR_ERR_INITFAILED - initialisation of something (probably message) failed";
2637 case RMR_ERR_NOTSUPP:
2638 str = "RMR_ERR_NOTSUPP - the request is not supported, or RMr was not initialised for the request";
2642 snprintf(buf, sizeof buf, "UNDOCUMENTED RMR_ERR : %d", state);