1 // Copyright 2019 AT&T Intellectual Property
2 // Copyright 2019 Nokia
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
16 // This source code is part of the near-RT RIC (RAN Intelligent Controller)
17 // platform project (RICP).
19 // TODO: High-level file comment.
23 #include <3rdparty/oranE2/RANfunctions-List.h>
24 #include "sctpThread.h"
25 #include "BuildRunName.h"
27 //#include "3rdparty/oranE2SM/E2SM-gNB-NRT-RANfunction-Definition.h"
28 //#include "BuildXml.h"
29 //#include "pugixml/src/pugixml.hpp"
32 //using namespace std::placeholders;
33 using namespace boost::filesystem;
34 using namespace prometheus;
// ---------------------------------------------------------------------------
// Process-wide globals and timing helpers.
// NOTE(review): this view of the file is elided (the embedded line numbers
// jump), so some declarations/bodies between the lines below are missing.
// ---------------------------------------------------------------------------
42 // need to expose without the include of gcov
// Manual declaration so the signal handler can flush coverage counters
// without including gcov internals.
43 extern "C" void __gcov_flush(void);
// Signal handler for SIGINT/SIGABRT/SIGTERM (installed in main()).
// Body elided in this view — presumably flushes gcov data and exits; confirm
// against the full file.
45 static void catch_function(int signal) {
// Boost.Log global logger feeding the rotating message-trace file sink
// configured in buildConfiguration().
51 BOOST_LOG_INLINE_GLOBAL_LOGGER_DEFAULT(my_logger, src::logger_mt)
53 boost::shared_ptr<sinks::synchronous_sink<sinks::text_file_backend>> boostLogger;
// Measured CPU clock rate (set once in main() via approx_CPU_MHz()); used
// with rdtscp-based timing elsewhere.
54 double cpuClock = 0.0;
// Global JSON-trace switch; mirrors sctpParams.trace (see buildConfiguration
// and handleConfigChange).
55 bool jsonTrace = true;
// mdclog initialization fragment: tags all log lines with the identity
// "E2Terminator". (Surrounding function elided in this view.)
59     mdclog_attr_init(&attr);
60     mdclog_attr_set_ident(attr, "E2Terminator");
62     mdclog_attr_destroy(attr);
// Wall-clock anchor taken at static-init time; the fragment at "68" below is
// the tail of an age() helper returning seconds elapsed since start_time.
64 auto start_time = std::chrono::high_resolution_clock::now();
65 typedef std::chrono::duration<double, std::ratio<1,1>> seconds_t;
68     return seconds_t(std::chrono::high_resolution_clock::now() - start_time).count();
// Estimate the CPU timestamp-counter rate by sleeping for `sleeptime`
// milliseconds and dividing elapsed rdtscp cycles by elapsed wall time.
// Despite the name, the returned unit is cycles per second (Hz), since
// elapsed_time is in seconds — TODO confirm callers expect that.
// NOTE(review): the declaration of `aux` (rdtscp auxiliary out-param) is
// elided in this view.
71 double approx_CPU_MHz(unsigned sleeptime) {
72     using namespace std::chrono_literals;
74     uint64_t cycles_start = rdtscp(aux);
75     double time_start = age();
76     std::this_thread::sleep_for(sleeptime * 1ms);
77     uint64_t elapsed_cycles = rdtscp(aux) - cycles_start;
78     double elapsed_time = age() - time_start;
// Integer/double division: cycles per second over the sampled window.
79     return elapsed_cycles / elapsed_time;
82 //std::atomic<int64_t> rmrCounter{0};
// Total messages handled across all listener threads (SCTP + RMR).
83 std::atomic<int64_t> num_of_messages{0};
// Messages received from xApps over RMR; handleTermInit() polls this to
// detect the first xApp message after startup.
84 std::atomic<int64_t> num_of_XAPP_messages{0};
// Monotonic transaction-id counter for RMR xact fields; randomly seeded in
// main(). Not atomic — assumed only touched from one thread; TODO confirm.
85 static long transactionCounter = 0;
// Create the IPv6 SCTP listening socket, bind it to sctpParams.sctpPort on
// the wildcard address, make it non-blocking, and register it (edge-
// triggered) with the process epoll instance.
// Returns 0 on success; error paths are elided in this view but presumably
// return non-zero (main() treats != 0 as failure) — TODO confirm.
87 int buildListeningPort(sctp_params_t &sctpParams) {
88     sctpParams.listenFD = socket(AF_INET6, SOCK_STREAM, IPPROTO_SCTP);
89     if (sctpParams.listenFD <= 0) {
90         mdclog_write(MDCLOG_ERR, "Error Opening socket, %s", strerror(errno));
// Bind to in6addr_any so connections are accepted on every interface.
94     struct sockaddr_in6 servaddr {};
95     servaddr.sin6_family = AF_INET6;
96     servaddr.sin6_addr   = in6addr_any;
97     servaddr.sin6_port = htons(sctpParams.sctpPort);
98     if (bind(sctpParams.listenFD, (SA *)&servaddr, sizeof(servaddr)) < 0 ) {
99         mdclog_write(MDCLOG_ERR, "Error binding port %d. %s", sctpParams.sctpPort, strerror(errno));
// Non-blocking is required because the socket is polled edge-triggered.
102     if (setSocketNoBlocking(sctpParams.listenFD) == -1) {
103         //mdclog_write(MDCLOG_ERR, "Error binding. %s", strerror(errno));
// Debug-only: report the locally bound address/port.
// (The `buff` declaration is elided in this view.)
106     if (mdclog_level_get() >= MDCLOG_DEBUG) {
107         struct sockaddr_in6 cliaddr {};
108         socklen_t len = sizeof(cliaddr);
109         getsockname(sctpParams.listenFD, (SA *)&cliaddr, &len);
111         inet_ntop(AF_INET6, &cliaddr.sin6_addr, buff, sizeof(buff));
// NOTE(review): htons() is used here to display the port; ntohs() would be
// the conventional choice (same result for 16-bit values) — verify.
112         mdclog_write(MDCLOG_DEBUG, "My address: %s, port %d\n", buff, htons(cliaddr.sin6_port));
115     if (listen(sctpParams.listenFD, SOMAXCONN) < 0) {
116         mdclog_write(MDCLOG_ERR, "Error listening. %s\n", strerror(errno));
// Register the listening fd with epoll in edge-triggered mode; listener()
// distinguishes it from data fds by comparing events[i].data.fd.
119     struct epoll_event event {};
120     event.events = EPOLLIN | EPOLLET;
121     event.data.fd = sctpParams.listenFD;
123     // add listening port to epoll
124     if (epoll_ctl(sctpParams.epoll_fd, EPOLL_CTL_ADD, sctpParams.listenFD, &event)) {
125         printf("Failed to add descriptor to epoll\n");
126         mdclog_write(MDCLOG_ERR, "Failed to add descriptor to epoll. %s\n", strerror(errno));
// Read and apply the E2Term configuration file (path/name come from the
// command line via parse()). Populates sctpParams: RMR port, log level, log
// volume, local IP, SCTP port, external FQDN, pod name, trace flag and
// Prometheus mode, then sets up the rotating Boost.Log trace sink.
// Returns 0 on success; error-return lines are elided in this view.
133 int buildConfiguration(sctp_params_t &sctpParams) {
134     path p = (sctpParams.configFilePath + "/" + sctpParams.configFileName).c_str();
// Reject configuration files larger than 2 KiB (sanity limit).
136     const int size = 2048;
137     auto fileSize = file_size(p);
138     if (fileSize > size) {
139         mdclog_write(MDCLOG_ERR, "File %s larger than %d", p.string().c_str(), size);
143         mdclog_write(MDCLOG_ERR, "Configuration File %s not exists", p.string().c_str());
// (`conf` construction elided in this view — presumably a ReadConfigFile.)
148     if (conf.openConfigFile(p.string()) == -1) {
149         mdclog_write(MDCLOG_ERR, "Filed to open config file %s, %s",
150                      p.string().c_str(), strerror(errno));
// "nano" is the (historical) config key holding the RMR listen port.
153     int rmrPort = conf.getIntValue("nano");
155         mdclog_write(MDCLOG_ERR, "illigal RMR port ");
158     sctpParams.rmrPort = (uint16_t)rmrPort;
// RMR wants the port as a string when creating its context.
159     snprintf(sctpParams.rmrAddress, sizeof(sctpParams.rmrAddress), "%d", (int) (sctpParams.rmrPort));
// Map the textual "loglevel" value onto an mdclog level; unrecognized
// values fall back to MDCLOG_INFO.
161     auto tmpStr = conf.getStringValue("loglevel");
162     if (tmpStr.length() == 0) {
163         mdclog_write(MDCLOG_ERR, "illigal loglevel. Set loglevel to MDCLOG_INFO");
166     transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
168     if ((tmpStr.compare("debug")) == 0) {
169         sctpParams.logLevel = MDCLOG_DEBUG;
170     } else if ((tmpStr.compare("info")) == 0) {
171         sctpParams.logLevel = MDCLOG_INFO;
172     } else if ((tmpStr.compare("warning")) == 0) {
173         sctpParams.logLevel = MDCLOG_WARN;
174     } else if ((tmpStr.compare("error")) == 0) {
175         sctpParams.logLevel = MDCLOG_ERR;
177         mdclog_write(MDCLOG_ERR, "illigal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
178         sctpParams.logLevel = MDCLOG_INFO;
180     mdclog_level_set(sctpParams.logLevel);
// "volume" is the directory that collects rotated trace-log files.
182     tmpStr = conf.getStringValue("volume");
183     if (tmpStr.length() == 0) {
184         mdclog_write(MDCLOG_ERR, "illigal volume.");
188     char tmpLogFilespec[VOLUME_URL_SIZE];
189     tmpLogFilespec[0] = 0;
190     sctpParams.volume[0] = 0;
191     snprintf(sctpParams.volume, VOLUME_URL_SIZE, "%s", tmpStr.c_str());
192     // copy the name to temp file as well
193     snprintf(tmpLogFilespec, VOLUME_URL_SIZE, "%s", tmpStr.c_str());
196     // define the file name in the tmp directory under the volume
// NOTE(review): strcat here relies on the volume path leaving enough slack
// in tmpLogFilespec (VOLUME_URL_SIZE) for the pattern suffix — verify.
197     strcat(tmpLogFilespec,"/tmp/E2Term_%Y-%m-%d_%H-%M-%S.%N.tmpStr");
199     sctpParams.myIP = conf.getStringValue("local-ip");
200     if (sctpParams.myIP.length() == 0) {
201         mdclog_write(MDCLOG_ERR, "illigal local-ip.");
205     int sctpPort = conf.getIntValue("sctp-port");
206     if (sctpPort == -1) {
207         mdclog_write(MDCLOG_ERR, "illigal SCTP port ");
210     sctpParams.sctpPort = (uint16_t)sctpPort;
212     sctpParams.fqdn = conf.getStringValue("external-fqdn");
213     if (sctpParams.fqdn.length() == 0) {
214         mdclog_write(MDCLOG_ERR, "illigal external-fqdn");
// "pod_name" names an environment variable whose value is this pod's name.
218     std::string pod = conf.getStringValue("pod_name");
219     if (pod.length() == 0) {
220         mdclog_write(MDCLOG_ERR, "illigal pod_name in config file");
223     auto *podName = getenv(pod.c_str());
224     if (podName == nullptr) {
225         mdclog_write(MDCLOG_ERR, "illigal pod_name or environment varible not exists : %s", pod.c_str());
229     sctpParams.podName.assign(podName);
230     if (sctpParams.podName.length() == 0) {
231         mdclog_write(MDCLOG_ERR, "illigal pod_name");
// "trace" = start|stop toggles the JSON message-trace sink.
236     tmpStr = conf.getStringValue("trace");
237     transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
238     if ((tmpStr.compare("start")) == 0) {
239         mdclog_write(MDCLOG_INFO, "Trace set to: start");
240         sctpParams.trace = true;
241     } else if ((tmpStr.compare("stop")) == 0) {
242         mdclog_write(MDCLOG_INFO, "Trace set to: stop");
243         sctpParams.trace = false;
245     jsonTrace = sctpParams.trace;
// epollTimeOut: -1 (block forever) unless push-mode Prometheus needs a
// periodic wakeup to push stats (5..300 s window, default 10 s).
247     sctpParams.epollTimeOut = -1;
248     tmpStr = conf.getStringValue("prometheusMode");
249     transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
250     sctpParams.prometheusMode = tmpStr;
251     if (tmpStr.length() != 0) {
252         if (tmpStr.compare("push") == 0) {
// NOTE(review): prometheusPushAddress is first assigned the *mode* string
// here, then overwritten from "prometheusPushAddr" below — looks redundant
// or a latent bug if "prometheusPushAddr" is absent; verify intent.
253             sctpParams.prometheusPushAddress = tmpStr;
254             auto timeout = conf.getIntValue("prometheusPushTimeOut");
255             if (timeout >= 5 && timeout <= 300) {
256                 sctpParams.epollTimeOut = timeout * 1000;
258                 sctpParams.epollTimeOut = 10 * 1000;
263     tmpStr = conf.getStringValue("prometheusPushAddr");
264     if (tmpStr.length() != 0) {
265         sctpParams.prometheusPushAddress = tmpStr;
268     tmpStr = conf.getStringValue("prometheusPort");
269     if (tmpStr.length() != 0) {
270         sctpParams.prometheusPort = tmpStr;
// Pre-build the keep-alive JSON payload sent as E2_TERM_INIT/keep-alive.
273     sctpParams.ka_message_length = snprintf(sctpParams.ka_message, KA_MESSAGE_SIZE, "{\"address\": \"%s:%d\","
275                                                                                    "\"pod_name\": \"%s\"}",
276                                             (const char *)sctpParams.myIP.c_str(),
278                                             sctpParams.fqdn.c_str(),
279                                             sctpParams.podName.c_str());
// Attach the effective runtime parameters to the MDC for one INFO line.
281     if (mdclog_level_get() >= MDCLOG_INFO) {
282         mdclog_mdc_add("RMR Port", to_string(sctpParams.rmrPort).c_str());
283         mdclog_mdc_add("LogLevel", to_string(sctpParams.logLevel).c_str());
284         mdclog_mdc_add("volume", sctpParams.volume);
285         mdclog_mdc_add("tmpLogFilespec", tmpLogFilespec);
286         mdclog_mdc_add("my ip", sctpParams.myIP.c_str());
287         mdclog_mdc_add("pod name", sctpParams.podName.c_str());
289         mdclog_write(MDCLOG_INFO, "running parameters for instance : %s", sctpParams.ka_message);
// Boost.Log sink: write to a temp file under the volume, rotate every
// 10 MiB or hourly, collect rotated files into sctpParams.volume.
293     // Files written to the current working directory
294     boostLogger = logging::add_file_log(
295             keywords::file_name = tmpLogFilespec, // to temp directory
296             keywords::rotation_size = 10 * 1024 * 1024,
297             keywords::time_based_rotation = sinks::file::rotation_at_time_interval(posix_time::hours(1)),
298             keywords::format = "%Message%"
299             //keywords::format = "[%TimeStamp%]: %Message%" // use each tmpStr with time stamp
302     // Setup a destination folder for collecting rotated (closed) files --since the same volumn can use rename()
303     boostLogger->locked_backend()->set_file_collector(sinks::file::make_collector(
304             keywords::target = sctpParams.volume
307     // Upon restart, scan the directory for files matching the file_name pattern
308     boostLogger->locked_backend()->scan_for_files();
310     // Enable auto-flushing after each tmpStr record written
311     if (mdclog_level_get() >= MDCLOG_DEBUG) {
312         boostLogger->locked_backend()->auto_flush(true);
// Return this host's name via gethostname(); used as the Prometheus push
// gateway instance label. Buffer declaration and success/failure returns are
// elided in this view — presumably returns an empty string on error; confirm.
318 static std::string GetHostName() {
321     if (::gethostname(hostname, sizeof(hostname))) {
// Initialize Prometheus metrics according to sctpParams.prometheusMode:
//  - "pull": start an Exposer (HTTP scrape endpoint) on myIP:prometheusPort.
//  - "push": create a Gateway pointed at prometheusPushAddress ("host:port");
//            the listener() loop pushes asynchronously on epoll timeouts.
// Also registers the shared counter family used for E2T message counters.
327 void startPrometheus(sctp_params_t &sctpParams) {
328     sctpParams.prometheusFamily = &BuildCounter()
330             .Help("E2T message counter")
331             .Labels({{"POD_NAME", sctpParams.podName}})
332             .Register(*sctpParams.prometheusRegistry);
334     if (strcmp(sctpParams.prometheusMode.c_str(),"pull") == 0) {
335         if (mdclog_level_get() >= MDCLOG_DEBUG) {
336             mdclog_write(MDCLOG_DEBUG, "Start Prometheus Pull mode on %s:%s", sctpParams.myIP.c_str(), sctpParams.prometheusPort.c_str());
// NOTE(review): Exposer/Gateway are raw-new'd and never freed — they live
// for the process lifetime, so this is an accepted leak here.
338         sctpParams.prometheusExposer = new Exposer(sctpParams.myIP + ":" + sctpParams.prometheusPort, 1);
339         sctpParams.prometheusExposer->RegisterCollectable(sctpParams.prometheusRegistry);
340     } else if (strcmp(sctpParams.prometheusMode.c_str(),"push") == 0) {
341         if (mdclog_level_get() >= MDCLOG_DEBUG) {
342             mdclog_write(MDCLOG_DEBUG, "Start Prometheus Push mode");
344         const auto labels = Gateway::GetInstanceLabel(GetHostName());
// Split "host:port" on the last separator (`ch` declaration elided here).
348         auto found = sctpParams.prometheusPushAddress.find_last_of(ch);
349         // If string doesn't have character ch present in it,
350         // no gateway can be built and stats will not be sent.
351         if (found != string::npos) {
352             address = sctpParams.prometheusPushAddress.substr(0,found);
353             port = sctpParams.prometheusPushAddress.substr(found + 1);
354             sctpParams.prometheusGateway = new Gateway(address, port, "E2T", labels);
355             sctpParams.prometheusGateway->RegisterCollectable(sctpParams.prometheusRegistry);
357             mdclog_write(MDCLOG_ERR, "failed to build Prometheus gateway no stats will be sent");
// E2Term entry point. Order of initialization (error-exit lines elided in
// this view): seed transaction ids, install signal handlers, calibrate the
// CPU clock, parse CLI args, load configuration, set up Prometheus, epoll,
// RMR, inotify and the SCTP listening port, then spawn one listener thread
// per hardware core and loop sending E2_TERM_INIT until an xApp answers.
362 int main(const int argc, char **argv) {
363     sctp_params_t sctpParams;
// Randomize the starting transaction counter so ids differ across restarts.
366     std::random_device device{};
367     std::mt19937 generator(device());
368     std::uniform_int_distribution<long> distribution(1, (long) 1e12);
369     transactionCounter = distribution(generator);
373 //    uint32_t aux1 = 0;
374 //   st = rdtscp(aux1);
376     unsigned num_cpus = std::thread::hardware_concurrency();
// INFO until the configured level is known (buildConfiguration overrides).
378     mdclog_level_set(MDCLOG_INFO);
380     if (std::signal(SIGINT, catch_function) == SIG_ERR) {
381         mdclog_write(MDCLOG_ERR, "Error initializing SIGINT");
384     if (std::signal(SIGABRT, catch_function)== SIG_ERR) {
385         mdclog_write(MDCLOG_ERR, "Error initializing SIGABRT");
388     if (std::signal(SIGTERM, catch_function)== SIG_ERR) {
389         mdclog_write(MDCLOG_ERR, "Error initializing SIGTERM");
// Calibrate over a 100 ms sleep; result cached in the global cpuClock.
393     cpuClock = approx_CPU_MHz(100);
395     mdclog_write(MDCLOG_DEBUG, "CPU speed %11.11f", cpuClock);
397     auto result = parse(argc, argv, sctpParams);
399     if (buildConfiguration(sctpParams) != 0) {
403     //auto registry = std::make_shared<Registry>();
404     sctpParams.prometheusRegistry = std::make_shared<Registry>();
406     //sctpParams.promtheusFamily = new Family<Counter>("E2T", "E2T message counter", {{"E", sctpParams.podName}});
408     startPrometheus(sctpParams);
411     sctpParams.epoll_fd = epoll_create1(0);
412     if (sctpParams.epoll_fd == -1) {
413         mdclog_write(MDCLOG_ERR, "failed to open epoll descriptor");
417     getRmrContext(sctpParams);
418     if (sctpParams.rmrCtx == nullptr) {
419         close(sctpParams.epoll_fd);
// Each failed stage unwinds everything acquired before it.
423     if (buildInotify(sctpParams) == -1) {
424         close(sctpParams.rmrListenFd);
425         rmr_close(sctpParams.rmrCtx);
426         close(sctpParams.epoll_fd);
430     if (buildListeningPort(sctpParams) != 0) {
431         close(sctpParams.rmrListenFd);
432         rmr_close(sctpParams.rmrCtx);
433         close(sctpParams.epoll_fd);
437     sctpParams.sctpMap = new mapWrapper();
// One epoll-driven listener thread per core, each pinned to a CPU
// (cpuset construction elided in this view).
439     std::vector<std::thread> threads(num_cpus);
440 //    std::vector<std::thread> threads;
443     for (unsigned int i = 0; i < num_cpus; i++) {
444         threads[i] = std::thread(listener, &sctpParams);
449         int rc = pthread_setaffinity_np(threads[i].native_handle(), sizeof(cpu_set_t), &cpuset);
451             mdclog_write(MDCLOG_ERR, "Error calling pthread_setaffinity_np: %d", rc);
456     //loop over term_init until first message from xApp
457     handleTermInit(sctpParams);
459     for (auto &t : threads) {
// Announce this E2Term instance to the E2 Manager: send E2_TERM_INIT, then
// poll num_of_XAPP_messages until any xApp message arrives, re-sending the
// init periodically (every 1000 iterations of the elided wait loop).
466 void handleTermInit(sctp_params_t &sctpParams) {
467     sendTermInit(sctpParams);
468     //send to e2 manager init of e2 term
// Acquire-load pairs with the release fetch_add in listener().
473         auto xappMessages = num_of_XAPP_messages.load(std::memory_order_acquire);
474         if (xappMessages > 0) {
475             if (mdclog_level_get() >= MDCLOG_INFO) {
476                 mdclog_write(MDCLOG_INFO, "Got a message from some appliction, stop sending E2_TERM_INIT");
// (`count` increment and loop/sleep framing elided in this view.)
482         if (count % 1000 == 0) {
483             mdclog_write(MDCLOG_ERR, "GOT No messages from any xApp");
484             sendTermInit(sctpParams);
// Build and send one E2_TERM_INIT RMR message carrying the pre-built
// keep-alive JSON payload. Retries on rmr_send_msg() returning nullptr
// (allocates a fresh buffer); the enclosing retry loop and the failure
// counter `count` are elided in this view.
489 void sendTermInit(sctp_params_t &sctpParams) {
490     rmr_mbuf_t *msg = rmr_alloc_msg(sctpParams.rmrCtx, sctpParams.ka_message_length);
493         msg->mtype = E2_TERM_INIT;
495         rmr_bytes2payload(msg, (unsigned char *)sctpParams.ka_message, sctpParams.ka_message_length);
// Transaction id: 15-digit decimal from the global counter.
496         static unsigned char tx[32];
497         auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
498         rmr_bytes2xact(msg, tx, txLen);
// rmr_send_msg consumes msg and returns a (possibly new) buffer, or
// nullptr on hard failure.
499         msg = rmr_send_msg(sctpParams.rmrCtx, msg);
500         if (msg == nullptr) {
501             msg = rmr_alloc_msg(sctpParams.rmrCtx, sctpParams.ka_message_length);
502         } else if (msg->state == 0) {
504             if (mdclog_level_get() >= MDCLOG_INFO) {
505                 mdclog_write(MDCLOG_INFO, "E2_TERM_INIT succsesfuly sent ");
// Rate-limit the error log to one line per 100 attempts.
509             if (count % 100 == 0) {
510                 mdclog_write(MDCLOG_ERR, "Error sending E2_TERM_INIT cause : %s ", translateRmrErrorMessages(msg->state).c_str());
// Parse the command line: -p/--path (config directory, default "config") and
// -f/--file (config file name, default "config.conf") are written directly
// into sctpParams; -h/--help prints usage (the exit after help and the final
// return of `result` are elided in this view).
525 cxxopts::ParseResult parse(int argc, char *argv[], sctp_params_t &sctpParams) {
526     cxxopts::Options options(argv[0], "e2 term help");
527     options.positional_help("[optional args]").show_positional_help();
528     options.allow_unrecognised_options().add_options()
529             ("p,path", "config file path", cxxopts::value<std::string>(sctpParams.configFilePath)->default_value("config"))
530             ("f,file", "config file name", cxxopts::value<std::string>(sctpParams.configFileName)->default_value("config.conf"))
531             ("h,help", "Print help");
533     auto result = options.parse(argc, argv);
535     if (result.count("help")) {
536         std::cout << options.help({""}) << std::endl;
// Watch the configuration directory with inotify so handleConfigChange()
// can re-read settings when the config file is rewritten.
545  * @return -1 failed 0 success
547 int buildInotify(sctp_params_t &sctpParams) {
// Non-blocking so the fd can sit in the edge/level-polled epoll set.
548     sctpParams.inotifyFD = inotify_init1(IN_NONBLOCK);
549     if (sctpParams.inotifyFD == -1) {
550         mdclog_write(MDCLOG_ERR, "Failed to init inotify (inotify_init1) %s", strerror(errno));
// Failure here tears down everything acquired earlier in main().
551         close(sctpParams.rmrListenFd);
552         rmr_close(sctpParams.rmrCtx);
553         close(sctpParams.epoll_fd);
// Watch the whole directory (not the single file) for open/close events;
// handleConfigChange filters to IN_CLOSE_WRITE on the config file name.
557     sctpParams.inotifyWD = inotify_add_watch(sctpParams.inotifyFD,
558                                              (const char *)sctpParams.configFilePath.c_str(),
559                                              (unsigned)IN_OPEN | (unsigned)IN_CLOSE_WRITE | (unsigned)IN_CLOSE_NOWRITE); //IN_CLOSE = (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE)
560     if (sctpParams.inotifyWD == -1) {
561         mdclog_write(MDCLOG_ERR, "Failed to add directory : %s to inotify (inotify_add_watch) %s",
562                      sctpParams.configFilePath.c_str(),
564         close(sctpParams.inotifyFD);
// Level-triggered EPOLLIN (no EPOLLET) — events stay pending until read.
568     struct epoll_event event{};
569     event.events = (EPOLLIN);
570     event.data.fd = sctpParams.inotifyFD;
571     // add listening RMR FD to epoll
572     if (epoll_ctl(sctpParams.epoll_fd, EPOLL_CTL_ADD, sctpParams.inotifyFD, &event)) {
573         mdclog_write(MDCLOG_ERR, "Failed to add inotify FD to epoll");
574         close(sctpParams.inotifyFD);
// Worker-thread body (one per CPU, spawned from main). Owns a private pair
// of RMR buffers and loops on epoll, dispatching each ready fd to:
//  - error/HUP         -> handlepoll_error()
//  - EPOLLOUT          -> handleEinprogressMessages() (deferred SCTP connect)
//  - the listen socket -> accept a new RAN (CU) connection
//  - the RMR fd        -> receiveXappMessages()
//  - the inotify fd    -> handleConfigChange()
//  - anything else     -> receiveDataFromSctp() (data from a RAN peer)
// A zero-event epoll timeout doubles as the Prometheus push-gateway tick.
// NOTE(review): many framing lines (loop headers, closing braces, some
// declarations such as `tid`, `in_len`, `gateWay`) are elided in this view.
585 void listener(sctp_params_t *params) {
586     int num_of_SCTP_messages = 0;
587     auto totalTime = 0.0;
589     mdclog_level_set(params->logLevel);
// Capture this thread's id as text: temporarily redirect cout, stream the
// id into a string buffer, then use it as an MDC field.
591     std::thread::id this_id = std::this_thread::get_id();
593     streambuf *oldCout = cout.rdbuf();
594     ostringstream memCout;
596     cout.rdbuf(memCout.rdbuf());
598     //return to the normal cout
602     memcpy(tid, memCout.str().c_str(), memCout.str().length() < 32 ? memCout.str().length() : 31);
603     tid[memCout.str().length()] = 0;
604     mdclog_mdc_add("thread id", tid);
606     if (mdclog_level_get() >= MDCLOG_DEBUG) {
607         mdclog_write(MDCLOG_DEBUG, "started thread number %s", tid);
// Per-thread RMR receive/send buffers sized for the largest xApp message.
610     RmrMessagesBuffer_t rmrMessageBuffer{};
611     //create and init RMR
612     rmrMessageBuffer.rmrCtx = params->rmrCtx;
// NOTE(review): `events` is calloc'd here and not freed in this view —
// thread-lifetime allocation; confirm against the full file.
614     auto *events = (struct epoll_event *) calloc(MAXEVENTS, sizeof(struct epoll_event));
615     struct timespec end{0, 0};
616     struct timespec start{0, 0};
618     rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
619     rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
// Copy the keep-alive payload into this thread's buffer and NUL-terminate.
621     memcpy(rmrMessageBuffer.ka_message, params->ka_message, params->ka_message_length);
622     rmrMessageBuffer.ka_message_len = params->ka_message_length;
623     rmrMessageBuffer.ka_message[rmrMessageBuffer.ka_message_len] = 0;
625     if (mdclog_level_get() >= MDCLOG_DEBUG) {
626         mdclog_write(MDCLOG_DEBUG, "keep alive message is : %s", rmrMessageBuffer.ka_message);
629     ReportingMessages_t message {};
631 //    for (int i = 0; i < MAX_RMR_BUFF_ARRY; i++) {
632 //        rmrMessageBuffer.rcvBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
633 //        rmrMessageBuffer.sendBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
// Tracks whether an async Prometheus push is in flight (gateWay future).
636     bool gatewayflag = false;
640         if (mdclog_level_get() >= MDCLOG_DEBUG) {
641             mdclog_write(MDCLOG_DEBUG, "Start EPOLL Wait. Timeout = %d", params->epollTimeOut);
643         auto numOfEvents = epoll_wait(params->epoll_fd, events, MAXEVENTS, params->epollTimeOut);
// Timeout with no events: opportunity to push stats to Prometheus.
644         if (numOfEvents == 0) {
645             if (params->prometheusGateway != nullptr) {
646                 gateWay = params->prometheusGateway->AsyncPush();
650         } else if (numOfEvents < 0) {
// EINTR is benign (signal during wait) — retry; anything else is fatal
// for this thread.
651             if (errno == EINTR) {
652                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
653                     mdclog_write(MDCLOG_DEBUG, "got EINTR : %s", strerror(errno));
657             mdclog_write(MDCLOG_ERR, "Epoll wait failed, errno = %s", strerror(errno));
// Harvest the result of a previously started async push.
662             auto rc = gateWay.get();
664                 mdclog_write(MDCLOG_ERR, "Async Send to Promethues faild with Return Code %d", rc);
665             } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
666                 mdclog_write(MDCLOG_DEBUG, "Stats sent to Prometheus");
669         for (auto i = 0; i < numOfEvents; i++) {
670             if (mdclog_level_get() >= MDCLOG_DEBUG) {
671                 mdclog_write(MDCLOG_DEBUG, "handling epoll event %d out of %d", i + 1, numOfEvents);
// Timestamp the event both for reporting and latency measurement.
673             clock_gettime(CLOCK_MONOTONIC, &message.message.time);
674             start.tv_sec = message.message.time.tv_sec;
675             start.tv_nsec = message.message.time.tv_nsec;
678             if ((events[i].events & EPOLLERR) || (events[i].events & EPOLLHUP)) {
679                 handlepoll_error(events[i], message, rmrMessageBuffer, params);
680             } else if (events[i].events & EPOLLOUT) {
// Socket from a non-blocking connect() became writable.
681                 handleEinprogressMessages(events[i], message, rmrMessageBuffer, params);
682             } else if (params->listenFD == events[i].data.fd) {
683                 if (mdclog_level_get() >= MDCLOG_INFO) {
684                     mdclog_write(MDCLOG_INFO, "New connection request from sctp network\n");
686                 // new connection is requested from RAN  start build connection
688                 struct sockaddr in_addr {};
690                 char hostBuff[NI_MAXHOST];
691                 char portBuff[NI_MAXSERV];
693                 in_len = sizeof(in_addr);
// Per-peer state; ownership passes to the sctpMap via addToEpoll.
// NOTE(review): error paths below close the fd but don't free(peerInfo)
// in this view — confirm against the full file.
694                 auto *peerInfo = (ConnectedCU_t *)calloc(1, sizeof(ConnectedCU_t));
695                 peerInfo->sctpParams = params;
696                 peerInfo->fileDescriptor = accept(params->listenFD, &in_addr, &in_len);
697                 if (peerInfo->fileDescriptor == -1) {
698                     if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) {
699                         /* We have processed all incoming connections. */
702                         mdclog_write(MDCLOG_ERR, "Accept error, errno = %s", strerror(errno));
706                 if (setSocketNoBlocking(peerInfo->fileDescriptor) == -1) {
707                     mdclog_write(MDCLOG_ERR, "setSocketNoBlocking failed to set new connection %s on port %s\n", hostBuff, portBuff);
708                     close(peerInfo->fileDescriptor);
// Resolve numeric host/port strings into the peer record for logging.
711                 auto  ans = getnameinfo(&in_addr, in_len,
712                                         peerInfo->hostName, NI_MAXHOST,
713                                         peerInfo->portNumber, NI_MAXSERV, (unsigned )((unsigned int)NI_NUMERICHOST | (unsigned int)NI_NUMERICSERV));
715                     mdclog_write(MDCLOG_ERR, "Failed to get info on connection request. %s\n", strerror(errno));
716                     close(peerInfo->fileDescriptor);
719                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
720                     mdclog_write(MDCLOG_DEBUG, "Accepted connection on descriptor %d (host=%s, port=%s)\n", peerInfo->fileDescriptor, peerInfo->hostName, peerInfo->portNumber);
722                 peerInfo->isConnected = false;
723                 peerInfo->gotSetup = false;
724                 if (addToEpoll(params->epoll_fd,
727                                params->sctpMap, nullptr,
733             } else if (params->rmrListenFd == events[i].data.fd) {
734                 // got message from XAPP
// Release stores pair with the acquire load in handleTermInit().
735                 num_of_XAPP_messages.fetch_add(1, std::memory_order_release);
736                 num_of_messages.fetch_add(1, std::memory_order_release);
737                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
738                     mdclog_write(MDCLOG_DEBUG, "new message from RMR");
740                 if (receiveXappMessages(params->sctpMap,
742                                         message.message.time) != 0) {
743                     mdclog_write(MDCLOG_ERR, "Error handling Xapp message");
745             } else if (params->inotifyFD == events[i].data.fd) {
746                 mdclog_write(MDCLOG_INFO, "Got event from inotify (configuration update)");
747                 handleConfigChange(params);
749                 /* We RMR_ERR_RETRY have data on the fd waiting to be read. Read and display it.
750                  * We must read whatever data is available completely, as we are running
751                  *  in edge-triggered mode and won't get a notification again for the same data. */
752                 num_of_messages.fetch_add(1, std::memory_order_release);
753                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
754                     mdclog_write(MDCLOG_DEBUG, "new message from SCTP, epoll flags are : %0x", events[i].events);
756                 receiveDataFromSctp(&events[i],
758                                     num_of_SCTP_messages,
760                                     message.message.time);
// Accumulate per-message handling time (INFO+) for diagnostics.
763             clock_gettime(CLOCK_MONOTONIC, &end);
764             if (mdclog_level_get() >= MDCLOG_INFO) {
765                 totalTime += ((end.tv_sec + 1.0e-9 * end.tv_nsec) -
766                               ((double) start.tv_sec + 1.0e-9 * start.tv_nsec));
768             if (mdclog_level_get() >= MDCLOG_DEBUG) {
769                 mdclog_write(MDCLOG_DEBUG, "message handling is %ld seconds %ld nanoseconds",
770                              end.tv_sec - start.tv_sec,
771                              end.tv_nsec - start.tv_nsec);
// React to inotify events on the configuration directory: drain all pending
// events, and on IN_CLOSE_WRITE of *our* config file re-read the dynamic
// settings (loglevel, trace, prometheusPushTimeOut). Static settings (ports,
// addresses) are intentionally not re-applied here.
781 void handleConfigChange(sctp_params_t *sctpParams) {
// Buffer aligned for inotify_event records read straight off the fd.
782     char buf[4096] __attribute__ ((aligned(__alignof__(struct inotify_event))));
783     const struct inotify_event *event;
786     path p = (sctpParams->configFilePath + "/" + sctpParams->configFileName).c_str();
787     auto endlessLoop = true;
788     while (endlessLoop) {
789         auto len = read(sctpParams->inotifyFD, buf, sizeof buf);
// EAGAIN means the (non-blocking) fd is drained; anything else is an error.
791             if (errno != EAGAIN) {
792                 mdclog_write(MDCLOG_ERR, "read %s ", strerror(errno));
// Walk the variable-length event records packed into buf.
802         for (ptr = buf; ptr < buf + len; ptr += sizeof(struct inotify_event) + event->len) {
803             event = (const struct inotify_event *)ptr;
804             if (event->mask & (uint32_t)IN_ISDIR) {
// Skip directory events; only file events matter.
808             // the directory name
809             if (sctpParams->inotifyWD == event->wd) {
// Only react to the configured file name (ignore siblings in the dir).
813                 auto retVal = strcmp(sctpParams->configFileName.c_str(), event->name);
818                 // only the file we want
819                 if (event->mask & (uint32_t)IN_CLOSE_WRITE) {
820                     if (mdclog_level_get() >= MDCLOG_INFO) {
821                         mdclog_write(MDCLOG_INFO, "Configuration file changed");
// Same 2 KiB sanity limit as buildConfiguration().
824                     const int size = 2048;
825                     auto fileSize = file_size(p);
826                     if (fileSize > size) {
827                         mdclog_write(MDCLOG_ERR, "File %s larger than %d", p.string().c_str(), size);
831                         mdclog_write(MDCLOG_ERR, "Configuration File %s not exists", p.string().c_str());
836                     if (conf.openConfigFile(p.string()) == -1) {
837                         mdclog_write(MDCLOG_ERR, "Filed to open config file %s, %s",
838                                      p.string().c_str(), strerror(errno));
// Re-apply log level (same mapping as buildConfiguration).
842                     auto tmpStr = conf.getStringValue("loglevel");
843                     if (tmpStr.length() == 0) {
844                         mdclog_write(MDCLOG_ERR, "illigal loglevel. Set loglevel to MDCLOG_INFO");
847                     transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
849                     if ((tmpStr.compare("debug")) == 0) {
850                         mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_DEBUG");
851                         sctpParams->logLevel = MDCLOG_DEBUG;
852                     } else if ((tmpStr.compare("info")) == 0) {
853                         mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_INFO");
854                         sctpParams->logLevel = MDCLOG_INFO;
855                     } else if ((tmpStr.compare("warning")) == 0) {
856                         mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_WARN");
857                         sctpParams->logLevel = MDCLOG_WARN;
858                     } else if ((tmpStr.compare("error")) == 0) {
859                         mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_ERR");
860                         sctpParams->logLevel = MDCLOG_ERR;
862                         mdclog_write(MDCLOG_ERR, "illigal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
863                         sctpParams->logLevel = MDCLOG_INFO;
865                     mdclog_level_set(sctpParams->logLevel);
// Re-apply the trace flag; unknown values default to stop (false).
868                     tmpStr = conf.getStringValue("trace");
869                     if (tmpStr.length() == 0) {
870                         mdclog_write(MDCLOG_ERR, "illigal trace. Set trace to stop");
874                     transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
875                     if ((tmpStr.compare("start")) == 0) {
876                         mdclog_write(MDCLOG_INFO, "Trace set to: start");
877                         sctpParams->trace = true;
878                     } else if ((tmpStr.compare("stop")) == 0) {
879                         mdclog_write(MDCLOG_INFO, "Trace set to: stop");
880                         sctpParams->trace = false;
882                         mdclog_write(MDCLOG_ERR, "Trace was set to wrong value %s, set to stop", tmpStr.c_str());
883                         sctpParams->trace = false;
885                     jsonTrace = sctpParams->trace;
// In push mode the epoll timeout (push interval) is also reloadable.
887                     if (strcmp(sctpParams->prometheusMode.c_str(), "push") == 0) {
888                         auto timeout = conf.getIntValue("prometheusPushTimeOut");
889                         if (timeout >= 5 && timeout <= 300) {
890                             sctpParams->epollTimeOut = timeout * 1000;
892                             mdclog_write(MDCLOG_ERR, "prometheusPushTimeOut set wrong value %d, values are [5..300]",
// Completion handler for a non-blocking SCTP connect: the peer's socket
// became writable (EPOLLOUT). Check SO_ERROR; on failure report
// RIC_SCTP_CONNECTION_FAILURE to the xApp, otherwise mark the peer
// connected, switch its epoll registration to EPOLLIN|EPOLLET, and flush
// the SETUP request buffered in peerInfo->asnData while connecting.
907  * @param rmrMessageBuffer
910 void handleEinprogressMessages(struct epoll_event &event,
911                                ReportingMessages_t &message,
912                                RmrMessagesBuffer_t &rmrMessageBuffer,
913                                sctp_params_t *params) {
914     auto *peerInfo = (ConnectedCU_t *)event.data.ptr;
915     memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
917     mdclog_write(MDCLOG_INFO, "file descriptor %d got EPOLLOUT", peerInfo->fileDescriptor);
// NOTE(review): retValLen starts at 0 (should be sizeof(retVal) for
// getsockopt), and the `else if (retVal != 0)` branch is unreachable since
// the first condition already covers retVal != 0 — verify against upstream.
919     socklen_t retValLen = 0;
920     auto rc = getsockopt(peerInfo->fileDescriptor, SOL_SOCKET, SO_ERROR, &retVal, &retValLen);
921     if (rc != 0 || retVal != 0) {
923             rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
924                                                          "%s|Failed SCTP Connection, after EINPROGRESS the getsockopt%s",
925                                                          peerInfo->enodbName, strerror(errno));
926         } else if (retVal != 0) {
927             rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
928                                                          "%s|Failed SCTP Connection after EINPROGRESS, SO_ERROR",
929                                                          peerInfo->enodbName);
// Notify the xApp of the connection failure ('N' = internal/notification
// direction — TODO confirm the direction-code convention).
932         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
933         message.message.asnLength = rmrMessageBuffer.sendMessage->len;
934         mdclog_write(MDCLOG_ERR, "%s", rmrMessageBuffer.sendMessage->payload);
935         message.message.direction = 'N';
936         if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
937             mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
// Drop the buffered SETUP payload — it will never be sent.
939         memset(peerInfo->asnData, 0, peerInfo->asnLength);
940         peerInfo->asnLength = 0;
// Success: connection established; stop watching for writability.
945     peerInfo->isConnected = true;
947     if (modifyToEpoll(params->epoll_fd, peerInfo, (EPOLLIN | EPOLLET), params->sctpMap, peerInfo->enodbName,
948                       peerInfo->mtype) != 0) {
949         mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_MOD");
// Send the SETUP/ENDC-SETUP request that was queued while connecting.
953     message.message.asndata = (unsigned char *)peerInfo->asnData;
954     message.message.asnLength = peerInfo->asnLength;
955     message.message.messageType = peerInfo->mtype;
956     memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
957     num_of_messages.fetch_add(1, std::memory_order_release);
958     if (mdclog_level_get() >= MDCLOG_DEBUG) {
959         mdclog_write(MDCLOG_DEBUG, "send the delayed SETUP/ENDC SETUP to sctp for %s",
960                      message.message.enodbName);
962     if (sendSctpMsg(peerInfo, message, params->sctpMap) != 0) {
963         if (mdclog_level_get() >= MDCLOG_DEBUG) {
964             mdclog_write(MDCLOG_DEBUG, "Error write to SCTP  %s %d", __func__, __LINE__);
// Clear the now-consumed buffered payload.
969     memset(peerInfo->asnData, 0, peerInfo->asnLength);
970     peerInfo->asnLength = 0;
// Handle EPOLLERR/EPOLLHUP on a monitored fd. For a RAN-peer socket: report
// RIC_SCTP_CONNECTION_FAILURE to the xApp, close the socket, and purge the
// peer from the sctp map. Errors on the RMR fd itself are only logged.
975 void handlepoll_error(struct epoll_event &event,
976                       ReportingMessages_t &message,
977                       RmrMessagesBuffer_t &rmrMessageBuffer,
978                       sctp_params_t *params) {
979     if (event.data.fd != params->rmrListenFd) {
980         auto *peerInfo = (ConnectedCU_t *)event.data.ptr;
981         mdclog_write(MDCLOG_ERR, "epoll error, events %0x on fd %d, RAN NAME : %s",
982                      event.events, peerInfo->fileDescriptor, peerInfo->enodbName);
// Build the "eNB-name|reason" payload forwarded to the xApp.
984         rmrMessageBuffer.sendMessage->len = snprintf((char *)rmrMessageBuffer.sendMessage->payload, 256,
985                                                      "%s|Failed SCTP Connection",
986                                                      peerInfo->enodbName);
987         message.message.asndata = rmrMessageBuffer.sendMessage->payload;
988         message.message.asnLength = rmrMessageBuffer.sendMessage->len;
990         memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
991         message.message.direction = 'N';
992         if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
993             mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
// Tear down the peer: close the socket and remove every map entry
// (cleanHashEntry also frees the ConnectedCU_t).
996         close(peerInfo->fileDescriptor);
997         params->sctpMap->erase(peerInfo->enodbName);
998         cleanHashEntry((ConnectedCU_t *) event.data.ptr, params->sctpMap);
1000         mdclog_write(MDCLOG_ERR, "epoll error, events %0x on RMR FD", event.events);
// Set O_NONBLOCK on `socket` via fcntl(F_GETFL/F_SETFL).
// Returns -1 on fcntl failure (success return elided in this view).
1008 int setSocketNoBlocking(int socket) {
1009     auto flags = fcntl(socket, F_GETFL, 0);
// (The flags == -1 check is elided in this view.)
1012         mdclog_mdc_add("func", "fcntl");
1013         mdclog_write(MDCLOG_ERR, "%s, %s", __FUNCTION__, strerror(errno));
// Unsigned casts avoid a signed-bitwise-OR warning on the flag merge.
1018     flags = (unsigned) flags | (unsigned) O_NONBLOCK;
1019     if (fcntl(socket, F_SETFL, flags) == -1) {
1020         mdclog_mdc_add("func", "fcntl");
1021         mdclog_write(MDCLOG_ERR, "%s, %s", __FUNCTION__, strerror(errno));
// Remove a peer's entries from the sctp map: both the "host:<name>:<port>"
// key and the eNB-name key. (The free of `val` itself is elided in this
// view — presumably follows; confirm against the full file.)
1034 void cleanHashEntry(ConnectedCU_t *val, Sctp_Map_t *m) {
// Parse the textual port back to a number to rebuild the host key.
1036     auto port = (uint16_t) strtol(val->portNumber, &dummy, 10);
1037     char searchBuff[2048]{};
1039     snprintf(searchBuff, sizeof searchBuff, "host:%s:%d", val->hostName, port);
1040     m->erase(searchBuff);
1042     m->erase(val->enodbName);
1048  * @param fd file descriptor
1049  * @param data the asn data to send
1050  * @param len length of the data
1051  * @param enodbName the enodbName as in the map, for printing purposes
1052  * @param m map host information
1053  * @param mtype message number
1054  * @return 0 on success, a negative number on failure
// Send one ASN.1-encoded message over the peer's SCTP socket. On a write
// error (other than EINTR, which retries) the peer is torn down via
// cleanHashEntry() and any pending "msg:<name>|<type>" retry entry is
// dropped from the map. On success the message is recorded ('D' direction,
// toward the RAN — TODO confirm code convention) via buildJsonMessage().
1056 int sendSctpMsg(ConnectedCU_t *peerInfo, ReportingMessages_t &message, Sctp_Map_t *m) {
1057     auto loglevel = mdclog_level_get();
1058     int fd = peerInfo->fileDescriptor;
1059     if (loglevel >= MDCLOG_DEBUG) {
1060         mdclog_write(MDCLOG_DEBUG, "Send SCTP message for CU %s, %s",
1061                      message.message.enodbName, __FUNCTION__);
// MSG_NOSIGNAL: get EPIPE instead of SIGPIPE if the peer vanished.
1065         if (send(fd,message.message.asndata, message.message.asnLength,MSG_NOSIGNAL) < 0) {
1066             if (errno == EINTR) {
// Interrupted by a signal — loop and retry the send.
1069             mdclog_write(MDCLOG_ERR, "error writing to CU a message, %s ", strerror(errno));
// A failure while still connecting is reported distinctly.
1070             if (!peerInfo->isConnected) {
1071                 mdclog_write(MDCLOG_ERR, "connection to CU %s is still in progress.", message.message.enodbName);
1074             cleanHashEntry(peerInfo, m);
// Also discard any queued retry message keyed "msg:<name>|<type>".
1076             char key[MAX_ENODB_NAME_SIZE * 2];
1077             snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", message.message.enodbName,
1078                      message.message.messageType);
1079             if (loglevel >= MDCLOG_DEBUG) {
1080                 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
1082             auto tmp = m->find(key);
// Success path: record the outbound message for tracing.
1089         message.message.direction = 'D';
1090         // send report.buffer of size
1091         buildJsonMessage(message);
1093         if (loglevel >= MDCLOG_DEBUG) {
1094             mdclog_write(MDCLOG_DEBUG,
1095                          "SCTP message for CU %s sent from %s",
1096                          message.message.enodbName,
1106 * @param rmrMessageBuffer
// Copy the received RMR message's payload pointer and length into the
// reporting structure. Note: 'asndata' aliases the RMR receive buffer —
// it is not a copy, so it is only valid while rcvMessage is alive.
1108 void getRequestMetaData(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1109 message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
1110 message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
1112 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1113 mdclog_write(MDCLOG_DEBUG, "Message from Xapp RAN name = %s message length = %ld",
1114 message.message.enodbName, (unsigned long) message.message.asnLength);
1124 * @param numOfMessages
1125 * @param rmrMessageBuffer
// Read an inbound SCTP message straight into the RMR send buffer, decode it
// as an E2AP PDU (aligned PER) and dispatch by PDU type to the
// initiating/successful/unsuccessful outcome handlers. A zero-byte read means
// the remote closed the connection: the xApp is notified via
// RIC_SCTP_CONNECTION_FAILURE and the fd/map entries are cleaned up.
// NOTE(review): many lines are elided in this excerpt (the surrounding read
// loop, break/continue statements, some braces) — the comments below annotate
// only the visible lines; confirm control flow against full source.
1129 int receiveDataFromSctp(struct epoll_event *events,
1130 Sctp_Map_t *sctpMap,
1132 RmrMessagesBuffer_t &rmrMessageBuffer,
1133 struct timespec &ts) {
1134 /* We have data on the fd waiting to be read. Read and display it.
1135 * We must read whatever data is available completely, as we are running
1136 * in edge-triggered mode and won't get a notification again for the same data. */
1137 ReportingMessages_t message {};
1139 auto loglevel = mdclog_level_get();
1141 // get the identity of the interface
1142 message.peerInfo = (ConnectedCU_t *)events->data.ptr;
1144 struct timespec start{0, 0};
1145 struct timespec decodestart{0, 0};
1146 struct timespec end{0, 0};
1148 E2AP_PDU_t *pdu = nullptr;
1151 if (loglevel >= MDCLOG_DEBUG) {
1152 mdclog_write(MDCLOG_DEBUG, "Start Read from SCTP %d fd", message.peerInfo->fileDescriptor);
1153 clock_gettime(CLOCK_MONOTONIC, &start);
1155 // read the buffer directly to rmr payload
1156 message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1157 message.message.asnLength = rmrMessageBuffer.sendMessage->len =
1158 read(message.peerInfo->fileDescriptor, rmrMessageBuffer.sendMessage->payload, RECEIVE_SCTP_BUFFER_SIZE);
1160 if (loglevel >= MDCLOG_DEBUG) {
1161 mdclog_write(MDCLOG_DEBUG, "Finish Read from SCTP %d fd message length = %ld",
1162 message.peerInfo->fileDescriptor, message.message.asnLength);
// Stamp the reporting record: uplink direction, peer identity, receive time.
1165 memcpy(message.message.enodbName, message.peerInfo->enodbName, sizeof(message.peerInfo->enodbName));
1166 message.message.direction = 'U';
1167 message.message.time.tv_nsec = ts.tv_nsec;
1168 message.message.time.tv_sec = ts.tv_sec;
// read() < 0: EINTR presumably retries (elided); EAGAIN means the edge-triggered
// fd is drained; anything else is a real read error.
1170 if (message.message.asnLength < 0) {
1171 if (errno == EINTR) {
1174 /* If errno == EAGAIN, that means we have read all
1175 data. So go back to the main loop. */
1176 if (errno != EAGAIN) {
1177 mdclog_write(MDCLOG_ERR, "Read error, %s ", strerror(errno));
1179 } else if (loglevel >= MDCLOG_DEBUG) {
1180 mdclog_write(MDCLOG_DEBUG, "EAGAIN - descriptor = %d", message.peerInfo->fileDescriptor);
1183 } else if (message.message.asnLength == 0) {
1184 /* End of file. The remote has closed the connection. */
1185 if (loglevel >= MDCLOG_INFO) {
1186 mdclog_write(MDCLOG_INFO, "END of File Closed connection - descriptor = %d",
1187 message.peerInfo->fileDescriptor);
// Debug-only hex dump of the raw PDU bytes plus read-timing stats.
1193 if (loglevel >= MDCLOG_DEBUG) {
1194 char printBuffer[4096]{};
1195 char *tmp = printBuffer;
1196 for (size_t i = 0; i < (size_t)message.message.asnLength; ++i) {
1197 snprintf(tmp, 3, "%02x", message.message.asndata[i]);
1200 printBuffer[message.message.asnLength] = 0;
1201 clock_gettime(CLOCK_MONOTONIC, &end);
1202 mdclog_write(MDCLOG_DEBUG, "Before Encoding E2AP PDU for : %s, Read time is : %ld seconds, %ld nanoseconds",
1203 message.peerInfo->enodbName, end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
1204 mdclog_write(MDCLOG_DEBUG, "PDU buffer length = %ld, data = : %s", message.message.asnLength,
1206 clock_gettime(CLOCK_MONOTONIC, &decodestart);
// Decode the raw bytes into an E2AP PDU (aligned PER).
1209 auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1210 message.message.asndata, message.message.asnLength);
1211 if (rval.code != RC_OK) {
1212 mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2AP PDU from RAN : %s", rval.code,
1213 message.peerInfo->enodbName);
1217 if (loglevel >= MDCLOG_DEBUG) {
1218 clock_gettime(CLOCK_MONOTONIC, &end);
1219 mdclog_write(MDCLOG_DEBUG, "After Encoding E2AP PDU for : %s, Read time is : %ld seconds, %ld nanoseconds",
1220 message.peerInfo->enodbName, end.tv_sec - decodestart.tv_sec, end.tv_nsec - decodestart.tv_nsec);
// Debug-only pretty print of the decoded PDU into an in-memory stream.
1223 FILE *stream = open_memstream(&printBuffer, &size);
1224 asn_fprint(stream, &asn_DEF_E2AP_PDU, pdu);
1225 mdclog_write(MDCLOG_DEBUG, "Encoding E2AP PDU past : %s", printBuffer);
1226 clock_gettime(CLOCK_MONOTONIC, &decodestart);
// Dispatch by E2AP PDU type.
1229 switch (pdu->present) {
1230 case E2AP_PDU_PR_initiatingMessage: {//initiating message
1231 asnInitiatingRequest(pdu, sctpMap,message, rmrMessageBuffer);
1234 case E2AP_PDU_PR_successfulOutcome: { //successful outcome
1235 asnSuccsesfulMsg(pdu, sctpMap, message, rmrMessageBuffer);
1238 case E2AP_PDU_PR_unsuccessfulOutcome: { //Unsuccessful Outcome
1239 asnUnSuccsesfulMsg(pdu, sctpMap, message, rmrMessageBuffer);
1243 mdclog_write(MDCLOG_ERR, "Unknown index %d in E2AP PDU", pdu->present);
1246 if (loglevel >= MDCLOG_DEBUG) {
1247 clock_gettime(CLOCK_MONOTONIC, &end);
1248 mdclog_write(MDCLOG_DEBUG,
1249 "After processing message and sent to rmr for : %s, Read time is : %ld seconds, %ld nanoseconds",
1250 message.peerInfo->enodbName, end.tv_sec - decodestart.tv_sec, end.tv_nsec - decodestart.tv_nsec);
// Reset (not free) the PDU so the struct can be reused; the free is intentionally disabled.
1253 if (pdu != nullptr) {
1254 ASN_STRUCT_RESET(asn_DEF_E2AP_PDU, pdu);
1255 //ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);
// Connection-closed path: tell the xApp the CU went away, then tear down the fd.
1261 if (loglevel >= MDCLOG_INFO) {
1262 mdclog_write(MDCLOG_INFO, "Closed connection - descriptor = %d", message.peerInfo->fileDescriptor);
1264 message.message.asnLength = rmrMessageBuffer.sendMessage->len =
1265 snprintf((char *)rmrMessageBuffer.sendMessage->payload,
1267 "%s|CU disconnected unexpectedly",
1268 message.peerInfo->enodbName);
1269 message.message.asndata = rmrMessageBuffer.sendMessage->payload;
1271 if (sendRequestToXapp(message,
1272 RIC_SCTP_CONNECTION_FAILURE,
1273 rmrMessageBuffer) != 0) {
1274 mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
1277 /* Closing descriptor make epoll remove it from the set of descriptors which are monitored. */
1278 close(message.peerInfo->fileDescriptor);
1279 cleanHashEntry((ConnectedCU_t *) events->data.ptr, sctpMap);
1281 if (loglevel >= MDCLOG_DEBUG) {
1282 clock_gettime(CLOCK_MONOTONIC, &end);
1283 mdclog_write(MDCLOG_DEBUG, "from receive SCTP to send RMR time is %ld seconds and %ld nanoseconds",
1284 end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
// Encode the E2AP PDU as XER (XML), strip whitespace, prefix it with
// "<myIP>:<rmrPort>|" and forward it to the E2 Manager over RMR, with one
// retry on RMR_ERR_RETRY. Marks the peer as having completed setup and
// records the message via buildJsonMessage.
// NOTE(review): lines are elided in this excerpt (declarations of 'er' and
// 'rmrMsg', the payload argument of the snprintf at 1340, returns/braces)
// — confirm against full source.
1290 static void buildAndsendSetupRequest(ReportingMessages_t &message,
1291 RmrMessagesBuffer_t &rmrMessageBuffer,
1293 string const &messageName,
1294 string const &ieName,
1295 vector<string> &functionsToAdd_v,
1296 vector<string> &functionsToModified_v*/) {
1297 auto logLevel = mdclog_level_get();
1298 // now we can send the data to e2Mgr
1301 auto buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
1302 unsigned char buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
// XER-encode the PDU into the stack buffer ('er' declared in an elided line).
1304 er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu, buffer, buffer_size);
1305 if (er.encoded == -1) {
1306 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1308 } else if (er.encoded > (ssize_t) buffer_size) {
// NOTE(review): buffer_size is assigned er.encoded + 128 twice (1309 and
// 1313) and the warning's first %d argument line is elided — looks
// redundant/garbled; confirm against full source.
1309 buffer_size = er.encoded + 128;
1310 mdclog_write(MDCLOG_WARN, "Buffer of size %d is to small for %s. Reallocate buffer of size %d",
1312 asn_DEF_E2AP_PDU.name, buffer_size);
1313 buffer_size = er.encoded + 128;
1317 buffer[er.encoded] = '\0';
// Flatten the XER output: drop newlines, tabs and spaces before sending.
1322 string res((char *)buffer);
1323 res.erase(std::remove(res.begin(), res.end(), '\n'), res.end());
1324 res.erase(std::remove(res.begin(), res.end(), '\t'), res.end());
1325 res.erase(std::remove(res.begin(), res.end(), ' '), res.end());
1328 // if (!functionsToAdd_v.empty() || !functionsToModified_v.empty()) {
1329 // res = buildXmlData(messageName, ieName, functionsToAdd_v, functionsToModified_v, buffer, (size_t) er.encoded);
1332 // if (res.length() == 0) {
1333 // rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, buffer_size + 256);
1334 // rmrMsg->len = snprintf((char *) rmrMsg->payload, RECEIVE_SCTP_BUFFER_SIZE * 2, "%s:%d|%s",
1335 // message.peerInfo->sctpParams->myIP.c_str(),
1336 // message.peerInfo->sctpParams->rmrPort,
// Build the RMR payload: "<myIP>:<rmrPort>|<xml>" (the final %s argument line is elided).
1339 rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, (int)res.length() + 256);
1340 rmrMsg->len = snprintf((char *) rmrMsg->payload, res.length() + 256, "%s:%d|%s",
1341 message.peerInfo->sctpParams->myIP.c_str(),
1342 message.peerInfo->sctpParams->rmrPort,
1346 if (logLevel >= MDCLOG_DEBUG) {
1347 mdclog_write(MDCLOG_DEBUG, "Setup request of size %d :\n %s\n", rmrMsg->len, rmrMsg->payload);
1350 rmrMsg->mtype = message.message.messageType;
// MEID carries the E2 node name; XACT gets a fresh transaction id.
1352 rmr_bytes2meid(rmrMsg, (unsigned char *) message.message.enodbName, strlen(message.message.enodbName));
1354 static unsigned char tx[32];
1355 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1356 rmr_bytes2xact(rmrMsg, tx, strlen((const char *) tx));
1358 rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
1359 if (rmrMsg == nullptr) {
1360 mdclog_write(MDCLOG_ERR, "RMR failed to send returned nullptr");
1361 } else if (rmrMsg->state != 0) {
1362 char meid[RMR_MAX_MEID]{};
// Single retry on RMR_ERR_RETRY; any other non-zero state is logged as failure.
1363 if (rmrMsg->state == RMR_ERR_RETRY) {
1366 mdclog_write(MDCLOG_INFO, "RETRY sending Message %d to Xapp from %s",
1367 rmrMsg->mtype, rmr_get_meid(rmrMsg, (unsigned char *) meid));
1368 rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
1369 if (rmrMsg == nullptr) {
1370 mdclog_write(MDCLOG_ERR, "RMR failed send returned nullptr");
1371 } else if (rmrMsg->state != 0) {
1372 mdclog_write(MDCLOG_ERR,
1373 "RMR Retry failed %s sending request %d to Xapp from %s",
1374 translateRmrErrorMessages(rmrMsg->state).c_str(),
1376 rmr_get_meid(rmrMsg, (unsigned char *) meid));
1379 mdclog_write(MDCLOG_ERR, "RMR failed: %s. sending request %d to Xapp from %s",
1380 translateRmrErrorMessages(rmrMsg->state).c_str(),
1382 rmr_get_meid(rmrMsg, (unsigned char *) meid));
// Mark setup complete for this peer and record the message, then release the RMR buffer.
1385 message.peerInfo->gotSetup = true;
1386 buildJsonMessage(message);
1387 if (rmrMsg != nullptr) {
1388 rmr_free_msg(rmrMsg);
// Convert a RANfunctions list into a vector of XML strings: for each
// RANfunction_Item IE, decode its ranFunctionDefinition (aligned PER) as an
// E2SM gNB-NRT RANfunction definition, re-encode it as XER into a stack
// buffer, and append the resulting string to runFunXML_v.
// NOTE(review): lines are elided in this excerpt (error returns, the
// encoder's syntax/buffer arguments at 1418-1424, loop braces, and any
// freeing of 'ranFunDef') — confirm against full source.
1393 int RAN_Function_list_To_Vector(RANfunctions_List_t& list, vector <string> &runFunXML_v) {
1395 runFunXML_v.clear();
1396 for (auto j = 0; j < list.list.count; j++) {
1397 auto *raNfunctionItemIEs = (RANfunction_ItemIEs_t *)list.list.array[j];
1398 if (raNfunctionItemIEs->id == ProtocolIE_ID_id_RANfunction_Item &&
1399 (raNfunctionItemIEs->value.present == RANfunction_ItemIEs__value_PR_RANfunction_Item)) {
// Decode the opaque ranFunctionDefinition octets into an E2SM structure.
1401 E2SM_gNB_NRT_RANfunction_Definition_t *ranFunDef = nullptr;
1402 auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER,
1403 &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
1404 (void **)&ranFunDef,
1405 raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.buf,
1406 raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.size);
1407 if (rval.code != RC_OK) {
1408 mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2SM message from : %s",
1410 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name);
// Re-encode as XML (XER) into a zeroed stack buffer.
1414 auto xml_buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
1415 unsigned char xml_buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
1416 memset(xml_buffer, 0, RECEIVE_SCTP_BUFFER_SIZE * 2);
1418 auto er = asn_encode_to_buffer(nullptr,
1420 &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
1424 if (er.encoded == -1) {
1425 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s",
1426 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
1428 } else if (er.encoded > (ssize_t)xml_buffer_size) {
1429 mdclog_write(MDCLOG_ERR, "Buffer of size %d is to small for %s, at %s line %d",
1430 (int) xml_buffer_size,
1431 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name, __func__, __LINE__);
1433 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1434 mdclog_write(MDCLOG_DEBUG, "Encoding E2SM %s PDU number %d : %s",
1435 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
// Collect the XML string (xml_buffer is NUL-terminated thanks to the memset).
1440 string runFuncs = (char *)(xml_buffer);
1441 runFunXML_v.emplace_back(runFuncs);
// Walk the RICserviceUpdate protocol IEs and fill the Added/Modified
// RAN-function vectors (as XML strings) via RAN_Function_list_To_Vector.
// NOTE(review): return statements and some braces are elided in this
// excerpt — confirm error paths against full source.
1448 int collectServiceUpdate_RequestData(E2AP_PDU_t *pdu,
1449 Sctp_Map_t *sctpMap,
1450 ReportingMessages_t &message,
1451 vector <string> &RANfunctionsAdded_v,
1452 vector <string> &RANfunctionsModified_v) {
1453 memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
1454 for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.count; i++) {
1455 auto *ie = pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.array[i];
1456 if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
// NOTE(review): the Added branch checks PR_RANfunctionsID_List but then reads
// choice.RANfunctions_List, while the Modified branch below checks
// PR_RANfunctions_List — asymmetry looks suspicious; confirm against the
// generated E2AP bindings before changing.
1457 if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctionsID_List) {
1458 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1459 mdclog_write(MDCLOG_DEBUG, "Run function list have %d entries",
1460 ie->value.choice.RANfunctions_List.list.count);
1462 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
1466 } else if (ie->id == ProtocolIE_ID_id_RANfunctionsModified) {
1467 if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctions_List) {
1468 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1469 mdclog_write(MDCLOG_DEBUG, "Run function list have %d entries",
1470 ie->value.choice.RANfunctions_List.list.count);
1472 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsModified_v) != 0 ) {
1478 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1479 mdclog_write(MDCLOG_DEBUG, "Run function vector have %ld entries",
1480 RANfunctionsAdded_v.size());
// Pre-create the per-peer Prometheus counters (message count and byte count)
// for every E2AP procedure this terminator handles, indexed by
// [direction-category][counter-kind][procedure code] so the hot message path
// can just Increment() without map lookups in prometheus.
// NOTE(review): each Add() call passes {{enodbName, "IN"/"OUT"}, {msgName,
// "Messages"/"Bytes"}} — i.e. the eNodeB name and message name are used as
// label *names*, not values; that is unusual for Prometheus labels but is
// what the code does — confirm intent before changing.
1488 void buildPrometheuslist(ConnectedCU_t *peerInfo, Family<Counter> *prometheusFamily) {
// Incoming initiating messages (RAN -> RIC).
1489 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Messages"}});
1490 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Bytes"}});
1492 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Messages"}});
1493 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Bytes"}});
1495 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICindication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Messages"}});
1496 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICindication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Bytes"}});
1498 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Messages"}});
1499 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Bytes"}});
1501 peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Messages"}});
1502 peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Bytes"}});
1503 // ---------------------------------------------
// Incoming successful outcomes (ACKs from the RAN).
1504 peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Messages"}});
1505 peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Bytes"}});
1507 peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Messages"}});
1508 peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Bytes"}});
1510 peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Messages"}});
1511 peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Bytes"}});
1513 peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Messages"}});
1514 peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Bytes"}});
1515 //-------------------------------------------------------------
// Incoming unsuccessful outcomes (failures from the RAN).
1517 peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Messages"}});
1518 peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Bytes"}});
1520 peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Messages"}});
1521 peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Bytes"}});
1523 peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Messages"}});
1524 peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Bytes"}});
1526 //====================================================================================
// Outgoing initiating messages (RIC -> RAN).
1527 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Messages"}});
1528 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Bytes"}});
1530 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Messages"}});
1531 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Bytes"}});
1533 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Messages"}});
1534 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Bytes"}});
1536 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceQuery)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Messages"}});
1537 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceQuery)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Bytes"}});
1539 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Messages"}});
1540 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Bytes"}});
1542 peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Messages"}});
1543 peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Bytes"}});
1544 //---------------------------------------------------------------------------------------------------------
// Outgoing successful outcomes (responses/ACKs sent by the RIC).
1545 peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Messages"}});
1546 peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Bytes"}});
1548 peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Messages"}});
1549 peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Bytes"}});
1551 peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Messages"}});
1552 peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Bytes"}});
1553 //----------------------------------------------------------------------------------------------------------------
// Outgoing unsuccessful outcomes (failures sent by the RIC).
1554 peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Messages"}});
1555 peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Bytes"}});
1557 peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Messages"}});
1558 peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Bytes"}});
1565 * @param RANfunctionsAdded_v
// Extract the E2 node (RAN) name from the E2setupRequest's GlobalE2node_ID IE
// via buildRanName, copy it into the reporting record, and register the peer
// in the SCTP map under that name. The RANfunctionsAdded handling is
// intentionally commented out.
// NOTE(review): return statements and some braces are elided in this
// excerpt — confirm error paths against full source.
1568 int collectSetupRequestData(E2AP_PDU_t *pdu,
1569 Sctp_Map_t *sctpMap,
1570 ReportingMessages_t &message /*, vector <string> &RANfunctionsAdded_v*/) {
1571 memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
1572 for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.count; i++) {
1573 auto *ie = pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.array[i];
1574 if (ie->id == ProtocolIE_ID_id_GlobalE2node_ID) {
1575 // get the ran name for meid
1576 if (ie->value.present == E2setupRequestIEs__value_PR_GlobalE2node_ID) {
1577 if (buildRanName(message.peerInfo->enodbName, ie) < 0) {
1578 mdclog_write(MDCLOG_ERR, "Bad param in E2setupRequestIEs GlobalE2node_ID.\n");
1579 // no message will be sent
// Mirror the name into the reporting record and key the peer into the map.
1583 memcpy(message.message.enodbName, message.peerInfo->enodbName, strlen(message.peerInfo->enodbName));
1584 sctpMap->setkey(message.message.enodbName, message.peerInfo);
1586 } /*else if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
1587 if (ie->value.present == E2setupRequestIEs__value_PR_RANfunctions_List) {
1588 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1589 mdclog_write(MDCLOG_DEBUG, "Run function list have %d entries",
1590 ie->value.choice.RANfunctions_List.list.count);
1592 if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
1598 // if (mdclog_level_get() >= MDCLOG_DEBUG) {
1599 // mdclog_write(MDCLOG_DEBUG, "Run function vector have %ld entries",
1600 // RANfunctionsAdded_v.size());
// Convert the PER-encoded E2AP message held in the RMR send buffer to XER
// (XML) in place: decode aligned-PER to an E2AP PDU, then re-encode as XER
// back into the same payload buffer, updating the message length.
// NOTE(review): return statements and the freeing of 'pdu' are elided in
// this excerpt — confirm against full source.
1605 int XML_From_PER(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
1606 E2AP_PDU_t *pdu = nullptr;
1608 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1609 mdclog_write(MDCLOG_DEBUG, "got PER message of size %d is:%s",
1610 rmrMessageBuffer.sendMessage->len, rmrMessageBuffer.sendMessage->payload);
// Decode the PER bytes currently sitting in the RMR payload.
1612 auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
1613 rmrMessageBuffer.sendMessage->payload, rmrMessageBuffer.sendMessage->len);
1614 if (rval.code != RC_OK) {
1615 mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) setup response from E2MGR : %s",
1617 message.message.enodbName);
// Re-encode as XER, overwriting the same payload buffer.
1621 int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
1622 auto er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu,
1623 rmrMessageBuffer.sendMessage->payload, buff_size);
1624 if (er.encoded == -1) {
1625 mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
1627 } else if (er.encoded > (ssize_t)buff_size) {
1628 mdclog_write(MDCLOG_ERR, "Buffer of size %d is to small for %s, at %s line %d",
1629 (int)rmrMessageBuffer.sendMessage->len,
1630 asn_DEF_E2AP_PDU.name,
// Publish the new (XML) length to the RMR message.
1635 rmrMessageBuffer.sendMessage->len = er.encoded;
1644 * @param rmrMessageBuffer
// Handle an E2AP initiating message received from the RAN: switch on the
// procedure code, bump the matching per-peer Prometheus counters, and route
// the message — setup/service-update via buildAndsendSetupRequest, the rest
// to the xApp via RMR (sendRequestToXapp / sendRmrMessage).
// NOTE(review): break statements, closing braces and some argument lines
// are elided in this excerpt — confirm control flow against full source.
1646 void asnInitiatingRequest(E2AP_PDU_t *pdu,
1647 Sctp_Map_t *sctpMap,
1648 ReportingMessages_t &message,
1649 RmrMessagesBuffer_t &rmrMessageBuffer) {
1650 auto logLevel = mdclog_level_get();
1651 auto procedureCode = ((InitiatingMessage_t *) pdu->choice.initiatingMessage)->procedureCode;
1652 if (logLevel >= MDCLOG_DEBUG) {
1653 mdclog_write(MDCLOG_DEBUG, "Initiating message %ld\n", procedureCode);
1655 switch (procedureCode) {
// E2 Setup Request: register the peer, create its counters, forward to E2MGR.
1656 case ProcedureCode_id_E2setup: {
1657 if (logLevel >= MDCLOG_DEBUG) {
1658 mdclog_write(MDCLOG_DEBUG, "Got E2setup");
1661 // vector <string> RANfunctionsAdded_v;
1662 // vector <string> RANfunctionsModified_v;
1663 // RANfunctionsAdded_v.clear();
1664 // RANfunctionsModified_v.clear();
1665 if (collectSetupRequestData(pdu, sctpMap, message) != 0) {
1669 buildPrometheuslist(message.peerInfo, message.peerInfo->sctpParams->prometheusFamily);
1671 string messageName("E2setupRequest");
1672 string ieName("E2setupRequestIEs");
1673 message.message.messageType = RIC_E2_SETUP_REQ;
1674 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
1675 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment((double)message.message.asnLength);
1676 buildAndsendSetupRequest(message, rmrMessageBuffer, pdu);
// RIC Service Update: forwarded to E2MGR like a setup request.
1679 case ProcedureCode_id_RICserviceUpdate: {
1680 if (logLevel >= MDCLOG_DEBUG) {
1681 mdclog_write(MDCLOG_DEBUG, "Got RICserviceUpdate %s", message.message.enodbName);
1683 // vector <string> RANfunctionsAdded_v;
1684 // vector <string> RANfunctionsModified_v;
1685 // RANfunctionsAdded_v.clear();
1686 // RANfunctionsModified_v.clear();
1687 // if (collectServiceUpdate_RequestData(pdu, sctpMap, message,
1688 // RANfunctionsAdded_v, RANfunctionsModified_v) != 0) {
1692 string messageName("RICserviceUpdate");
1693 string ieName("RICserviceUpdateIEs");
1694 message.message.messageType = RIC_SERVICE_UPDATE;
1695 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
1696 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment((double)message.message.asnLength);
1698 buildAndsendSetupRequest(message, rmrMessageBuffer, pdu);
// Error Indication: relayed to the xApp as-is.
1701 case ProcedureCode_id_ErrorIndication: {
1702 if (logLevel >= MDCLOG_DEBUG) {
1703 mdclog_write(MDCLOG_DEBUG, "Got ErrorIndication %s", message.message.enodbName);
1705 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication]->Increment();
1706 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication]->Increment((double)message.message.asnLength);
1707 if (sendRequestToXapp(message, RIC_ERROR_INDICATION, rmrMessageBuffer) != 0) {
1708 mdclog_write(MDCLOG_ERR, "RIC_ERROR_INDICATION failed to send to xAPP");
// Reset Request: converted from PER to XML, then forwarded to the xApp.
1712 case ProcedureCode_id_Reset: {
1713 if (logLevel >= MDCLOG_DEBUG) {
1714 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
1717 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
1718 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment((double)message.message.asnLength);
1719 if (XML_From_PER(message, rmrMessageBuffer) < 0) {
1723 if (sendRequestToXapp(message, RIC_E2_RESET_REQ, rmrMessageBuffer) != 0) {
1724 mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_REQ message failed to send to xAPP");
// RIC Indication: find the RICrequestID IE, address the RMR message with the
// instance id as sub_id and the node name as MEID, then send to the xApp.
1728 case ProcedureCode_id_RICindication: {
1729 if (logLevel >= MDCLOG_DEBUG) {
1730 mdclog_write(MDCLOG_DEBUG, "Got RICindication %s", message.message.enodbName);
1732 for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.count; i++) {
1733 auto messageSent = false;
1734 RICindication_IEs_t *ie = pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.array[i];
1735 if (logLevel >= MDCLOG_DEBUG) {
1736 mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1738 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1739 if (logLevel >= MDCLOG_DEBUG) {
1740 mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1742 if (ie->value.present == RICindication_IEs__value_PR_RICrequestID) {
1743 static unsigned char tx[32];
1744 message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_INDICATION;
1745 snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1746 rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1747 rmr_bytes2meid(rmrMessageBuffer.sendMessage,
1748 (unsigned char *)message.message.enodbName,
1749 strlen(message.message.enodbName));
1750 rmrMessageBuffer.sendMessage->state = 0;
1751 rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1753 //ie->value.choice.RICrequestID.ricInstanceID;
1754 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1755 mdclog_write(MDCLOG_DEBUG, "sub id = %d, mtype = %d, ric instance id %ld, requestor id = %ld",
1756 rmrMessageBuffer.sendMessage->sub_id,
1757 rmrMessageBuffer.sendMessage->mtype,
1758 ie->value.choice.RICrequestID.ricInstanceID,
1759 ie->value.choice.RICrequestID.ricRequestorID);
1761 message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICindication]->Increment();
1762 message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICindication]->Increment((double)message.message.asnLength);
1763 sendRmrMessage(rmrMessageBuffer, message);
1766 mdclog_write(MDCLOG_ERR, "RIC request id missing illigal request");
// Unknown procedure: log it and record the message with no RMR type.
1776 mdclog_write(MDCLOG_ERR, "Undefined or not supported message = %ld", procedureCode);
1777 message.message.messageType = 0; // no RMR message type yet
1779 buildJsonMessage(message);
1790 * @param rmrMessageBuffer
/*
 * Handle an E2AP "successful outcome" PDU received from the E2 node.
 *
 * Dispatches on the procedure code; for every supported procedure the
 * per-peer IN_SUCC message/byte Prometheus counters are incremented and
 * the response is forwarded to the xApp over RMR.  Unsupported procedure
 * codes are logged and only traced via buildJsonMessage().
 *
 * @param pdu               decoded E2AP PDU (successfulOutcome branch is read)
 * @param sctpMap           connected-peers map (unused on the paths visible here)
 * @param message           reporting metadata: enodbName, peer counters, ASN data
 * @param rmrMessageBuffer  RMR context plus send/receive message buffers
 */
1792 void asnSuccsesfulMsg(E2AP_PDU_t *pdu,
1793                      Sctp_Map_t *sctpMap,
1794                      ReportingMessages_t &message,
1795                      RmrMessagesBuffer_t &rmrMessageBuffer) {
1796     auto procedureCode = pdu->choice.successfulOutcome->procedureCode;
1797     auto logLevel = mdclog_level_get();
1798     if (logLevel >= MDCLOG_INFO) {
1799         mdclog_write(MDCLOG_INFO, "Successful Outcome %ld", procedureCode);
1801     switch (procedureCode) {
         // Reset acknowledged by the E2 node: bump counters, convert the
         // received PER payload to XML, then notify the xApp.
1802         case ProcedureCode_id_Reset: {
1803             if (logLevel >= MDCLOG_DEBUG) {
1804                 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
1806             message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
1807             message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment((double)message.message.asnLength);
1808             if (XML_From_PER(message, rmrMessageBuffer) < 0) {
1811             if (sendRequestToXapp(message, RIC_E2_RESET_RESP, rmrMessageBuffer) != 0) {
1812                 mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_RESP message failed to send to xAPP");
         // RIC Control acknowledged: scan the protocol IEs for the
         // RICrequestID; its ricInstanceID becomes the RMR sub_id of the
         // outgoing RIC_CONTROL_ACK.
1816         case ProcedureCode_id_RICcontrol: {
1817             if (logLevel >= MDCLOG_DEBUG) {
1818                 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
1821                  i < pdu->choice.successfulOutcome->value.choice.RICcontrolAcknowledge.protocolIEs.list.count; i++) {
1822                 auto messageSent = false;
1823                 RICcontrolAcknowledge_IEs_t *ie = pdu->choice.successfulOutcome->value.choice.RICcontrolAcknowledge.protocolIEs.list.array[i];
1824                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
1825                     mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1827                 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1828                     if (mdclog_level_get() >= MDCLOG_DEBUG) {
1829                         mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1831                     if (ie->value.present == RICcontrolAcknowledge_IEs__value_PR_RICrequestID) {
1832                         message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_ACK;
1833                         rmrMessageBuffer.sendMessage->state = 0;
1834 //                        rmrMessageBuffer.sendMessage->sub_id = (int) ie->value.choice.RICrequestID.ricRequestorID;
                         // ricInstanceID (not ricRequestorID) is used as the
                         // RMR subscription id — see the commented line above.
1835                         rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
                         // Stamp a fresh transaction id and the gNB/eNB name
                         // (meid) on the outgoing RMR message.
1837                         static unsigned char tx[32];
1838                         snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1839                         rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1840                         rmr_bytes2meid(rmrMessageBuffer.sendMessage,
1841                                        (unsigned char *)message.message.enodbName,
1842                                        strlen(message.message.enodbName));
1844                         message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
1845                         message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment((double)message.message.asnLength);
1846                         sendRmrMessage(rmrMessageBuffer, message);
1849                         mdclog_write(MDCLOG_ERR, "RIC request id missing illigal request");
         // Subscription / subscription-delete responses are forwarded to the
         // xApp unchanged (RIC_SUB_RESP / RIC_SUB_DEL_RESP).
1859         case ProcedureCode_id_RICsubscription: {
1860             if (logLevel >= MDCLOG_DEBUG) {
1861                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
1863             message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
1864             message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment((double)message.message.asnLength);
1865             if (sendRequestToXapp(message, RIC_SUB_RESP, rmrMessageBuffer) != 0) {
1866                 mdclog_write(MDCLOG_ERR, "Subscription successful message failed to send to xAPP");
1870         case ProcedureCode_id_RICsubscriptionDelete: {
1871             if (logLevel >= MDCLOG_DEBUG) {
1872                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
1874             message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
1875             message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment((double)message.message.asnLength);
1876             if (sendRequestToXapp(message, RIC_SUB_DEL_RESP, rmrMessageBuffer) != 0) {
1877                 mdclog_write(MDCLOG_ERR, "Subscription delete successful message failed to send to xAPP");
         // Unknown procedure: no RMR forward, only a warning plus JSON trace.
1882             mdclog_write(MDCLOG_WARN, "Undefined or not supported message = %ld", procedureCode);
1883             message.message.messageType = 0; // no RMR message type yet
1884             buildJsonMessage(message);
1895 * @param rmrMessageBuffer
/*
 * Handle an E2AP "unsuccessful outcome" PDU received from the E2 node.
 *
 * Mirrors asnSuccsesfulMsg(): dispatches on the procedure code, bumps the
 * per-peer IN_UN_SUCC message/byte counters, and forwards the failure to
 * the xApp over RMR.  Unknown procedures are logged and traced as JSON.
 *
 * @param pdu               decoded E2AP PDU (unsuccessfulOutcome branch is read)
 * @param sctpMap           connected-peers map (unused on the paths visible here)
 * @param message           reporting metadata: enodbName, peer counters, ASN data
 * @param rmrMessageBuffer  RMR context plus send/receive message buffers
 */
1897 void asnUnSuccsesfulMsg(E2AP_PDU_t *pdu,
1898                         Sctp_Map_t *sctpMap,
1899                         ReportingMessages_t &message,
1900                         RmrMessagesBuffer_t &rmrMessageBuffer) {
1901     auto procedureCode = pdu->choice.unsuccessfulOutcome->procedureCode;
1902     auto logLevel = mdclog_level_get();
1903     if (logLevel >= MDCLOG_INFO) {
1904         mdclog_write(MDCLOG_INFO, "Unsuccessful Outcome %ld", procedureCode);
1906     switch (procedureCode) {
         // RIC Control failed: locate the RICrequestID IE and forward a
         // RIC_CONTROL_FAILURE to the xApp, sub_id = ricInstanceID.
1907         case ProcedureCode_id_RICcontrol: {
1908             if (logLevel >= MDCLOG_DEBUG) {
1909                 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
1912                  i < pdu->choice.unsuccessfulOutcome->value.choice.RICcontrolFailure.protocolIEs.list.count; i++) {
1913                 auto messageSent = false;
1914                 RICcontrolFailure_IEs_t *ie = pdu->choice.unsuccessfulOutcome->value.choice.RICcontrolFailure.protocolIEs.list.array[i];
1915                 if (logLevel >= MDCLOG_DEBUG) {
1916                     mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
1918                 if (ie->id == ProtocolIE_ID_id_RICrequestID) {
1919                     if (logLevel >= MDCLOG_DEBUG) {
1920                         mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
1922                     if (ie->value.present == RICcontrolFailure_IEs__value_PR_RICrequestID) {
1923                         message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_FAILURE;
1924                         rmrMessageBuffer.sendMessage->state = 0;
1925 //                        rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricRequestorID;
                         // ricInstanceID (not ricRequestorID) is used as the
                         // RMR subscription id — see the commented line above.
1926                         rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
1927                         static unsigned char tx[32];
1928                         snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1929                         rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
1930                         rmr_bytes2meid(rmrMessageBuffer.sendMessage, (unsigned char *) message.message.enodbName,
1931                                        strlen(message.message.enodbName));
1932                         message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
1933                         message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment((double)message.message.asnLength);
1934                         sendRmrMessage(rmrMessageBuffer, message);
1937                         mdclog_write(MDCLOG_ERR, "RIC request id missing illigal request");
1946         case ProcedureCode_id_RICsubscription: {
1947             if (logLevel >= MDCLOG_DEBUG) {
1948                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
1950             message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
1951             message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment((double)message.message.asnLength);
1952             if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
1953                 mdclog_write(MDCLOG_ERR, "Subscription unsuccessful message failed to send to xAPP");
1957         case ProcedureCode_id_RICsubscriptionDelete: {
1958             if (logLevel >= MDCLOG_DEBUG) {
1959                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
1961             message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
1962             message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment((double)message.message.asnLength);
             // NOTE(review): the delete-failure is forwarded as RIC_SUB_FAILURE,
             // same type as the subscription-failure case above — confirm the
             // xApp does not expect a dedicated delete-failure message type.
1963             if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
1964                 mdclog_write(MDCLOG_ERR, "Subscription Delete unsuccessful message failed to send to xAPP");
         // Unknown procedure: no RMR forward, only a warning plus JSON trace.
1969             mdclog_write(MDCLOG_WARN, "Undefined or not supported message = %ld", procedureCode);
1970             message.message.messageType = 0; // no RMR message type yet
1972             buildJsonMessage(message);
1983 * @param rmrMmessageBuffer
/*
 * Send the current payload to the xApp as an RMR message of type requestId.
 *
 * Stamps the meid (gNB/eNB name), sets the RMR message type and state,
 * and generates a fresh transaction id before delegating to
 * sendRmrMessage().
 *
 * @param message           reporting metadata; messageType is set from requestId
 * @param requestId         RMR message type to send (parameter declared on a
 *                          line elided from this view; used below)
 * @param rmrMmessageBuffer RMR context and outgoing message buffer
 * @return the result of sendRmrMessage() (returned on a line elided here)
 */
1986 int sendRequestToXapp(ReportingMessages_t &message,
1988                       RmrMessagesBuffer_t &rmrMmessageBuffer) {
1989     rmr_bytes2meid(rmrMmessageBuffer.sendMessage,
1990                    (unsigned char *)message.message.enodbName,
1991                    strlen(message.message.enodbName));
1992     message.message.messageType = rmrMmessageBuffer.sendMessage->mtype = requestId;
1993     rmrMmessageBuffer.sendMessage->state = 0;
     // Monotonic transaction id shared across calls (function-local static).
1994     static unsigned char tx[32];
1995     snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
1996     rmr_bytes2xact(rmrMmessageBuffer.sendMessage, tx, strlen((const char *) tx));
1998     auto rc = sendRmrMessage(rmrMmessageBuffer, message);
2004 * @param pSctpParams
/*
 * Initialize the RMR context and hook its receive fd into the epoll loop.
 *
 * Calls rmr_init() with the configured address, disables send retries,
 * busy-waits for rmr_ready() (routing table loaded), enables RMR tracing,
 * then registers the RMR receive descriptor with the epoll instance.
 * On any failure the context is closed and pSctpParams.rmrCtx is left
 * nullptr so the caller can detect the error.
 *
 * @param pSctpParams  global SCTP/RMR parameters; rmrCtx and rmrListenFd
 *                     are written by this function
 */
2006 void getRmrContext(sctp_params_t &pSctpParams) {
2007     pSctpParams.rmrCtx = nullptr;
2008     pSctpParams.rmrCtx = rmr_init(pSctpParams.rmrAddress, RECEIVE_XAPP_BUFFER_SIZE, RMRFL_NONE);
2009     if (pSctpParams.rmrCtx == nullptr) {
2010         mdclog_write(MDCLOG_ERR, "Failed to initialize RMR");
2014     rmr_set_stimeout(pSctpParams.rmrCtx, 0);    // disable retries for any send operation
2015     // we need to find that routing table exist and we can run
2016     if (mdclog_level_get() >= MDCLOG_INFO) {
2017         mdclog_write(MDCLOG_INFO, "We are after RMR INIT wait for RMR_Ready");
     // Poll until RMR reports a usable routing table, logging progress
     // once per 60 iterations.
2022         if ((rmrReady = rmr_ready(pSctpParams.rmrCtx)) == 0) {
2026             if (count % 60 == 0) {
2027                 mdclog_write(MDCLOG_INFO, "waiting to RMR ready state for %d seconds", count);
2030     if (mdclog_level_get() >= MDCLOG_INFO) {
2031         mdclog_write(MDCLOG_INFO, "RMR running");
2033     rmr_init_trace(pSctpParams.rmrCtx, 200);
2034     // get the RMR fd for the epoll
2035     pSctpParams.rmrListenFd = rmr_get_rcvfd(pSctpParams.rmrCtx);
2036     struct epoll_event event{};
2037     // add RMR fd to epoll
2038     event.events = (EPOLLIN);
2039     event.data.fd = pSctpParams.rmrListenFd;
2040     // add listening RMR FD to epoll
2041     if (epoll_ctl(pSctpParams.epoll_fd, EPOLL_CTL_ADD, pSctpParams.rmrListenFd, &event)) {
2042         mdclog_write(MDCLOG_ERR, "Failed to add RMR descriptor to epoll");
         // Roll back: release the fd and the RMR context so the caller
         // sees rmrCtx == nullptr.
2043         close(pSctpParams.rmrListenFd);
2044         rmr_close(pSctpParams.rmrCtx);
2045         pSctpParams.rmrCtx = nullptr;
2052 * @param rmrMessageBuffer
/*
 * Convert an XML (XER) encoded E2AP PDU received from the xApp into
 * aligned PER, in place, inside the RMR receive buffer.
 *
 * Decodes rcvMessage->payload with ATS_BASIC_XER, re-encodes the PDU with
 * ATS_ALIGNED_BASIC_PER back into the same payload, and updates
 * rcvMessage->len to the encoded size.
 *
 * @param message           reporting metadata (enodbName used in error logs)
 * @param rmrMessageBuffer  holds the receive buffer that is rewritten
 * @return 0 on success, negative on decode/encode failure (return
 *         statements are on lines elided from this view)
 */
2055 int PER_FromXML(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
2056     E2AP_PDU_t *pdu = nullptr;
2058     if (mdclog_level_get() >= MDCLOG_DEBUG) {
2059         mdclog_write(MDCLOG_DEBUG, "got xml Format  data from xApp of size %d is:%s",
2060                      rmrMessageBuffer.rcvMessage->len, rmrMessageBuffer.rcvMessage->payload);
     // XER (XML) -> internal E2AP_PDU_t
2062     auto rval = asn_decode(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, (void **) &pdu,
2063                            rmrMessageBuffer.rcvMessage->payload, rmrMessageBuffer.rcvMessage->len);
2064     if (rval.code != RC_OK) {
2065         mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) setup response  from E2MGR : %s",
2067                      message.message.enodbName);
     // internal PDU -> aligned PER, written over the original XML payload
2071     int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
2072     auto er = asn_encode_to_buffer(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, pdu,
2073                                    rmrMessageBuffer.rcvMessage->payload, buff_size);
2074     if (er.encoded == -1) {
2075         mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
2077     } else if (er.encoded > (ssize_t)buff_size) {
2078         mdclog_write(MDCLOG_ERR, "Buffer of size %d is to small for %s, at %s line %d",
2079                      (int)rmrMessageBuffer.rcvMessage->len,
2080                      asn_DEF_E2AP_PDU.name,
2085     rmrMessageBuffer.rcvMessage->len = er.encoded;
2092 * @param rmrMessageBuffer
/*
 * Receive one RMR message from an xApp and route it toward the E2 node.
 *
 * Reads a message with rmr_rcv_msg(), resolves the target peer from the
 * RMR meid (gNB/eNB name) via sctpMap, then dispatches on the RMR message
 * type: setup/service/reset messages are first converted XML->PER
 * (PER_FromXML) and sent over SCTP; subscription/control messages are
 * forwarded as-is; RIC_SCTP_CLEAR_ALL tears down every peer connection;
 * keep-alive and health-check requests are answered directly over RMR.
 * Per-peer OUT_* Prometheus counters are incremented per message type.
 *
 * @param sctpMap           map of connected E2 nodes, keyed by enodbName
 * @param rmrMessageBuffer  RMR context plus send/receive buffers
 * @param ts                receive timestamp copied into the trace record
 * @return 0 on the visible success path (error returns are on lines
 *         elided from this view)
 */
2096 int receiveXappMessages(Sctp_Map_t *sctpMap,
2097                         RmrMessagesBuffer_t &rmrMessageBuffer,
2098                         struct timespec &ts) {
2099     if (rmrMessageBuffer.rcvMessage == nullptr) {
2101         mdclog_write(MDCLOG_ERR, "RMR Allocation message, %s", strerror(errno));
2105     if (mdclog_level_get() >= MDCLOG_DEBUG) {
2106         mdclog_write(MDCLOG_DEBUG, "Call to rmr_rcv_msg");
2108     rmrMessageBuffer.rcvMessage = rmr_rcv_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.rcvMessage);
2109     if (rmrMessageBuffer.rcvMessage == nullptr) {
         // Receive failed hard: re-allocate the buffer so the loop can continue.
2110         mdclog_write(MDCLOG_ERR, "RMR Receving message with null pointer, Realloc rmr mesage buffer");
2111         rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2114     ReportingMessages_t message;
2115     message.message.direction = 'D';
2116     message.message.time.tv_nsec = ts.tv_nsec;
2117     message.message.time.tv_sec = ts.tv_sec;
2119     // get message payload
2120     //auto msgData = msg->payload;
2121     if (rmrMessageBuffer.rcvMessage->state != 0) {
2122         mdclog_write(MDCLOG_ERR, "RMR Receving message with stat = %d", rmrMessageBuffer.rcvMessage->state);
     // The RMR meid carries the gNB/eNB name; use it to find the SCTP peer.
2125     rmr_get_meid(rmrMessageBuffer.rcvMessage, (unsigned char *)message.message.enodbName);
2126     message.peerInfo = (ConnectedCU_t *) sctpMap->find(message.message.enodbName);
2127     if (message.peerInfo == nullptr) {
2128         auto type = rmrMessageBuffer.rcvMessage->mtype;
         // These three message types are not addressed to a specific peer,
         // so a missing map entry is not an error for them.
2130             case RIC_SCTP_CLEAR_ALL:
2131             case E2_TERM_KEEP_ALIVE_REQ:
2132             case RIC_HEALTH_CHECK_REQ:
2135                 mdclog_write(MDCLOG_ERR, "Failed to send message no CU entry %s", message.message.enodbName);
2140     switch (rmrMessageBuffer.rcvMessage->mtype) {
2141         case RIC_E2_SETUP_RESP : {
2142             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2145             message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
2146             message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment(rmrMessageBuffer.rcvMessage->len);
2147             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2148                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_RESP");
2153         case RIC_E2_SETUP_FAILURE : {
2154             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2157             message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
2158             message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment(rmrMessageBuffer.rcvMessage->len);
2159             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2160                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_FAILURE");
2165         case RIC_ERROR_INDICATION: {
2166             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication]->Increment();
2167             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication]->Increment(rmrMessageBuffer.rcvMessage->len);
2168             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2169                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_ERROR_INDICATION");
         // Subscription request (its case label is on a line elided from
         // this view): forwarded to the E2 node without XML conversion.
2175             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
2176             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment(rmrMessageBuffer.rcvMessage->len);
2177             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2178                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_REQ");
2183         case RIC_SUB_DEL_REQ: {
2184             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
2185             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment(rmrMessageBuffer.rcvMessage->len);
2186             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2187                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_DEL_REQ");
2192         case RIC_CONTROL_REQ: {
2193             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
2194             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment(rmrMessageBuffer.rcvMessage->len);
2195             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2196                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_CONTROL_REQ");
2201         case RIC_SERVICE_QUERY: {
2202             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2205             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment();
2206             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment(rmrMessageBuffer.rcvMessage->len);
2207             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2208                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_QUERY");
2213         case RIC_SERVICE_UPDATE_ACK: {
2214             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2217             message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
             // NOTE(review): BYTES_COUNTER below is indexed by RICserviceQuery
             // while MSG_COUNTER above uses RICserviceUpdate — looks like a
             // copy/paste slip; confirm which procedure id is intended.
2218             message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment(rmrMessageBuffer.rcvMessage->len);
2219             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2220                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_ACK");
2225         case RIC_SERVICE_UPDATE_FAILURE: {
2226             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2229             message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
             // NOTE(review): same RICserviceQuery/RICserviceUpdate index
             // mismatch as in the ACK case above — confirm intent.
2230             message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment(rmrMessageBuffer.rcvMessage->len);
2231             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2232                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_FAILURE");
2237         case RIC_E2_RESET_REQ: {
2238             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2241             message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
2242             message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment(rmrMessageBuffer.rcvMessage->len);
2243             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2244                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET");
2249         case RIC_E2_RESET_RESP: {
2250             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
2253             message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
2254             message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment(rmrMessageBuffer.rcvMessage->len);
2255             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
2256                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET_RESP");
         // Tear down every SCTP peer: close each socket and tell the xApp
         // via RIC_SCTP_CONNECTION_FAILURE, one message per peer.
2261         case RIC_SCTP_CLEAR_ALL: {
2262             mdclog_write(MDCLOG_INFO, "RIC_SCTP_CLEAR_ALL");
2263             // loop on all keys and close socket and then erase all map.
2265             sctpMap->getKeys(v);
2266             for (auto const &iter : v) { //}; iter != sctpMap.end(); iter++) {
                 // Skip the auxiliary "host:"/"msg:" index entries; only
                 // real peer entries hold a ConnectedCU_t.
2267                 if (!boost::starts_with((string) (iter), "host:") && !boost::starts_with((string) (iter), "msg:")) {
2268                     auto *peerInfo = (ConnectedCU_t *) sctpMap->find(iter);
2269                     if (peerInfo == nullptr) {
2272                     close(peerInfo->fileDescriptor);
2273                     memcpy(message.message.enodbName, peerInfo->enodbName, sizeof(peerInfo->enodbName));
2274                     message.message.direction = 'D';
2275                     message.message.time.tv_nsec = ts.tv_nsec;
2276                     message.message.time.tv_sec = ts.tv_sec;
2278                     message.message.asnLength = rmrMessageBuffer.sendMessage->len =
2279                             snprintf((char *)rmrMessageBuffer.sendMessage->payload,
2281                                      "%s|RIC_SCTP_CLEAR_ALL",
2282                                      peerInfo->enodbName);
2283                     message.message.asndata = rmrMessageBuffer.sendMessage->payload;
2284                     mdclog_write(MDCLOG_INFO, "%s", message.message.asndata);
2285                     if (sendRequestToXapp(message, RIC_SCTP_CONNECTION_FAILURE, rmrMessageBuffer) != 0) {
2286                         mdclog_write(MDCLOG_ERR, "SCTP_CONNECTION_FAIL message failed to send to xAPP");
         // Keep-alive: echo the preformatted ka_message back as
         // E2_TERM_KEEP_ALIVE_RESP directly over RMR (no SCTP involved).
2296         case E2_TERM_KEEP_ALIVE_REQ: {
2297             // send message back
2298             rmr_bytes2payload(rmrMessageBuffer.sendMessage,
2299                               (unsigned char *)rmrMessageBuffer.ka_message,
2300                               rmrMessageBuffer.ka_message_len);
2301             rmrMessageBuffer.sendMessage->mtype = E2_TERM_KEEP_ALIVE_RESP;
2302             rmrMessageBuffer.sendMessage->state = 0;
2303             static unsigned char tx[32];
2304             auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2305             rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, txLen);
2306             rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2307             if (rmrMessageBuffer.sendMessage == nullptr) {
2308                 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2309                 mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP RMR message returned NULL");
2310             } else if (rmrMessageBuffer.sendMessage->state != 0)  {
2311                 mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP, on RMR state = %d ( %s)",
2312                              rmrMessageBuffer.sendMessage->state, translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str());
2313             } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
2314                 mdclog_write(MDCLOG_DEBUG, "Got Keep Alive Request send : %s", rmrMessageBuffer.ka_message);
         // Health check: reply "OK" as RIC_HEALTH_CHECK_RESP over RMR.
2319         case RIC_HEALTH_CHECK_REQ: {
2320             // send message back
2321             rmr_bytes2payload(rmrMessageBuffer.sendMessage,
2322                               (unsigned char *)"OK",
2324             rmrMessageBuffer.sendMessage->mtype = RIC_HEALTH_CHECK_RESP;
2325             rmrMessageBuffer.sendMessage->state = 0;
2326             static unsigned char tx[32];
2327             auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2328             rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, txLen);
2329             rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2330             if (rmrMessageBuffer.sendMessage == nullptr) {
2331                 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2332                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP RMR message returned NULL");
2333             } else if (rmrMessageBuffer.sendMessage->state != 0)  {
2334                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP, on RMR state = %d ( %s)",
2335                              rmrMessageBuffer.sendMessage->state, translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str());
2336             } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
2337                 mdclog_write(MDCLOG_DEBUG, "Got RIC_HEALTH_CHECK_REQ Request send : OK");
         // Unknown RMR type: warn and record the raw payload in the trace.
2344             mdclog_write(MDCLOG_WARN, "Message Type : %d is not seported", rmrMessageBuffer.rcvMessage->mtype);
2345             message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
2346             message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
2347             message.message.time.tv_nsec = ts.tv_nsec;
2348             message.message.time.tv_sec = ts.tv_sec;
2349             message.message.messageType = rmrMessageBuffer.rcvMessage->mtype;
2351             buildJsonMessage(message);
2356     if (mdclog_level_get() >= MDCLOG_DEBUG) {
2357         mdclog_write(MDCLOG_DEBUG, "EXIT OK from %s", __FUNCTION__);
2363  * Send a message to the CU without waiting for a successful or unsuccessful result
2364 * @param messageBuffer
2366 * @param failedMsgId
/*
 * Forward a message received from the xApp to its target CU over SCTP.
 *
 * Extracts the request metadata (enodbName etc.) from the RMR buffer and
 * delegates the actual send to sendMessagetoCu().
 *
 * @param messageBuffer  RMR buffers holding the received payload
 * @param message        reporting metadata, filled by getRequestMetaData()
 * @param failedMsgId    message type reported back on failure (parameter
 *                       declared on a line elided from this view)
 * @param sctpMap        map of connected E2 nodes
 * @return result of sendMessagetoCu() (return on a line elided here)
 */
2370 int sendDirectionalSctpMsg(RmrMessagesBuffer_t &messageBuffer,
2371                            ReportingMessages_t &message,
2373                            Sctp_Map_t *sctpMap) {
2375     getRequestMetaData(message, messageBuffer);
2376     if (mdclog_level_get() >= MDCLOG_INFO) {
2377         mdclog_write(MDCLOG_INFO, "send message to %s address", message.message.enodbName);
2380     auto rc = sendMessagetoCu(sctpMap, messageBuffer, message, failedMsgId);
2387 * @param messageBuffer
2389 * @param failedMesgId
/*
 * Send the received RMR payload to the CU over its SCTP connection.
 *
 * Records the RMR message type in the trace metadata, then delegates to
 * sendSctpMsg() using the peer already resolved into message.peerInfo.
 *
 * @param sctpMap        map of connected E2 nodes (passed through)
 * @param messageBuffer  RMR buffers holding the received payload
 * @param message        reporting metadata with the resolved peerInfo
 * @return result of sendSctpMsg() (return on a line elided from this view)
 */
2392 int sendMessagetoCu(Sctp_Map_t *sctpMap,
2393                     RmrMessagesBuffer_t &messageBuffer,
2394                     ReportingMessages_t &message,
2397     message.message.messageType = messageBuffer.rcvMessage->mtype;
2398     auto rc = sendSctpMsg(message.peerInfo, message, sctpMap);
2404 * @param rmrCtx the rmr context to send and receive
2405  * @param msg the message we got from the xApp
2406 * @param metaData data from xApp in ordered struct
2407 * @param failedMesgId the return message type error
/*
 * Notify the xApp that the target gNB/eNB for its request was not found.
 *
 * Builds a human-readable error payload in the RMR send buffer, sets the
 * caller-provided failure message type and a fresh transaction id, then
 * sends it back over RMR.  (The return type appears on a line elided
 * from this view.)
 *
 * @param rmrMessageBuffer  RMR context and send buffer
 * @param message           reporting metadata (enodbName used in the payload)
 * @param failedMesgId      RMR message type for the failure notification
 */
2410 sendFailedSendingMessagetoXapp(RmrMessagesBuffer_t &rmrMessageBuffer, ReportingMessages_t &message, int failedMesgId) {
2411     rmr_mbuf_t *msg = rmrMessageBuffer.sendMessage;
2412     msg->len = snprintf((char *) msg->payload, 200, "the gNb/eNode name %s not found",
2413                         message.message.enodbName);
2414     if (mdclog_level_get() >= MDCLOG_INFO) {
2415         mdclog_write(MDCLOG_INFO, "%s", msg->payload);
2417     msg->mtype = failedMesgId;
2420     static unsigned char tx[32];
2421     snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
2422     rmr_bytes2xact(msg, tx, strlen((const char *) tx));
2424     sendRmrMessage(rmrMessageBuffer, message);
/*
 * Register a peer's SCTP socket with the epoll instance.
 *
 * On EPOLL_CTL_ADD failure the socket is closed and, when an enodbName
 * is supplied, the peer's hash entries (including the pending "msg:"
 * key) are removed from sctpMap so no stale state remains.
 *
 * @param epoll_fd   epoll instance descriptor
 * @param peerInfo   connected peer; its fileDescriptor is registered and
 *                   stored as the epoll event's data pointer
 * @param events     epoll event mask (e.g. EPOLLIN)
 * @param sctpMap    peer map used for cleanup on failure
 * @param enodbName  peer name for cleanup, or nullptr (parameter list is
 *                   partially elided from this view)
 * @return 0/-1 per the elided return statements — failure path visible below
 */
2439 int addToEpoll(int epoll_fd,
2440                ConnectedCU_t *peerInfo,
2442                Sctp_Map_t *sctpMap,
2446     struct epoll_event event{};
2447     event.data.ptr = peerInfo;
2448     event.events = events;
2449     if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, peerInfo->fileDescriptor, &event) < 0) {
2450         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2451             mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_ADD (may chack not to quit here), %s, %s %d",
2452                          strerror(errno), __func__, __LINE__);
2454         close(peerInfo->fileDescriptor);
2455         if (enodbName != nullptr) {
2456             cleanHashEntry(peerInfo, sctpMap);
             // Also drop the pending-message key "msg:<name>|<type>".
2457             char key[MAX_ENODB_NAME_SIZE * 2];
2458             snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", enodbName, msgType);
2459             if (mdclog_level_get() >= MDCLOG_DEBUG) {
2460                 mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
2462             auto tmp = sctpMap->find(key);
2465             sctpMap->erase(key);
2468             peerInfo->enodbName[0] = 0;
2470         mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_ADD (may chack not to quit here)");
/*
 * Modify the epoll registration (event mask) of a peer's SCTP socket.
 *
 * On EPOLL_CTL_MOD failure the socket is closed and the peer's hash
 * entries — including the pending "msg:<name>|<type>" key — are removed
 * from sctpMap, mirroring the failure path of addToEpoll().
 *
 * @param epoll_fd   epoll instance descriptor
 * @param peerInfo   connected peer whose registration is updated
 * @param events     new epoll event mask
 * @param sctpMap    peer map used for cleanup on failure
 * @return 0/-1 per the elided return statements — failure path visible below
 */
2486 int modifyToEpoll(int epoll_fd,
2487                   ConnectedCU_t *peerInfo,
2489                   Sctp_Map_t *sctpMap,
2493     struct epoll_event event{};
2494     event.data.ptr = peerInfo;
2495     event.events = events;
2496     if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, peerInfo->fileDescriptor, &event) < 0) {
2497         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2498             mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_MOD (may chack not to quit here), %s, %s %d",
2499                          strerror(errno), __func__, __LINE__);
2501         close(peerInfo->fileDescriptor);
2502         cleanHashEntry(peerInfo, sctpMap);
2503         char key[MAX_ENODB_NAME_SIZE * 2];
2504         snprintf(key, MAX_ENODB_NAME_SIZE * 2, "msg:%s|%d", enodbName, msgType);
2505         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2506             mdclog_write(MDCLOG_DEBUG, "remove key = %s from %s at line %d", key, __FUNCTION__, __LINE__);
2508         auto tmp = sctpMap->find(key);
2512         sctpMap->erase(key);
         // NOTE(review): error text says EPOLL_CTL_ADD although this is the
         // MOD path — looks copied from addToEpoll(); confirm before fixing
         // (log strings are runtime behavior).
2513         mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_ADD (may chack not to quit here)");
/*
 * Send the prepared RMR message to the xApp, with one retry on RMR_ERR_RETRY.
 *
 * First records the message in the JSON trace (buildJsonMessage), then
 * calls rmr_send_msg().  A NULL return re-allocates the send buffer and
 * fails; an RMR_ERR_RETRY state triggers exactly one visible resend
 * attempt; any other non-zero state is logged and returned to the caller.
 *
 * @param rmrMessageBuffer  RMR context and send buffer (may be re-allocated)
 * @param message           reporting metadata used for the JSON trace
 * @return 0 on success, or the RMR state / -1 on failure (some return
 *         statements are on lines elided from this view)
 */
2520 int sendRmrMessage(RmrMessagesBuffer_t &rmrMessageBuffer, ReportingMessages_t &message) {
2521     buildJsonMessage(message);
2523     rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2525     if (rmrMessageBuffer.sendMessage == nullptr) {
2526         rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2527         mdclog_write(MDCLOG_ERR, "RMR failed send message returned with NULL pointer");
2531     if (rmrMessageBuffer.sendMessage->state != 0) {
2532         char meid[RMR_MAX_MEID]{};
2533         if (rmrMessageBuffer.sendMessage->state == RMR_ERR_RETRY) {
             // Transient congestion: clear the state and retry once.
2535             rmrMessageBuffer.sendMessage->state = 0;
2536             mdclog_write(MDCLOG_INFO, "RETRY sending Message type %d to Xapp from %s",
2537                          rmrMessageBuffer.sendMessage->mtype,
2538                          rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2539             rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
2540             if (rmrMessageBuffer.sendMessage == nullptr) {
2541                 mdclog_write(MDCLOG_ERR, "RMR failed send message returned with NULL pointer");
2542                 rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
2544             } else if (rmrMessageBuffer.sendMessage->state != 0) {
2545                 mdclog_write(MDCLOG_ERR,
2546                              "Message state %s while sending request %d to Xapp from %s after retry of 10 microseconds",
2547                              translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str(),
2548                              rmrMessageBuffer.sendMessage->mtype,
2549                              rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2550                 auto rc = rmrMessageBuffer.sendMessage->state;
         // Non-retryable error: log and surface the RMR state to the caller.
2554             mdclog_write(MDCLOG_ERR, "Message state %s while sending request %d to Xapp from %s",
2555                          translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str(),
2556                          rmrMessageBuffer.sendMessage->mtype,
2557                          rmr_get_meid(rmrMessageBuffer.sendMessage, (unsigned char *)meid));
2558             return rmrMessageBuffer.sendMessage->state;
/*
 * Serialize a reporting record to JSON and emit it to the Boost trace log.
 *
 * Base64-encodes the raw ASN.1 payload into message.base64Data, formats a
 * single-line JSON object (timestamp, ranName, messageType, direction,
 * base64 length and data) into message.buffer, and writes it through the
 * process-wide Boost.Log logger.
 *
 * @param message  reporting record; base64Data, outLen and buffer are
 *                 written by this function
 */
2564 void buildJsonMessage(ReportingMessages_t &message) {
2566         message.outLen = sizeof(message.base64Data);
         // Raw ASN.1 bytes -> base64; outLen is updated to the encoded size.
2567         base64::encode((const unsigned char *) message.message.asndata,
2568                        (const int) message.message.asnLength,
2571         if (mdclog_level_get() >= MDCLOG_DEBUG) {
2572             mdclog_write(MDCLOG_DEBUG, "Tracing: ASN length = %d, base64 message length = %d ",
2573                          (int) message.message.asnLength,
2574                          (int) message.outLen);
2577         snprintf(message.buffer, sizeof(message.buffer),
2578                  "{\"header\": {\"ts\": \"%ld.%09ld\","
2579                  "\"ranName\": \"%s\","
2580                  "\"messageType\": %d,"
2581                  "\"direction\": \"%c\"},"
2582                  "\"base64Length\": %d,"
2583                  "\"asnBase64\": \"%s\"}",
2584                  message.message.time.tv_sec,
2585                  message.message.time.tv_nsec,
2586                  message.message.enodbName,
2587                  message.message.messageType,
2588                  message.message.direction,
2589                  (int) message.outLen,
2590                  message.base64Data);
     // Single shared logger instance; Boost.Log sinks handle file rotation.
2591         static src::logger_mt &lg = my_logger::get();
2593         BOOST_LOG(lg) << message.buffer;
2599 * take RMR error code to string
2603 string translateRmrErrorMessages(int state) {
2607 str = "RMR_OK - state is good";
2609 case RMR_ERR_BADARG:
2610 str = "RMR_ERR_BADARG - argument passd to function was unusable";
2612 case RMR_ERR_NOENDPT:
2613 str = "RMR_ERR_NOENDPT - send//call could not find an endpoint based on msg type";
2616 str = "RMR_ERR_EMPTY - msg received had no payload; attempt to send an empty message";
2619 str = "RMR_ERR_NOHDR - message didn't contain a valid header";
2621 case RMR_ERR_SENDFAILED:
2622 str = "RMR_ERR_SENDFAILED - send failed; errno has nano reason";
2624 case RMR_ERR_CALLFAILED:
2625 str = "RMR_ERR_CALLFAILED - unable to send call() message";
2627 case RMR_ERR_NOWHOPEN:
2628 str = "RMR_ERR_NOWHOPEN - no wormholes are open";
2631 str = "RMR_ERR_WHID - wormhole id was invalid";
2633 case RMR_ERR_OVERFLOW:
2634 str = "RMR_ERR_OVERFLOW - operation would have busted through a buffer/field size";
2637 str = "RMR_ERR_RETRY - request (send/call/rts) failed, but caller should retry (EAGAIN for wrappers)";
2639 case RMR_ERR_RCVFAILED:
2640 str = "RMR_ERR_RCVFAILED - receive failed (hard error)";
2642 case RMR_ERR_TIMEOUT:
2643 str = "RMR_ERR_TIMEOUT - message processing call timed out";
2646 str = "RMR_ERR_UNSET - the message hasn't been populated with a transport buffer";
2649 str = "RMR_ERR_TRUNC - received message likely truncated";
2651 case RMR_ERR_INITFAILED:
2652 str = "RMR_ERR_INITFAILED - initialisation of something (probably message) failed";
2654 case RMR_ERR_NOTSUPP:
2655 str = "RMR_ERR_NOTSUPP - the request is not supported, or RMr was not initialised for the request";
2659 snprintf(buf, sizeof buf, "UNDOCUMENTED RMR_ERR : %d", state);