5.4.7 Update RMR library to 4.4.4
ric-plt/e2.git: RIC-E2-TERMINATION/sctpThread.cpp
index 94827f9..d3e00df 100644
@@ -24,9 +24,9 @@
 #include "sctpThread.h"
 #include "BuildRunName.h"
 
-#include "3rdparty/oranE2SM/E2SM-gNB-NRT-RANfunction-Definition.h"
-#include "BuildXml.h"
-#include "pugixml/src/pugixml.hpp"
+//#include "3rdparty/oranE2SM/E2SM-gNB-NRT-RANfunction-Definition.h"
+//#include "BuildXml.h"
+//#include "pugixml/src/pugixml.hpp"
 
 using namespace std;
 //using namespace std::placeholders;
@@ -68,12 +68,12 @@ double age() {
     return seconds_t(std::chrono::high_resolution_clock::now() - start_time).count();
 }
 
-double approx_CPU_MHz(unsigned sleeptime) {
+double approx_CPU_MHz(unsigned sleepTime) {
     using namespace std::chrono_literals;
     uint32_t aux = 0;
     uint64_t cycles_start = rdtscp(aux);
     double time_start = age();
-    std::this_thread::sleep_for(sleeptime * 1ms);
+    std::this_thread::sleep_for(sleepTime * 1ms);
     uint64_t elapsed_cycles = rdtscp(aux) - cycles_start;
     double elapsed_time = age() - time_start;
     return elapsed_cycles / elapsed_time;
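
The hunk above only renames sleeptime to sleepTime. For orientation, the rdtscp() helper it relies on can be expressed with the x86 intrinsic as in the sketch below; the helper body and the steady_clock timing are illustrative assumptions, not the header's actual code. The result is cycles per microsecond, i.e. roughly the CPU clock in MHz.

    #include <chrono>
    #include <cstdint>
    #include <iostream>
    #include <thread>
    #include <x86intrin.h>   // __rdtscp, x86-64 only

    // Read the time-stamp counter; aux receives the core's IA32_TSC_AUX value.
    static inline uint64_t rdtscp(uint32_t &aux) {
        return __rdtscp(&aux);
    }

    // Estimate the TSC rate by counting cycles across a fixed sleep.
    double approx_CPU_MHz(unsigned sleepTimeMs) {
        uint32_t aux = 0;
        uint64_t cyclesStart = rdtscp(aux);
        auto wallStart = std::chrono::steady_clock::now();
        std::this_thread::sleep_for(std::chrono::milliseconds(sleepTimeMs));
        uint64_t elapsedCycles = rdtscp(aux) - cyclesStart;
        std::chrono::duration<double, std::micro> elapsedUs =
                std::chrono::steady_clock::now() - wallStart;
        return elapsedCycles / elapsedUs.count();   // cycles per microsecond ~= MHz
    }

    int main() {
        std::cout << "~" << approx_CPU_MHz(100) << " MHz\n";
    }
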
@@ -85,13 +85,18 @@ std::atomic<int64_t> num_of_XAPP_messages{0};
 static long transactionCounter = 0;
 
 int buildListeningPort(sctp_params_t &sctpParams) {
-    sctpParams.listenFD = socket (AF_INET6, SOCK_STREAM, IPPROTO_SCTP);
-    struct sockaddr_in6 servaddr {};
-    servaddr.sin6_family = AF_INET6;
-    servaddr.sin6_addr   = in6addr_any;
-    servaddr.sin6_port = htons(sctpParams.sctpPort);
-    if (bind(sctpParams.listenFD, (SA *)&servaddr, sizeof(servaddr)) < 0 ) {
-        mdclog_write(MDCLOG_ERR, "Error binding. %s\n", strerror(errno));
+    sctpParams.listenFD = socket(AF_INET6, SOCK_STREAM, IPPROTO_SCTP);
+    if (sctpParams.listenFD <= 0) {
+        mdclog_write(MDCLOG_ERR, "Error Opening socket, %s", strerror(errno));
+        return -1;
+    }
+
+    struct sockaddr_in6 serverAddress {};
+    serverAddress.sin6_family = AF_INET6;
+    serverAddress.sin6_addr   = in6addr_any;
+    serverAddress.sin6_port = htons(sctpParams.sctpPort);
+    if (bind(sctpParams.listenFD, (SA *)&serverAddress, sizeof(serverAddress)) < 0 ) {
+        mdclog_write(MDCLOG_ERR, "Error binding port %d. %s", sctpParams.sctpPort, strerror(errno));
         return -1;
     }
     if (setSocketNoBlocking(sctpParams.listenFD) == -1) {
@@ -99,12 +104,12 @@ int buildListeningPort(sctp_params_t &sctpParams) {
         return -1;
     }
     if (mdclog_level_get() >= MDCLOG_DEBUG) {
-        struct sockaddr_in6 cliaddr {};
-        socklen_t len = sizeof(cliaddr);
-        getsockname(sctpParams.listenFD, (SA *)&cliaddr, &len);
+        struct sockaddr_in6 clientAddress {};
+        socklen_t len = sizeof(clientAddress);
+        getsockname(sctpParams.listenFD, (SA *)&clientAddress, &len);
         char buff[1024] {};
-        inet_ntop(AF_INET6, &cliaddr.sin6_addr, buff, sizeof(buff));
-        mdclog_write(MDCLOG_DEBUG, "My address: %s, port %d\n", buff, htons(cliaddr.sin6_port));
+        inet_ntop(AF_INET6, &clientAddress.sin6_addr, buff, sizeof(buff));
+        mdclog_write(MDCLOG_DEBUG, "My address: %s, port %d\n", buff, htons(clientAddress.sin6_port));
     }
 
     if (listen(sctpParams.listenFD, SOMAXCONN) < 0) {
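
The two hunks above add a return-value check on socket() and a more informative bind() error that now includes the port. Outside the E2T structures, the same listen-socket setup is roughly the sketch below; replacing setSocketNoBlocking() with a direct fcntl() call is an assumption about what that helper does.

    #include <cerrno>
    #include <cstdio>
    #include <cstring>
    #include <fcntl.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>

    // Create a non-blocking IPv6 SCTP listening socket; returns the fd or -1.
    int buildListeningPort(uint16_t sctpPort) {
        int listenFD = socket(AF_INET6, SOCK_STREAM, IPPROTO_SCTP);
        if (listenFD < 0) {                        // the patch treats <= 0 as failure
            std::fprintf(stderr, "socket: %s\n", std::strerror(errno));
            return -1;
        }
        sockaddr_in6 serverAddress{};
        serverAddress.sin6_family = AF_INET6;
        serverAddress.sin6_addr   = in6addr_any;   // listen on every local address
        serverAddress.sin6_port   = htons(sctpPort);
        if (bind(listenFD, reinterpret_cast<sockaddr *>(&serverAddress),
                 sizeof(serverAddress)) < 0) {
            std::fprintf(stderr, "bind port %d: %s\n", sctpPort, std::strerror(errno));
            close(listenFD);
            return -1;
        }
        // Non-blocking so the epoll loop never stalls in accept().
        if (fcntl(listenFD, F_SETFL, fcntl(listenFD, F_GETFL, 0) | O_NONBLOCK) == -1) {
            close(listenFD);
            return -1;
        }
        if (listen(listenFD, SOMAXCONN) < 0) {
            close(listenFD);
            return -1;
        }
        return listenFD;
    }
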
@@ -147,7 +152,7 @@ int buildConfiguration(sctp_params_t &sctpParams) {
     }
     int rmrPort = conf.getIntValue("nano");
     if (rmrPort == -1) {
-        mdclog_write(MDCLOG_ERR, "illigal RMR port ");
+        mdclog_write(MDCLOG_ERR, "illegal RMR port ");
         return -1;
     }
     sctpParams.rmrPort = (uint16_t)rmrPort;
@@ -155,7 +160,7 @@ int buildConfiguration(sctp_params_t &sctpParams) {
 
     auto tmpStr = conf.getStringValue("loglevel");
     if (tmpStr.length() == 0) {
-        mdclog_write(MDCLOG_ERR, "illigal loglevel. Set loglevel to MDCLOG_INFO");
+        mdclog_write(MDCLOG_ERR, "illegal loglevel. Set loglevel to MDCLOG_INFO");
         tmpStr = "info";
     }
     transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
@@ -169,14 +174,14 @@ int buildConfiguration(sctp_params_t &sctpParams) {
     } else if ((tmpStr.compare("error")) == 0) {
         sctpParams.logLevel = MDCLOG_ERR;
     } else {
-        mdclog_write(MDCLOG_ERR, "illigal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
+        mdclog_write(MDCLOG_ERR, "illegal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
         sctpParams.logLevel = MDCLOG_INFO;
     }
     mdclog_level_set(sctpParams.logLevel);
 
     tmpStr = conf.getStringValue("volume");
     if (tmpStr.length() == 0) {
-        mdclog_write(MDCLOG_ERR, "illigal volume.");
+        mdclog_write(MDCLOG_ERR, "illegal volume.");
         return -1;
     }
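
The hunks in buildConfiguration() are mostly spelling corrections in the error logs ("illigal" becomes "illegal"). The loglevel handling a few lines up reduces to a lower-case-and-compare mapping; a sketch of that mapping follows, where the mdclog/mdclog.h header path, the mdclog_severity_t type name and the accepted strings other than "error" are assumptions about the logging library rather than code taken from this file.

    #include <algorithm>
    #include <string>
    #include <mdclog/mdclog.h>

    // Map a configuration string to an mdclog level, defaulting to INFO.
    static mdclog_severity_t toLogLevel(std::string level) {
        std::transform(level.begin(), level.end(), level.begin(), ::tolower);
        if (level == "debug")   return MDCLOG_DEBUG;
        if (level == "info")    return MDCLOG_INFO;
        if (level == "warning") return MDCLOG_WARN;
        if (level == "error")   return MDCLOG_ERR;
        mdclog_write(MDCLOG_ERR, "illegal loglevel = %s. Set loglevel to MDCLOG_INFO", level.c_str());
        return MDCLOG_INFO;
    }
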
 
@@ -193,37 +198,37 @@ int buildConfiguration(sctp_params_t &sctpParams) {
 
     sctpParams.myIP = conf.getStringValue("local-ip");
     if (sctpParams.myIP.length() == 0) {
-        mdclog_write(MDCLOG_ERR, "illigal local-ip.");
+        mdclog_write(MDCLOG_ERR, "illegal local-ip.");
         return -1;
     }
 
     int sctpPort = conf.getIntValue("sctp-port");
     if (sctpPort == -1) {
-        mdclog_write(MDCLOG_ERR, "illigal SCTP port ");
+        mdclog_write(MDCLOG_ERR, "illegal SCTP port ");
         return -1;
     }
     sctpParams.sctpPort = (uint16_t)sctpPort;
 
     sctpParams.fqdn = conf.getStringValue("external-fqdn");
     if (sctpParams.fqdn.length() == 0) {
-        mdclog_write(MDCLOG_ERR, "illigal external-fqdn");
+        mdclog_write(MDCLOG_ERR, "illegal external-fqdn");
         return -1;
     }
 
     std::string pod = conf.getStringValue("pod_name");
     if (pod.length() == 0) {
-        mdclog_write(MDCLOG_ERR, "illigal pod_name in config file");
+        mdclog_write(MDCLOG_ERR, "illegal pod_name in config file");
         return -1;
     }
     auto *podName = getenv(pod.c_str());
     if (podName == nullptr) {
-        mdclog_write(MDCLOG_ERR, "illigal pod_name or environment varible not exists : %s", pod.c_str());
+        mdclog_write(MDCLOG_ERR, "illegal pod_name or environment variable not exists : %s", pod.c_str());
         return -1;
 
     } else {
         sctpParams.podName.assign(podName);
         if (sctpParams.podName.length() == 0) {
-            mdclog_write(MDCLOG_ERR, "illigal pod_name");
+            mdclog_write(MDCLOG_ERR, "illegal pod_name");
             return -1;
         }
     }
@@ -240,24 +245,6 @@ int buildConfiguration(sctp_params_t &sctpParams) {
     jsonTrace = sctpParams.trace;
 
     sctpParams.epollTimeOut = -1;
-    tmpStr = conf.getStringValue("prometheusMode");
-    transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
-    if (tmpStr.length() != 0) {
-        if (tmpStr.compare("push") == 0) {
-            sctpParams.prometheusPushAddress = tmpStr;
-            auto timeout = conf.getIntValue("prometheusPushTimeOut");
-            if (timeout >= 5 && timeout <= 300) {
-                sctpParams.epollTimeOut = timeout * 1000;
-            } else {
-                sctpParams.epollTimeOut = 10 * 1000;
-            }
-        }
-    }
-
-    tmpStr = conf.getStringValue("prometheusPushAddr");
-    if (tmpStr.length() != 0) {
-        sctpParams.prometheusMode = tmpStr;
-    }
 
     tmpStr = conf.getStringValue("prometheusPort");
     if (tmpStr.length() != 0) {
@@ -293,7 +280,7 @@ int buildConfiguration(sctp_params_t &sctpParams) {
             //keywords::format = "[%TimeStamp%]: %Message%" // use each tmpStr with time stamp
     );
 
-    // Setup a destination folder for collecting rotated (closed) files --since the same volumn can use rename()
+    // Setup a destination folder for collecting rotated (closed) files --since the same volume can use rename()
     boostLogger->locked_backend()->set_file_collector(sinks::file::make_collector(
             keywords::target = sctpParams.volume
     ));
@@ -309,17 +296,21 @@ int buildConfiguration(sctp_params_t &sctpParams) {
     return 0;
 }
 
-static std::string GetHostName() {
-    char hostname[1024];
+void startPrometheus(sctp_params_t &sctpParams) {
+    sctpParams.prometheusFamily = &BuildCounter()
+            .Name("E2T")
+            .Help("E2T message counter")
+            .Labels({{"POD_NAME", sctpParams.podName}})
+            .Register(*sctpParams.prometheusRegistry);
 
-    if (::gethostname(hostname, sizeof(hostname))) {
-        return {};
+    string prometheusPath = sctpParams.prometheusPort + "," + "[::]:" + sctpParams.prometheusPort;
+    if (mdclog_level_get() >= MDCLOG_DEBUG) {
+        mdclog_write(MDCLOG_DEBUG, "Start Prometheus Pull mode on %s", prometheusPath.c_str());
     }
-    return hostname;
+    sctpParams.prometheusExposer = new Exposer(prometheusPath, 1);
+    sctpParams.prometheusExposer->RegisterCollectable(sctpParams.prometheusRegistry);
 }
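
startPrometheus() replaces both the deleted GetHostName()/push-gateway path and the counter-family registration that previously sat in main(); the family label also changes from "E" to "POD_NAME". A self-contained sketch of the same pull-mode wiring with prometheus-cpp follows; the port, pod name and RAN label values are placeholders.

    #include <memory>
    #include <prometheus/counter.h>
    #include <prometheus/exposer.h>
    #include <prometheus/registry.h>

    int main() {
        using namespace prometheus;

        // The registry owns the metric families; the Exposer serves them over HTTP
        // so Prometheus scrapes them (pull mode) -- no push gateway involved.
        auto registry = std::make_shared<Registry>();
        Exposer exposer{"0.0.0.0:8088"};
        exposer.RegisterCollectable(registry);

        auto &family = BuildCounter()
                .Name("E2T")
                .Help("E2T message counter")
                .Labels({{"POD_NAME", "e2term-0"}})      // placeholder pod name
                .Register(*registry);

        // One counter per RAN/direction/message pair, as buildPrometheusList() adds.
        auto &setupRequests = family.Add({{"gnb_placeholder", "IN"}, {"SetupRequest", "Messages"}});
        setupRequests.Increment();
        // ... run the real work; metrics stay scrapeable while the Exposer lives.
    }
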
 
-
-
 int main(const int argc, char **argv) {
     sctp_params_t sctpParams;
 
@@ -364,14 +355,9 @@ int main(const int argc, char **argv) {
     //auto registry = std::make_shared<Registry>();
     sctpParams.prometheusRegistry = std::make_shared<Registry>();
 
-    //sctpParams.promtheusFamily = new Family<Counter>("E2T", "E2T message counter", {{"E", sctpParams.podName}});
-
-    sctpParams.prometheusFamily = &BuildCounter()
-            .Name("E2T")
-            .Help("E2T message counter")
-            .Labels({{"E", sctpParams.podName}})
-            .Register(*sctpParams.prometheusRegistry);
+    //sctpParams.prometheusFamily = new Family<Counter>("E2T", "E2T message counter", {{"E", sctpParams.podName}});
 
+    startPrometheus(sctpParams);
 
     // start epoll
     sctpParams.epoll_fd = epoll_create1(0);
@@ -405,28 +391,7 @@ int main(const int argc, char **argv) {
     std::vector<std::thread> threads(num_cpus);
 //    std::vector<std::thread> threads;
 
-    if (sctpParams.prometheusMode.compare("pull") == 0) {
-        sctpParams.prometheusExposer = new Exposer(sctpParams.myIP + ":" + sctpParams.prometheusPort, 1);
-        sctpParams.prometheusExposer->RegisterCollectable(sctpParams.prometheusRegistry);
-    } else if (sctpParams.prometheusMode.compare("push") == 0) {
-        const auto labels = Gateway::GetInstanceLabel(GetHostName());
-        string address {};
-        string port {};
-        char ch = ':';
-        auto found = sctpParams.prometheusPushAddress.find_last_of(ch);
-        // If string doesn't have
-        // character ch present in it
-        if (found != string::npos) {
-            address = sctpParams.prometheusPushAddress.substr(0,found);
-            port = sctpParams.prometheusPushAddress.substr(found + 1);
-            sctpParams.prometheusGateway = new Gateway(address, port, "E2T", labels);
-            sctpParams.prometheusGateway->RegisterCollectable(sctpParams.prometheusRegistry);
-        } else {
-            mdclog_write(MDCLOG_ERR, "failed to build Prometheus gateway no stats will be sent");
-        }
-    }
-
-    num_cpus = 1;
+    num_cpus = 3;
     for (unsigned int i = 0; i < num_cpus; i++) {
         threads[i] = std::thread(listener, &sctpParams);
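
main() now starts three listener threads instead of one, and the pull/push selection that used to precede this loop is gone because startPrometheus() has already registered the exposer. The fan-out itself is the standard std::thread pattern; the sketch below substitutes a placeholder parameter struct for sctp_params_t.

    #include <thread>
    #include <vector>

    struct WorkerParams { int epollFd = -1; };     // stand-in for sctp_params_t

    void listener(WorkerParams *params) {
        // each worker runs its own epoll_wait() loop over the shared descriptors
        (void)params;
    }

    int main() {
        WorkerParams params;
        const unsigned numWorkers = 3;             // the patch pins this to 3
        std::vector<std::thread> workers;
        workers.reserve(numWorkers);
        for (unsigned i = 0; i < numWorkers; ++i) {
            workers.emplace_back(listener, &params);
        }
        for (auto &w : workers) {                  // keep main alive until workers exit
            w.join();
        }
    }
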
 
@@ -460,7 +425,7 @@ void handleTermInit(sctp_params_t &sctpParams) {
         auto xappMessages = num_of_XAPP_messages.load(std::memory_order_acquire);
         if (xappMessages > 0) {
             if (mdclog_level_get() >=  MDCLOG_INFO) {
-                mdclog_write(MDCLOG_INFO, "Got a message from some appliction, stop sending E2_TERM_INIT");
+                mdclog_write(MDCLOG_INFO, "Got a message from some application, stop sending E2_TERM_INIT");
             }
             return;
         }
@@ -489,7 +454,7 @@ void sendTermInit(sctp_params_t &sctpParams) {
         } else if (msg->state == 0) {
             rmr_free_msg(msg);
             if (mdclog_level_get() >=  MDCLOG_INFO) {
-                mdclog_write(MDCLOG_INFO, "E2_TERM_INIT succsesfuly sent ");
+                mdclog_write(MDCLOG_INFO, "E2_TERM_INIT successfully sent ");
             }
             return;
         } else {
@@ -517,7 +482,7 @@ cxxopts::ParseResult parse(int argc, char *argv[], sctp_params_t &sctpParams) {
             ("f,file", "config file name", cxxopts::value<std::string>(sctpParams.configFileName)->default_value("config.conf"))
             ("h,help", "Print help");
 
-    auto result = options.parse(argc, argv);
+    auto result = options.parse(argc, (const char **&)argv);
 
     if (result.count("help")) {
         std::cout << options.help({""}) << std::endl;
@@ -535,9 +500,6 @@ int buildInotify(sctp_params_t &sctpParams) {
     sctpParams.inotifyFD = inotify_init1(IN_NONBLOCK);
     if (sctpParams.inotifyFD == -1) {
         mdclog_write(MDCLOG_ERR, "Failed to init inotify (inotify_init1) %s", strerror(errno));
-        close(sctpParams.rmrListenFd);
-        rmr_close(sctpParams.rmrCtx);
-        close(sctpParams.epoll_fd);
         return -1;
     }
 
@@ -615,23 +577,19 @@ void listener(sctp_params_t *params) {
 
     ReportingMessages_t message {};
 
-//    for (int i = 0; i < MAX_RMR_BUFF_ARRY; i++) {
+//    for (int i = 0; i < MAX_RMR_BUFF_ARRAY; i++) {
 //        rmrMessageBuffer.rcvBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
 //        rmrMessageBuffer.sendBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
 //    }
 
-    bool gatewayflag = false;
     while (true) {
-        future<int> gateWay;
-
         if (mdclog_level_get() >= MDCLOG_DEBUG) {
             mdclog_write(MDCLOG_DEBUG, "Start EPOLL Wait. Timeout = %d", params->epollTimeOut);
         }
         auto numOfEvents = epoll_wait(params->epoll_fd, events, MAXEVENTS, params->epollTimeOut);
-        if (numOfEvents == 0) {
-            if (params->prometheusGateway != nullptr) {
-                gateWay = params->prometheusGateway->AsyncPush();
-                gatewayflag = true;
+        if (numOfEvents == 0) { // time out
+            if (mdclog_level_get() >= MDCLOG_DEBUG) {
+                mdclog_write(MDCLOG_DEBUG, "got epoll timeout");
             }
             continue;
         } else if (numOfEvents < 0) {
@@ -644,15 +602,6 @@ void listener(sctp_params_t *params) {
             mdclog_write(MDCLOG_ERR, "Epoll wait failed, errno = %s", strerror(errno));
             return;
         }
-        if (gatewayflag) {
-            gatewayflag = false;
-            auto rc = gateWay.get();
-            if (rc != 200) {
-                mdclog_write(MDCLOG_ERR, "Async Send to Promethues faild with Return Code %d", rc);
-            } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
-                mdclog_write(MDCLOG_DEBUG, "Stats sent to Prometheus");
-            }
-        }
         for (auto i = 0; i < numOfEvents; i++) {
             if (mdclog_level_get() >= MDCLOG_DEBUG) {
                 mdclog_write(MDCLOG_DEBUG, "handling epoll event %d out of %d", i + 1, numOfEvents);
@@ -679,6 +628,10 @@ void listener(sctp_params_t *params) {
 
                     in_len = sizeof(in_addr);
                     auto *peerInfo = (ConnectedCU_t *)calloc(1, sizeof(ConnectedCU_t));
+                    if(peerInfo == nullptr){
+                        mdclog_write(MDCLOG_ERR, "calloc failed");
+                        break;
+                    }
                     peerInfo->sctpParams = params;
                     peerInfo->fileDescriptor = accept(params->listenFD, &in_addr, &in_len);
                     if (peerInfo->fileDescriptor == -1) {
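
The added null check prevents a crash when calloc() fails while accepting a new E2 node connection. Isolated from the E2T structures, that accept path looks roughly like the sketch below, with ConnectedCU_t reduced to the single field the hunk touches.

    #include <cerrno>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>
    #include <sys/socket.h>
    #include <unistd.h>

    struct PeerInfo {              // minimal stand-in for ConnectedCU_t
        int fileDescriptor = -1;
    };

    // Accept one pending connection; returns nullptr when nothing is pending
    // (EAGAIN/EWOULDBLOCK on a non-blocking listener) or on any failure.
    PeerInfo *acceptPeer(int listenFd) {
        auto *peer = static_cast<PeerInfo *>(calloc(1, sizeof(PeerInfo)));
        if (peer == nullptr) {                       // the check this hunk adds
            std::fprintf(stderr, "calloc failed\n");
            return nullptr;
        }
        sockaddr inAddr{};
        socklen_t inLen = sizeof(inAddr);
        peer->fileDescriptor = accept(listenFd, &inAddr, &inLen);
        if (peer->fileDescriptor == -1) {
            if (errno != EAGAIN && errno != EWOULDBLOCK) {
                std::fprintf(stderr, "accept: %s\n", std::strerror(errno));
            }
            free(peer);
            return nullptr;
        }
        return peer;                                 // caller registers fd with epoll
    }
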
@@ -719,10 +672,10 @@ void listener(sctp_params_t *params) {
                 }
             } else if (params->rmrListenFd == events[i].data.fd) {
                 // got message from XAPP
-                num_of_XAPP_messages.fetch_add(1, std::memory_order_release);
+                //num_of_XAPP_messages.fetch_add(1, std::memory_order_release);
                 num_of_messages.fetch_add(1, std::memory_order_release);
                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
-                    mdclog_write(MDCLOG_DEBUG, "new message from RMR");
+                    mdclog_write(MDCLOG_DEBUG, "new RMR message");
                 }
                 if (receiveXappMessages(params->sctpMap,
                                         rmrMessageBuffer,
@@ -828,7 +781,7 @@ void handleConfigChange(sctp_params_t *sctpParams) {
 
                 auto tmpStr = conf.getStringValue("loglevel");
                 if (tmpStr.length() == 0) {
-                    mdclog_write(MDCLOG_ERR, "illigal loglevel. Set loglevel to MDCLOG_INFO");
+                    mdclog_write(MDCLOG_ERR, "illegal loglevel. Set loglevel to MDCLOG_INFO");
                     tmpStr = "info";
                 }
                 transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
@@ -846,7 +799,7 @@ void handleConfigChange(sctp_params_t *sctpParams) {
                     mdclog_write(MDCLOG_INFO, "Log level set to MDCLOG_ERR");
                     sctpParams->logLevel = MDCLOG_ERR;
                 } else {
-                    mdclog_write(MDCLOG_ERR, "illigal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
+                    mdclog_write(MDCLOG_ERR, "illegal loglevel = %s. Set loglevel to MDCLOG_INFO", tmpStr.c_str());
                     sctpParams->logLevel = MDCLOG_INFO;
                 }
                 mdclog_level_set(sctpParams->logLevel);
@@ -854,7 +807,7 @@ void handleConfigChange(sctp_params_t *sctpParams) {
 
                 tmpStr = conf.getStringValue("trace");
                 if (tmpStr.length() == 0) {
-                    mdclog_write(MDCLOG_ERR, "illigal trace. Set trace to stop");
+                    mdclog_write(MDCLOG_ERR, "illegal trace. Set trace to stop");
                     tmpStr = "stop";
                 }
 
@@ -871,15 +824,6 @@ void handleConfigChange(sctp_params_t *sctpParams) {
                 }
                 jsonTrace = sctpParams->trace;
 
-                if (sctpParams->prometheusMode.compare("push") == 0) {
-                    auto timeout = conf.getIntValue("prometheusPushTimeOut");
-                    if (timeout >= 5 && timeout <= 300) {
-                        sctpParams->epollTimeOut = timeout * 1000;
-                    } else {
-                        mdclog_write(MDCLOG_ERR, "prometheusPushTimeOut set wrong value %d, values are [5..300]",
-                                     timeout);
-                    }
-                }
 
                 endlessLoop = false;
             }
@@ -1032,13 +976,13 @@ void cleanHashEntry(ConnectedCU_t *val, Sctp_Map_t *m) {
 
 /**
  *
- * @param fd file discriptor
+ * @param fd file descriptor
  * @param data the asn data to send
  * @param len  length of the data
  * @param enodbName the enodbName as in the map for printing purpose
  * @param m map host information
  * @param mtype message number
- * @return 0 success, anegative number on fail
+ * @return 0 success, a negative number on fail
  */
 int sendSctpMsg(ConnectedCU_t *peerInfo, ReportingMessages_t &message, Sctp_Map_t *m) {
     auto loglevel = mdclog_level_get();
@@ -1129,7 +1073,7 @@ int receiveDataFromSctp(struct epoll_event *events,
     message.peerInfo = (ConnectedCU_t *)events->data.ptr;
 
     struct timespec start{0, 0};
-    struct timespec decodestart{0, 0};
+    struct timespec decodeStart{0, 0};
     struct timespec end{0, 0};
 
     E2AP_PDU_t *pdu = nullptr;
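
Further down, receiveDataFromSctp() decodes the raw SCTP payload into this pdu pointer with asn1c's generic decoder and then switches on pdu->present. The call pattern, reduced to its essentials; the E2AP-PDU.h header name is the one asn1c generates, and the error handling here is illustrative.

    #include <cstddef>
    #include "E2AP-PDU.h"        // asn1c-generated E2AP definitions

    // Sketch: decode an aligned-PER E2AP PDU from a byte buffer and free it again.
    static int decodeE2apPdu(const unsigned char *buf, size_t len) {
        E2AP_PDU_t *pdu = nullptr;
        asn_dec_rval_t rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER,
                                         &asn_DEF_E2AP_PDU, (void **)&pdu, buf, len);
        if (rval.code != RC_OK) {
            ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);      // safe on partial results
            return -1;
        }
        switch (pdu->present) {
            case E2AP_PDU_PR_initiatingMessage:   /* request from the E2 node */ break;
            case E2AP_PDU_PR_successfulOutcome:   /* positive response        */ break;
            case E2AP_PDU_PR_unsuccessfulOutcome: /* failure response         */ break;
            default:                                                             break;
        }
        ASN_STRUCT_FREE(asn_DEF_E2AP_PDU, pdu);
        return 0;
    }
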
@@ -1178,7 +1122,7 @@ int receiveDataFromSctp(struct epoll_event *events,
         }
 
         if (loglevel >= MDCLOG_DEBUG) {
-            char printBuffer[4096]{};
+            char printBuffer[RECEIVE_SCTP_BUFFER_SIZE]{};
             char *tmp = printBuffer;
             for (size_t i = 0; i < (size_t)message.message.asnLength; ++i) {
                 snprintf(tmp, 3, "%02x", message.message.asndata[i]);
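
The debug hex dump above now sizes printBuffer with RECEIVE_SCTP_BUFFER_SIZE instead of a fixed 4096, since every payload byte expands to two hex characters. A standalone version of the dump makes the sizing rule explicit:

    #include <cstddef>
    #include <cstdio>
    #include <string>

    // Render len bytes as lowercase hex. The output needs 2 characters per byte
    // (plus a NUL while building), which is why the print buffer must be at least
    // twice the size of the largest payload it may receive.
    std::string hexDump(const unsigned char *data, size_t len) {
        std::string out;
        out.reserve(2 * len);
        char byteHex[3];
        for (size_t i = 0; i < len; ++i) {
            std::snprintf(byteHex, sizeof(byteHex), "%02x", data[i]);
            out += byteHex;
        }
        return out;
    }
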
@@ -1190,7 +1134,7 @@ int receiveDataFromSctp(struct epoll_event *events,
                          message.peerInfo->enodbName, end.tv_sec - start.tv_sec, end.tv_nsec - start.tv_nsec);
             mdclog_write(MDCLOG_DEBUG, "PDU buffer length = %ld, data =  : %s", message.message.asnLength,
                          printBuffer);
-            clock_gettime(CLOCK_MONOTONIC, &decodestart);
+            clock_gettime(CLOCK_MONOTONIC, &decodeStart);
         }
 
         auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
@@ -1204,13 +1148,13 @@ int receiveDataFromSctp(struct epoll_event *events,
         if (loglevel >= MDCLOG_DEBUG) {
             clock_gettime(CLOCK_MONOTONIC, &end);
             mdclog_write(MDCLOG_DEBUG, "After Encoding E2AP PDU for : %s, Read time is : %ld seconds, %ld nanoseconds",
-                         message.peerInfo->enodbName, end.tv_sec - decodestart.tv_sec, end.tv_nsec - decodestart.tv_nsec);
+                         message.peerInfo->enodbName, end.tv_sec - decodeStart.tv_sec, end.tv_nsec - decodeStart.tv_nsec);
             char *printBuffer;
             size_t size;
             FILE *stream = open_memstream(&printBuffer, &size);
             asn_fprint(stream, &asn_DEF_E2AP_PDU, pdu);
             mdclog_write(MDCLOG_DEBUG, "Encoding E2AP PDU past : %s", printBuffer);
-            clock_gettime(CLOCK_MONOTONIC, &decodestart);
+            clock_gettime(CLOCK_MONOTONIC, &decodeStart);
         }
 
         switch (pdu->present) {
@@ -1219,7 +1163,7 @@ int receiveDataFromSctp(struct epoll_event *events,
                 break;
             }
             case E2AP_PDU_PR_successfulOutcome: { //successful outcome
-                asnSuccsesfulMsg(pdu, sctpMap, message,  rmrMessageBuffer);
+                asnSuccessfulMsg(pdu, sctpMap, message, rmrMessageBuffer);
                 break;
             }
             case E2AP_PDU_PR_unsuccessfulOutcome: { //Unsuccessful Outcome
@@ -1234,7 +1178,7 @@ int receiveDataFromSctp(struct epoll_event *events,
             clock_gettime(CLOCK_MONOTONIC, &end);
             mdclog_write(MDCLOG_DEBUG,
                          "After processing message and sent to rmr for : %s, Read time is : %ld seconds, %ld nanoseconds",
-                         message.peerInfo->enodbName, end.tv_sec - decodestart.tv_sec, end.tv_nsec - decodestart.tv_nsec);
+                         message.peerInfo->enodbName, end.tv_sec - decodeStart.tv_sec, end.tv_nsec - decodeStart.tv_nsec);
         }
         numOfMessages++;
         if (pdu != nullptr) {
@@ -1274,21 +1218,20 @@ int receiveDataFromSctp(struct epoll_event *events,
     return 0;
 }
 
-static void buildAndsendSetupRequest(ReportingMessages_t &message,
+static void buildAndSendSetupRequest(ReportingMessages_t &message,
                                      RmrMessagesBuffer_t &rmrMessageBuffer,
-                                     E2AP_PDU_t *pdu,
+                                     E2AP_PDU_t *pdu/*,
                                      string const &messageName,
                                      string const &ieName,
                                      vector<string> &functionsToAdd_v,
-                                     vector<string> &functionsToModified_v) {
+                                     vector<string> &functionsToModified_v*/) {
     auto logLevel = mdclog_level_get();
     // now we can send the data to e2Mgr
 
     asn_enc_rval_t er;
     auto buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
-    unsigned char *buffer;
+    unsigned char buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
     while (true) {
-        buffer = (unsigned char *)malloc(buffer_size);
         er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu, buffer, buffer_size);
         if (er.encoded == -1) {
             mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
@@ -1299,7 +1242,7 @@ static void buildAndsendSetupRequest(ReportingMessages_t &message,
                          (int) buffer_size,
                          asn_DEF_E2AP_PDU.name, buffer_size);
             buffer_size = er.encoded + 128;
-            free(buffer);
+//            free(buffer);
             continue;
         }
         buffer[er.encoded] = '\0';
@@ -1307,24 +1250,29 @@ static void buildAndsendSetupRequest(ReportingMessages_t &message,
     }
     // encode to xml
 
-    string res {};
-    if (!functionsToAdd_v.empty() || !functionsToModified_v.empty()) {
-        res = buildXmlData(messageName, ieName, functionsToAdd_v, functionsToModified_v, buffer, (size_t) er.encoded);
-    }
+    string res((char *)buffer);
+    res.erase(std::remove(res.begin(), res.end(), '\n'), res.end());
+    res.erase(std::remove(res.begin(), res.end(), '\t'), res.end());
+    res.erase(std::remove(res.begin(), res.end(), ' '), res.end());
+
+//    string res {};
+//    if (!functionsToAdd_v.empty() || !functionsToModified_v.empty()) {
+//        res = buildXmlData(messageName, ieName, functionsToAdd_v, functionsToModified_v, buffer, (size_t) er.encoded);
+//    }
     rmr_mbuf_t *rmrMsg;
-    if (res.length() == 0) {
-        rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, buffer_size + 256);
-        rmrMsg->len = snprintf((char *) rmrMsg->payload, RECEIVE_SCTP_BUFFER_SIZE * 2, "%s:%d|%s",
-                               message.peerInfo->sctpParams->myIP.c_str(),
-                               message.peerInfo->sctpParams->rmrPort,
-                               buffer);
-    } else {
+//    if (res.length() == 0) {
+//        rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, buffer_size + 256);
+//        rmrMsg->len = snprintf((char *) rmrMsg->payload, RECEIVE_SCTP_BUFFER_SIZE * 2, "%s:%d|%s",
+//                               message.peerInfo->sctpParams->myIP.c_str(),
+//                               message.peerInfo->sctpParams->rmrPort,
+//                               buffer);
+//    } else {
         rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, (int)res.length() + 256);
         rmrMsg->len = snprintf((char *) rmrMsg->payload, res.length() + 256, "%s:%d|%s",
                                message.peerInfo->sctpParams->myIP.c_str(),
                                message.peerInfo->sctpParams->rmrPort,
                                res.c_str());
-    }
+//    }
 
     if (logLevel >= MDCLOG_DEBUG) {
         mdclog_write(MDCLOG_DEBUG, "Setup request of size %d :\n %s\n", rmrMsg->len, rmrMsg->payload);
@@ -1370,9 +1318,9 @@ static void buildAndsendSetupRequest(ReportingMessages_t &message,
     if (rmrMsg != nullptr) {
         rmr_free_msg(rmrMsg);
     }
-    free(buffer);
 }
 
+#if 0
 int RAN_Function_list_To_Vector(RANfunctions_List_t& list, vector <string> &runFunXML_v) {
     auto index = 0;
     runFunXML_v.clear();
@@ -1465,79 +1413,80 @@ int collectServiceUpdate_RequestData(E2AP_PDU_t *pdu,
     return 0;
 }
 
+#endif
 
 
-void buildPrometheuslist(ConnectedCU_t *peerInfo, Family<Counter> *prometheusFamily) {
-    peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Messages"}});
-    peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Bytes"}});
+void buildPrometheusList(ConnectedCU_t *peerInfo, Family<Counter> *prometheusFamily) {
+    peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Messages"}});
+    peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Bytes"}});
 
-    peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Messages"}});
-    peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Bytes"}});
+    peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Messages"}});
+    peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Bytes"}});
 
-    peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICindication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Messages"}});
-    peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICindication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Bytes"}});
+    peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICindication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Messages"}});
+    peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICindication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Bytes"}});
 
-    peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Messages"}});
-    peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Bytes"}});
+    peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Messages"}});
+    peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Bytes"}});
 
-    peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Messages"}});
-    peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Bytes"}});
+    peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Messages"}});
+    peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Bytes"}});
     // ---------------------------------------------
-    peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Messages"}});
-    peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Bytes"}});
+    peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Messages"}});
+    peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Bytes"}});
 
-    peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Messages"}});
-    peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Bytes"}});
+    peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Messages"}});
+    peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Bytes"}});
 
-    peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Messages"}});
-    peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Bytes"}});
+    peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Messages"}});
+    peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Bytes"}});
 
-    peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Messages"}});
-    peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Bytes"}});
+    peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Messages"}});
+    peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Bytes"}});
     //-------------------------------------------------------------
 
-    peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Messages"}});
-    peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Bytes"}});
+    peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Messages"}});
+    peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Bytes"}});
 
-    peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Messages"}});
-    peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Bytes"}});
+    peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Messages"}});
+    peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Bytes"}});
 
-    peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Messages"}});
-    peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Bytes"}});
+    peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Messages"}});
+    peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Bytes"}});
 
     //====================================================================================
-    peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Messages"}});
-    peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Bytes"}});
+    peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Messages"}});
+    peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Bytes"}});
 
-    peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Messages"}});
-    peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Bytes"}});
+    peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Messages"}});
+    peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Bytes"}});
 
-    peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Messages"}});
-    peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICcontrol - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Bytes"}});
+    peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Messages"}});
+    peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Bytes"}});
 
-    peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceQuery - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Messages"}});
-    peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceQuery - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Bytes"}});
+    peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceQuery)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Messages"}});
+    peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceQuery)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Bytes"}});
 
-    peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Messages"}});
-    peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscription - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Bytes"}});
+    peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Messages"}});
+    peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Bytes"}});
 
-    peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Messages"}});
-    peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Bytes"}});
+    peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Messages"}});
+    peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Bytes"}});
     //---------------------------------------------------------------------------------------------------------
-    peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Messages"}});
-    peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Bytes"}});
+    peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Messages"}});
+    peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Bytes"}});
 
-    peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Messages"}});
-    peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Bytes"}});
+    peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Messages"}});
+    peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Bytes"}});
 
-    peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Messages"}});
-    peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Bytes"}});
+    peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Messages"}});
+    peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Bytes"}});
     //----------------------------------------------------------------------------------------------------------------
-    peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Messages"}});
-    peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Bytes"}});
+    peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Messages"}});
+    peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Bytes"}});
 
-    peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Messages"}});
-    peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate - 1)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Bytes"}});
+    peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Messages"}});
+    peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Bytes"}});
 }
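
The renamed buildPrometheusList() now indexes every counter slot by the E2AP procedure code itself rather than by the procedure code minus one, which requires the counter arrays in ConnectedCU_t to be dimensioned past the largest ProcedureCode_id_* value in use. In miniature, with the enum ordering and array sizes below being assumptions rather than the header's actual values:

    #include <string>
    #include <prometheus/counter.h>
    #include <prometheus/family.h>

    using prometheus::Counter;
    using prometheus::Family;

    // Illustrative stand-ins for the enums/array declared in sctpThread.h.
    enum Direction { IN_INITI, IN_SUCC, IN_UN_SUCC, OUT_INITI, OUT_SUCC, OUT_UN_SUCC, DIRECTIONS };
    enum Kind      { MSG_COUNTER, BYTES_COUNTER, COUNTER_KINDS };
    constexpr int MAX_PROCEDURE_ID = 16;   // must exceed the largest ProcedureCode_id_* used

    struct PeerCounters {
        Counter *counters[DIRECTIONS][COUNTER_KINDS][MAX_PROCEDURE_ID] = {};
    };

    // Register the inbound SetupRequest counters the way buildPrometheusList() does,
    // indexing by the procedure code itself rather than procedure code - 1.
    void addSetupCounters(PeerCounters &peer, Family<Counter> &family,
                          const std::string &ranName, int procedureCodeE2setup) {
        peer.counters[IN_INITI][MSG_COUNTER][procedureCodeE2setup] =
                &family.Add({{ranName, "IN"}, {"SetupRequest", "Messages"}});
        peer.counters[IN_INITI][BYTES_COUNTER][procedureCodeE2setup] =
                &family.Add({{ranName, "IN"}, {"SetupRequest", "Bytes"}});
        peer.counters[IN_INITI][MSG_COUNTER][procedureCodeE2setup]->Increment();
    }
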
 /**
  *
@@ -1549,8 +1498,7 @@ void buildPrometheuslist(ConnectedCU_t *peerInfo, Family<Counter> *prometheusFam
  */
 int collectSetupRequestData(E2AP_PDU_t *pdu,
                                      Sctp_Map_t *sctpMap,
-                                     ReportingMessages_t &message,
-                                     vector <string> &RANfunctionsAdded_v) {
+                                     ReportingMessages_t &message /*, vector <string> &RANfunctionsAdded_v*/) {
     memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
     for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.count; i++) {
         auto *ie = pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.array[i];
@@ -1559,14 +1507,14 @@ int collectSetupRequestData(E2AP_PDU_t *pdu,
             if (ie->value.present == E2setupRequestIEs__value_PR_GlobalE2node_ID) {
                 if (buildRanName(message.peerInfo->enodbName, ie) < 0) {
                     mdclog_write(MDCLOG_ERR, "Bad param in E2setupRequestIEs GlobalE2node_ID.\n");
-                    // no mesage will be sent
+                    // no message will be sent
                     return -1;
                 }
 
                 memcpy(message.message.enodbName, message.peerInfo->enodbName, strlen(message.peerInfo->enodbName));
                 sctpMap->setkey(message.message.enodbName, message.peerInfo);
             }
-        } else if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
+        } /*else if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
             if (ie->value.present == E2setupRequestIEs__value_PR_RANfunctions_List) {
                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
                     mdclog_write(MDCLOG_DEBUG, "Run function list have %d entries",
@@ -1576,12 +1524,12 @@ int collectSetupRequestData(E2AP_PDU_t *pdu,
                     return -1;
                 }
             }
-        }
-    }
-    if (mdclog_level_get() >= MDCLOG_DEBUG) {
-        mdclog_write(MDCLOG_DEBUG, "Run function vector have %ld entries",
-                     RANfunctionsAdded_v.size());
+        } */
     }
+//    if (mdclog_level_get() >= MDCLOG_DEBUG) {
+//        mdclog_write(MDCLOG_DEBUG, "Run function vector have %ld entries",
+//                     RANfunctionsAdded_v.size());
+//    }
     return 0;
 }
 
@@ -1641,52 +1589,52 @@ void asnInitiatingRequest(E2AP_PDU_t *pdu,
                 mdclog_write(MDCLOG_DEBUG, "Got E2setup");
             }
 
-            vector <string> RANfunctionsAdded_v;
-            vector <string> RANfunctionsModified_v;
-            RANfunctionsAdded_v.clear();
-            RANfunctionsModified_v.clear();
-            if (collectSetupRequestData(pdu, sctpMap, message, RANfunctionsAdded_v) != 0) {
+//            vector <string> RANfunctionsAdded_v;
+//            vector <string> RANfunctionsModified_v;
+//            RANfunctionsAdded_v.clear();
+//            RANfunctionsModified_v.clear();
+            if (collectSetupRequestData(pdu, sctpMap, message) != 0) {
                 break;
             }
 
-            buildPrometheuslist(message.peerInfo, message.peerInfo->sctpParams->prometheusFamily);
+            buildPrometheusList(message.peerInfo, message.peerInfo->sctpParams->prometheusFamily);
 
             string messageName("E2setupRequest");
             string ieName("E2setupRequestIEs");
             message.message.messageType = RIC_E2_SETUP_REQ;
-            message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_E2setup - 1]->Increment();
-            message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_E2setup - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
-            buildAndsendSetupRequest(message, rmrMessageBuffer, pdu, messageName, ieName, RANfunctionsAdded_v, RANfunctionsModified_v);
+            message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
+            message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment((double)message.message.asnLength);
+            buildAndSendSetupRequest(message, rmrMessageBuffer, pdu);
             break;
         }
         case ProcedureCode_id_RICserviceUpdate: {
             if (logLevel >= MDCLOG_DEBUG) {
                 mdclog_write(MDCLOG_DEBUG, "Got RICserviceUpdate %s", message.message.enodbName);
             }
-            vector <string> RANfunctionsAdded_v;
-            vector <string> RANfunctionsModified_v;
-            RANfunctionsAdded_v.clear();
-            RANfunctionsModified_v.clear();
-            if (collectServiceUpdate_RequestData(pdu, sctpMap, message,
-                                                 RANfunctionsAdded_v, RANfunctionsModified_v) != 0) {
-                break;
-            }
+//            vector <string> RANfunctionsAdded_v;
+//            vector <string> RANfunctionsModified_v;
+//            RANfunctionsAdded_v.clear();
+//            RANfunctionsModified_v.clear();
+//            if (collectServiceUpdate_RequestData(pdu, sctpMap, message,
+//                                                 RANfunctionsAdded_v, RANfunctionsModified_v) != 0) {
+//                break;
+//            }
 
             string messageName("RICserviceUpdate");
             string ieName("RICserviceUpdateIEs");
             message.message.messageType = RIC_SERVICE_UPDATE;
-            message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate - 1]->Increment();
-            message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
+            message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
+            message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment((double)message.message.asnLength);
 
-            buildAndsendSetupRequest(message, rmrMessageBuffer, pdu, messageName, ieName, RANfunctionsAdded_v, RANfunctionsModified_v);
+            buildAndSendSetupRequest(message, rmrMessageBuffer, pdu);
             break;
         }
         case ProcedureCode_id_ErrorIndication: {
             if (logLevel >= MDCLOG_DEBUG) {
                 mdclog_write(MDCLOG_DEBUG, "Got ErrorIndication %s", message.message.enodbName);
             }
-            message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication - 1]->Increment();
-            message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
+            message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication]->Increment();
+            message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication]->Increment((double)message.message.asnLength);
             if (sendRequestToXapp(message, RIC_ERROR_INDICATION, rmrMessageBuffer) != 0) {
                 mdclog_write(MDCLOG_ERR, "RIC_ERROR_INDICATION failed to send to xAPP");
             }
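
Throughout these handlers the byte counters now add message.message.asnLength (the decoded SCTP payload size) instead of rmrMessageBuffer.rcvMessage->len, and the message is then forwarded to the xApp over RMR. A reduced sketch of such a forward with the RMR 4.x mbuf API; the message type value and meid are placeholders supplied by the caller, and the real code reuses a preallocated send buffer rather than allocating per message.

    #include <cstring>
    #include <rmr/rmr.h>

    // Sketch: hand an already-encoded E2AP payload to an xApp over RMR.
    int sendToXapp(void *rmrCtx, int msgType, const unsigned char *payload, int len,
                   const char *meid) {
        rmr_mbuf_t *msg = rmr_alloc_msg(rmrCtx, len);
        if (msg == nullptr) {
            return -1;
        }
        msg->mtype  = msgType;                          // routing key for the RMR table
        msg->sub_id = -1;                               // no subscription id
        rmr_bytes2meid(msg, (const unsigned char *)meid, (int)strlen(meid));
        rmr_bytes2payload(msg, payload, len);           // copies data and sets msg->len
        msg = rmr_send_msg(rmrCtx, msg);                // returns the mbuf with state set
        int rc = (msg != nullptr && msg->state == RMR_OK) ? 0 : -1;
        if (msg != nullptr) {
            rmr_free_msg(msg);
        }
        return rc;
    }
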
@@ -1697,8 +1645,8 @@ void asnInitiatingRequest(E2AP_PDU_t *pdu,
                 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
             }
 
-            message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_Reset - 1]->Increment();
-            message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_Reset - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
+            message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
+            message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment((double)message.message.asnLength);
             if (XML_From_PER(message, rmrMessageBuffer) < 0) {
                 break;
             }
@@ -1741,12 +1689,12 @@ void asnInitiatingRequest(E2AP_PDU_t *pdu,
                                          ie->value.choice.RICrequestID.ricInstanceID,
                                          ie->value.choice.RICrequestID.ricRequestorID);
                         }
-                        message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICindication - 1]->Increment();
-                        message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICindication - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
+                        message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICindication]->Increment();
+                        message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICindication]->Increment((double)message.message.asnLength);
                         sendRmrMessage(rmrMessageBuffer, message);
                         messageSent = true;
                     } else {
-                        mdclog_write(MDCLOG_ERR, "RIC request id missing illigal request");
+                        mdclog_write(MDCLOG_ERR, "RIC request id missing illegal request");
                     }
                 }
                 if (messageSent) {
@@ -1772,7 +1720,7 @@ void asnInitiatingRequest(E2AP_PDU_t *pdu,
  * @param message
  * @param rmrMessageBuffer
  */
-void asnSuccsesfulMsg(E2AP_PDU_t *pdu,
+void asnSuccessfulMsg(E2AP_PDU_t *pdu,
                       Sctp_Map_t *sctpMap,
                       ReportingMessages_t &message,
                       RmrMessagesBuffer_t &rmrMessageBuffer) {
@@ -1786,8 +1734,8 @@ void asnSuccsesfulMsg(E2AP_PDU_t *pdu,
             if (logLevel >= MDCLOG_DEBUG) {
                 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
             }
-            message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_Reset - 1]->Increment();
-            message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
+            message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
+            message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment((double)message.message.asnLength);
             if (XML_From_PER(message, rmrMessageBuffer) < 0) {
                 break;
             }
@@ -1824,12 +1772,12 @@ void asnSuccsesfulMsg(E2AP_PDU_t *pdu,
                                        (unsigned char *)message.message.enodbName,
                                        strlen(message.message.enodbName));
 
-                        message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment();
-                        message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
+                        message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
+                        message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment((double)message.message.asnLength);
                         sendRmrMessage(rmrMessageBuffer, message);
                         messageSent = true;
                     } else {
-                        mdclog_write(MDCLOG_ERR, "RIC request id missing illigal request");
+                        mdclog_write(MDCLOG_ERR, "RIC request id missing illegal request");
                     }
                 }
                 if (messageSent) {
@@ -1843,8 +1791,8 @@ void asnSuccsesfulMsg(E2AP_PDU_t *pdu,
             if (logLevel >= MDCLOG_DEBUG) {
                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
             }
-            message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment();
-            message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
+            message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
+            message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment((double)message.message.asnLength);
             if (sendRequestToXapp(message, RIC_SUB_RESP, rmrMessageBuffer) != 0) {
                 mdclog_write(MDCLOG_ERR, "Subscription successful message failed to send to xAPP");
             }
@@ -1854,8 +1802,8 @@ void asnSuccsesfulMsg(E2AP_PDU_t *pdu,
             if (logLevel >= MDCLOG_DEBUG) {
                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
             }
-            message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment();
-            message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
+            message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
+            message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment((double)message.message.asnLength);
             if (sendRequestToXapp(message, RIC_SUB_DEL_RESP, rmrMessageBuffer) != 0) {
                 mdclog_write(MDCLOG_ERR, "Subscription delete successful message failed to send to xAPP");
             }
@@ -1912,12 +1860,12 @@ void asnUnSuccsesfulMsg(E2AP_PDU_t *pdu,
                         rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
                         rmr_bytes2meid(rmrMessageBuffer.sendMessage, (unsigned char *) message.message.enodbName,
                                        strlen(message.message.enodbName));
-                        message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment();
-                        message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
+                        message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
+                        message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment((double)message.message.asnLength);
                         sendRmrMessage(rmrMessageBuffer, message);
                         messageSent = true;
                     } else {
-                        mdclog_write(MDCLOG_ERR, "RIC request id missing illigal request");
+                        mdclog_write(MDCLOG_ERR, "RIC request id missing illegal request");
                     }
                 }
                 if (messageSent) {
@@ -1930,8 +1878,8 @@ void asnUnSuccsesfulMsg(E2AP_PDU_t *pdu,
             if (logLevel >= MDCLOG_DEBUG) {
                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
             }
-            message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment();
-            message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
+            message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
+            message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment((double)message.message.asnLength);
             if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
                 mdclog_write(MDCLOG_ERR, "Subscription unsuccessful message failed to send to xAPP");
             }
@@ -1941,8 +1889,8 @@ void asnUnSuccsesfulMsg(E2AP_PDU_t *pdu,
             if (logLevel >= MDCLOG_DEBUG) {
                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
             }
-            message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment();
-            message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment((double)rmrMessageBuffer.rcvMessage->len);
+            message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
+            message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment((double)message.message.asnLength);
             if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
                 mdclog_write(MDCLOG_ERR, "Subscription Delete unsuccessful message failed to send to xAPP");
             }
@@ -2044,6 +1992,9 @@ int PER_FromXML(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuf
     }
     auto rval = asn_decode(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, (void **) &pdu,
                            rmrMessageBuffer.rcvMessage->payload, rmrMessageBuffer.rcvMessage->len);
+    if (mdclog_level_get() >= MDCLOG_DEBUG) {
+        mdclog_write(MDCLOG_DEBUG, "%s After  decoding the XML to PDU", __func__ );
+    }
     if (rval.code != RC_OK) {
         mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) setup response  from E2MGR : %s",
                      rval.code,
@@ -2054,6 +2005,9 @@ int PER_FromXML(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuf
     int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
     auto er = asn_encode_to_buffer(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, pdu,
                                    rmrMessageBuffer.rcvMessage->payload, buff_size);
+    if (mdclog_level_get() >= MDCLOG_DEBUG) {
+        mdclog_write(MDCLOG_DEBUG, "%s After encoding PDU to PER", __func__ );
+    }
     if (er.encoded == -1) {
         mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
         return -1;
@@ -2079,18 +2033,19 @@ int PER_FromXML(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuf
 int receiveXappMessages(Sctp_Map_t *sctpMap,
                         RmrMessagesBuffer_t &rmrMessageBuffer,
                         struct timespec &ts) {
+    int loglevel = mdclog_level_get();
     if (rmrMessageBuffer.rcvMessage == nullptr) {
         //we have error
         mdclog_write(MDCLOG_ERR, "RMR Allocation message, %s", strerror(errno));
         return -1;
     }
 
-    if (mdclog_level_get() >= MDCLOG_DEBUG) {
-        mdclog_write(MDCLOG_DEBUG, "Call to rmr_rcv_msg");
-    }
+//    if (loglevel >= MDCLOG_DEBUG) {
+//        mdclog_write(MDCLOG_DEBUG, "Call to rmr_rcv_msg");
+//    }
     rmrMessageBuffer.rcvMessage = rmr_rcv_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.rcvMessage);
     if (rmrMessageBuffer.rcvMessage == nullptr) {
-        mdclog_write(MDCLOG_ERR, "RMR Receving message with null pointer, Realloc rmr mesage buffer");
+        mdclog_write(MDCLOG_ERR, "RMR Receiving message with null pointer, Reallocated rmr message buffer");
         rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
         return -2;
     }
@@ -2102,7 +2057,7 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
     // get message payload
     //auto msgData = msg->payload;
     if (rmrMessageBuffer.rcvMessage->state != 0) {
-        mdclog_write(MDCLOG_ERR, "RMR Receving message with stat = %d", rmrMessageBuffer.rcvMessage->state);
+        mdclog_write(MDCLOG_ERR, "RMR Receiving message with stat = %d", rmrMessageBuffer.rcvMessage->state);
         return -1;
     }
     rmr_get_meid(rmrMessageBuffer.rcvMessage, (unsigned char *)message.message.enodbName);
@@ -2120,13 +2075,20 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
         }
     }
 
+    if (rmrMessageBuffer.rcvMessage->mtype != RIC_HEALTH_CHECK_REQ) {
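+        // Count every xApp message except periodic health-check requests, so that
+        // health checks do not inflate the xApp traffic counter.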
+        num_of_XAPP_messages.fetch_add(1, std::memory_order_release);
+    }
     switch (rmrMessageBuffer.rcvMessage->mtype) {
         case RIC_E2_SETUP_RESP : {
+            if (loglevel >= MDCLOG_DEBUG) {
+                mdclog_write(MDCLOG_DEBUG, "RIC_E2_SETUP_RESP");
+            }
             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
                 break;
             }
-            message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup - 1]->Increment();
-            message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
+            message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
+            message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_RESP");
                 return -6;
@@ -2134,11 +2096,14 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
             break;
         }
         case RIC_E2_SETUP_FAILURE : {
+            if (loglevel >= MDCLOG_DEBUG) {
+                mdclog_write(MDCLOG_DEBUG, "RIC_E2_SETUP_FAILURE");
+            }
             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
                 break;
             }
-            message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup - 1]->Increment();
-            message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
+            message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
+            message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_FAILURE");
                 return -6;
@@ -2146,8 +2111,11 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
             break;
         }
         case RIC_ERROR_INDICATION: {
-            message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication - 1]->Increment();
-            message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
+            if (loglevel >= MDCLOG_DEBUG) {
+                mdclog_write(MDCLOG_DEBUG, "RIC_ERROR_INDICATION");
+            }
+            message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication]->Increment();
+            message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_ERROR_INDICATION");
                 return -6;
@@ -2155,8 +2123,11 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
             break;
         }
         case RIC_SUB_REQ: {
-            message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment();
-            message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscription - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
+            if (loglevel >= MDCLOG_DEBUG) {
+                mdclog_write(MDCLOG_DEBUG, "RIC_SUB_REQ");
+            }
+            message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
+            message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_REQ");
                 return -6;
@@ -2164,8 +2135,11 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
             break;
         }
         case RIC_SUB_DEL_REQ: {
-            message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment();
-            message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
+            if (loglevel >= MDCLOG_DEBUG) {
+                mdclog_write(MDCLOG_DEBUG, "RIC_SUB_DEL_REQ");
+            }
+            message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
+            message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_DEL_REQ");
                 return -6;
@@ -2173,8 +2147,11 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
             break;
         }
         case RIC_CONTROL_REQ: {
-            message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment();
-            message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICcontrol - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
+            if (loglevel >= MDCLOG_DEBUG) {
+                mdclog_write(MDCLOG_DEBUG, "RIC_CONTROL_REQ");
+            }
+            message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
+            message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_CONTROL_REQ");
                 return -6;
@@ -2182,11 +2159,14 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
             break;
         }
         case RIC_SERVICE_QUERY: {
+            if (loglevel >= MDCLOG_DEBUG) {
+                mdclog_write(MDCLOG_DEBUG, "RIC_SERVICE_QUERY");
+            }
             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
                 break;
             }
-            message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceQuery - 1]->Increment();
-            message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
+            message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment();
+            message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_QUERY");
                 return -6;
@@ -2194,11 +2174,18 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
             break;
         }
         case RIC_SERVICE_UPDATE_ACK: {
+            if (loglevel >= MDCLOG_DEBUG) {
+                mdclog_write(MDCLOG_DEBUG, "RIC_SERVICE_UPDATE_ACK");
+            }
             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
+                mdclog_write(MDCLOG_ERR, "error in PER_FromXML");
                 break;
             }
-            message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate - 1]->Increment();
-            message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
+            message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
+            message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment(rmrMessageBuffer.rcvMessage->len);
+            if (loglevel >= MDCLOG_DEBUG) {
+                mdclog_write(MDCLOG_DEBUG, "Before sending to CU");
+            }
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_ACK");
                 return -6;
@@ -2206,11 +2193,14 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
             break;
         }
         case RIC_SERVICE_UPDATE_FAILURE: {
+            if (loglevel >= MDCLOG_DEBUG) {
+                mdclog_write(MDCLOG_DEBUG, "RIC_SERVICE_UPDATE_FAILURE");
+            }
             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
                 break;
             }
-            message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate - 1]->Increment();
-            message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
+            message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
+            message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_FAILURE");
                 return -6;
@@ -2218,11 +2208,14 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
             break;
         }
         case RIC_E2_RESET_REQ: {
+            if (loglevel >= MDCLOG_DEBUG) {
+                mdclog_write(MDCLOG_DEBUG, "RIC_E2_RESET_REQ");
+            }
             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
                 break;
             }
-            message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_Reset - 1]->Increment();
-            message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_Reset - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
+            message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
+            message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET");
                 return -6;
@@ -2230,11 +2223,14 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
             break;
         }
         case RIC_E2_RESET_RESP: {
+            if (loglevel >= MDCLOG_DEBUG) {
+                mdclog_write(MDCLOG_DEBUG, "RIC_E2_RESET_RESP");
+            }
             if (PER_FromXML(message, rmrMessageBuffer) != 0) {
                 break;
             }
-            message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_Reset - 1]->Increment();
-            message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset - 1]->Increment(rmrMessageBuffer.rcvMessage->len);
+            message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
+            message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET_RESP");
                 return -6;
@@ -2293,38 +2289,40 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
             } else if (rmrMessageBuffer.sendMessage->state != 0)  {
                 mdclog_write(MDCLOG_ERR, "Failed to send E2_TERM_KEEP_ALIVE_RESP, on RMR state = %d ( %s)",
                              rmrMessageBuffer.sendMessage->state, translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str());
-            } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
+            } else if (loglevel >= MDCLOG_DEBUG) {
                 mdclog_write(MDCLOG_DEBUG, "Got Keep Alive Request send : %s", rmrMessageBuffer.ka_message);
             }
 
             break;
         }
         case RIC_HEALTH_CHECK_REQ: {
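+            // Health-check requests arrive frequently; the static counter below is used
+            // to throttle the DEBUG log to every 100th request.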
+            static int counter = 0;
             // send message back
-            rmr_bytes2payload(rmrMessageBuffer.sendMessage,
+            rmr_bytes2payload(rmrMessageBuffer.rcvMessage,
                               (unsigned char *)"OK",
                               2);
-            rmrMessageBuffer.sendMessage->mtype = RIC_HEALTH_CHECK_RESP;
-            rmrMessageBuffer.sendMessage->state = 0;
+            rmrMessageBuffer.rcvMessage->mtype = RIC_HEALTH_CHECK_RESP;
+            rmrMessageBuffer.rcvMessage->state = 0;
             static unsigned char tx[32];
             auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
-            rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, txLen);
-            rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
-            if (rmrMessageBuffer.sendMessage == nullptr) {
-                rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
+            rmr_bytes2xact(rmrMessageBuffer.rcvMessage, tx, txLen);
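+            // Reply on the received buffer via rmr_rts_msg (return to sender) rather than
+            // rmr_send_msg on the separate send buffer (see the commented-out call below).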
+            rmrMessageBuffer.rcvMessage = rmr_rts_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.rcvMessage);
+            //rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
+            if (rmrMessageBuffer.rcvMessage == nullptr) {
+                rmrMessageBuffer.rcvMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP RMR message returned NULL");
-            } else if (rmrMessageBuffer.sendMessage->state != 0)  {
+            } else if (rmrMessageBuffer.rcvMessage->state != 0)  {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP, on RMR state = %d ( %s)",
-                             rmrMessageBuffer.sendMessage->state, translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str());
-            } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
-                mdclog_write(MDCLOG_DEBUG, "Got RIC_HEALTH_CHECK_REQ Request send : OK");
+                             rmrMessageBuffer.rcvMessage->state, translateRmrErrorMessages(rmrMessageBuffer.rcvMessage->state).c_str());
+            } else if (loglevel >= MDCLOG_DEBUG && ++counter % 100 == 0) {
+                mdclog_write(MDCLOG_DEBUG, "Got %d RIC_HEALTH_CHECK_REQ Request send : OK", counter);
             }
 
             break;
         }
 
         default:
-            mdclog_write(MDCLOG_WARN, "Message Type : %d is not seported", rmrMessageBuffer.rcvMessage->mtype);
+            mdclog_write(MDCLOG_WARN, "Message Type : %d is not supported", rmrMessageBuffer.rcvMessage->mtype);
             message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
             message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
             message.message.time.tv_nsec = ts.tv_nsec;
@@ -2354,6 +2352,9 @@ int sendDirectionalSctpMsg(RmrMessagesBuffer_t &messageBuffer,
                            ReportingMessages_t &message,
                            int failedMsgId,
                            Sctp_Map_t *sctpMap) {
+    if (mdclog_level_get() >= MDCLOG_DEBUG) {
+        mdclog_write(MDCLOG_DEBUG, "send message: %d to %s address", message.message.messageType, message.message.enodbName);
+    }
 
     getRequestMetaData(message, messageBuffer);
     if (mdclog_level_get() >= MDCLOG_INFO) {
@@ -2382,32 +2383,6 @@ int sendMessagetoCu(Sctp_Map_t *sctpMap,
     return rc;
 }
 
-/**
- *
- * @param rmrCtx the rmr context to send and receive
- * @param msg the msg we got fromxApp
- * @param metaData data from xApp in ordered struct
- * @param failedMesgId the return message type error
- */
-void
-sendFailedSendingMessagetoXapp(RmrMessagesBuffer_t &rmrMessageBuffer, ReportingMessages_t &message, int failedMesgId) {
-    rmr_mbuf_t *msg = rmrMessageBuffer.sendMessage;
-    msg->len = snprintf((char *) msg->payload, 200, "the gNb/eNode name %s not found",
-                        message.message.enodbName);
-    if (mdclog_level_get() >= MDCLOG_INFO) {
-        mdclog_write(MDCLOG_INFO, "%s", msg->payload);
-    }
-    msg->mtype = failedMesgId;
-    msg->state = 0;
-
-    static unsigned char tx[32];
-    snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
-    rmr_bytes2xact(msg, tx, strlen((const char *) tx));
-
-    sendRmrMessage(rmrMessageBuffer, message);
-}
-
-
 
 /**
  *
@@ -2431,7 +2406,7 @@ int addToEpoll(int epoll_fd,
     event.events = events;
     if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, peerInfo->fileDescriptor, &event) < 0) {
         if (mdclog_level_get() >= MDCLOG_DEBUG) {
-            mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_ADD (may chack not to quit here), %s, %s %d",
+            mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_ADD (may check not to quit here), %s, %s %d",
                          strerror(errno), __func__, __LINE__);
         }
         close(peerInfo->fileDescriptor);
@@ -2450,7 +2425,7 @@ int addToEpoll(int epoll_fd,
         } else {
             peerInfo->enodbName[0] = 0;
         }
-        mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_ADD (may chack not to quit here)");
+        mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_ADD (may check not to quit here)");
         return -1;
     }
     return 0;
@@ -2478,7 +2453,7 @@ int modifyToEpoll(int epoll_fd,
     event.events = events;
     if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, peerInfo->fileDescriptor, &event) < 0) {
         if (mdclog_level_get() >= MDCLOG_DEBUG) {
-            mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_MOD (may chack not to quit here), %s, %s %d",
+            mdclog_write(MDCLOG_DEBUG, "epoll_ctl EPOLL_CTL_MOD (may check not to quit here), %s, %s %d",
                          strerror(errno), __func__, __LINE__);
         }
         close(peerInfo->fileDescriptor);
@@ -2493,7 +2468,7 @@ int modifyToEpoll(int epoll_fd,
             free(tmp);
         }
         sctpMap->erase(key);
-        mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_ADD (may chack not to quit here)");
+        mdclog_write(MDCLOG_ERR, "epoll_ctl EPOLL_CTL_ADD (may check not to quit here)");
         return -1;
     }
     return 0;
@@ -2590,7 +2565,7 @@ string translateRmrErrorMessages(int state) {
             str = "RMR_OK - state is good";
             break;
         case RMR_ERR_BADARG:
-            str = "RMR_ERR_BADARG - argument passd to function was unusable";
+            str = "RMR_ERR_BADARG - argument passed to function was unusable";
             break;
         case RMR_ERR_NOENDPT:
             str = "RMR_ERR_NOENDPT - send//call could not find an endpoint based on msg type";