5.0.4 Fix Prometheus bug in number of bytes.
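
This change wires prometheus-cpp metrics into the E2 termination: buildConfiguration() reads prometheusMode ("pull" or "push") together with prometheusPort, prometheusPushAddr and prometheusPushTimeOut; startPrometheus() registers an "E2T" counter family and starts either an Exposer (pull) or a Gateway that is pushed on the epoll timeout; buildPrometheuslist() pre-creates per-peer Messages/Bytes counters for every E2AP procedure; the old StatCollector thread is removed. Byte counters are incremented with the ASN.1 payload length (message.message.asnLength).

A minimal sketch of the counter pattern the diff relies on (header paths assume a recent prometheus-cpp; the POD_NAME value, peer label and asnLength argument are illustrative placeholders, not values taken from this commit):

    #include <memory>
    #include <prometheus/counter.h>
    #include <prometheus/registry.h>

    void countSetupRequestBytes(double asnLength) {
        auto registry = std::make_shared<prometheus::Registry>();
        // same family name and help text as startPrometheus(); the label value is a placeholder
        auto &family = prometheus::BuildCounter()
                .Name("E2T")
                .Help("E2T message counter")
                .Labels({{"POD_NAME", "e2term-0"}})
                .Register(*registry);
        // one {Messages, Bytes} counter pair per peer/direction/procedure,
        // keyed the same way buildPrometheuslist() keys them
        auto &setupBytes = family.Add({{"gnb_001_002_000001", "IN"}, {"SetupRequest", "Bytes"}});
        setupBytes.Increment(asnLength);   // bytes counters carry the received PDU length
    }
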
[ric-plt/e2.git] / RIC-E2-TERMINATION / sctpThread.cpp
index f80b862..987bbaf 100644
 #include "sctpThread.h"
 #include "BuildRunName.h"
 
-#include "3rdparty/oranE2SM/E2SM-gNB-NRT-RANfunction-Definition.h"
-#include "BuildXml.h"
-#include "pugixml/src/pugixml.hpp"
+//#include "3rdparty/oranE2SM/E2SM-gNB-NRT-RANfunction-Definition.h"
+//#include "BuildXml.h"
+//#include "pugixml/src/pugixml.hpp"
 
 using namespace std;
 //using namespace std::placeholders;
 using namespace boost::filesystem;
+using namespace prometheus;
+
 
 //#ifdef __cplusplus
 //extern "C"
@@ -83,13 +85,18 @@ std::atomic<int64_t> num_of_XAPP_messages{0};
 static long transactionCounter = 0;
 
 int buildListeningPort(sctp_params_t &sctpParams) {
-    sctpParams.listenFD = socket (AF_INET6, SOCK_STREAM, IPPROTO_SCTP);
+    sctpParams.listenFD = socket(AF_INET6, SOCK_STREAM, IPPROTO_SCTP);
+    if (sctpParams.listenFD <= 0) {
+        mdclog_write(MDCLOG_ERR, "Error Opening socket, %s", strerror(errno));
+        return -1;
+    }
+
     struct sockaddr_in6 servaddr {};
     servaddr.sin6_family = AF_INET6;
     servaddr.sin6_addr   = in6addr_any;
     servaddr.sin6_port = htons(sctpParams.sctpPort);
     if (bind(sctpParams.listenFD, (SA *)&servaddr, sizeof(servaddr)) < 0 ) {
-        mdclog_write(MDCLOG_ERR, "Error binding. %s\n", strerror(errno));
+        mdclog_write(MDCLOG_ERR, "Error binding port %d. %s", sctpParams.sctpPort, strerror(errno));
         return -1;
     }
     if (setSocketNoBlocking(sctpParams.listenFD) == -1) {
@@ -237,6 +244,32 @@ int buildConfiguration(sctp_params_t &sctpParams) {
     }
     jsonTrace = sctpParams.trace;
 
+    sctpParams.epollTimeOut = -1;
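+    // -1 keeps epoll_wait() blocking indefinitely; push mode below replaces it with the
+    // push interval so a timeout becomes the metrics-push heartbeat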
+    tmpStr = conf.getStringValue("prometheusMode");
+    transform(tmpStr.begin(), tmpStr.end(), tmpStr.begin(), ::tolower);
+    sctpParams.prometheusMode = tmpStr;
+    if (tmpStr.length() != 0) {
+        if (tmpStr.compare("push") == 0) {
+            // push mode: give epoll_wait a finite timeout so metrics are pushed periodically
+            auto timeout = conf.getIntValue("prometheusPushTimeOut");
+            if (timeout >= 5 && timeout <= 300) {
+                sctpParams.epollTimeOut = timeout * 1000;
+            } else {
+                sctpParams.epollTimeOut = 10 * 1000;
+            }
+        }
+    }
+
+    tmpStr = conf.getStringValue("prometheusPushAddr");
+    if (tmpStr.length() != 0) {
+        sctpParams.prometheusPushAddress = tmpStr;
+    }
+
+    tmpStr = conf.getStringValue("prometheusPort");
+    if (tmpStr.length() != 0) {
+        sctpParams.prometheusPort = tmpStr;
+    }
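+    // configuration keys read above (values shown are illustrative, not defaults):
+    //   prometheusMode         "pull" or "push"
+    //   prometheusPort         port served by the pull-mode Exposer, e.g. "8088"
+    //   prometheusPushAddr     push-gateway "host:port", e.g. "pushgateway:9091"
+    //   prometheusPushTimeOut  push interval in seconds, accepted range [5..300]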
+
     sctpParams.ka_message_length = snprintf(sctpParams.ka_message, KA_MESSAGE_SIZE, "{\"address\": \"%s:%d\","
                                                                                     "\"fqdn\": \"%s\","
                                                                                     "\"pod_name\": \"%s\"}",
@@ -282,7 +315,49 @@ int buildConfiguration(sctp_params_t &sctpParams) {
     return 0;
 }
 
+static std::string GetHostName() {
+    char hostname[1024];
+
+    if (::gethostname(hostname, sizeof(hostname))) {
+        return {};
+    }
+    return hostname;
+}
 
+void startPrometheus(sctp_params_t &sctpParams) {
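+    // register the "E2T" counter family, then start either an Exposer (pull mode,
+    // scraped on myIP:prometheusPort) or a Gateway (push mode, driven by the epoll timeout)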
+    sctpParams.prometheusFamily = &BuildCounter()
+            .Name("E2T")
+            .Help("E2T message counter")
+            .Labels({{"POD_NAME", sctpParams.podName}})
+            .Register(*sctpParams.prometheusRegistry);
+
+    if (strcmp(sctpParams.prometheusMode.c_str(),"pull") == 0) {
+        if (mdclog_level_get() >= MDCLOG_DEBUG) {
+            mdclog_write(MDCLOG_DEBUG, "Start Prometheus Pull mode on %s:%s", sctpParams.myIP.c_str(), sctpParams.prometheusPort.c_str());
+        }
+        sctpParams.prometheusExposer = new Exposer(sctpParams.myIP + ":" + sctpParams.prometheusPort, 1);
+        sctpParams.prometheusExposer->RegisterCollectable(sctpParams.prometheusRegistry);
+    } else if (strcmp(sctpParams.prometheusMode.c_str(),"push") == 0) {
+        if (mdclog_level_get() >= MDCLOG_DEBUG) {
+            mdclog_write(MDCLOG_DEBUG, "Start Prometheus Push mode");
+        }
+        const auto labels = Gateway::GetInstanceLabel(GetHostName());
+        string address {};
+        string port {};
+        char ch = ':';
+        auto found = sctpParams.prometheusPushAddress.find_last_of(ch);
+        // split the configured push address into host and port at the last ':'
+        if (found != string::npos) {
+            address = sctpParams.prometheusPushAddress.substr(0,found);
+            port = sctpParams.prometheusPushAddress.substr(found + 1);
+            sctpParams.prometheusGateway = new Gateway(address, port, "E2T", labels);
+            sctpParams.prometheusGateway->RegisterCollectable(sctpParams.prometheusRegistry);
+        } else {
+            mdclog_write(MDCLOG_ERR, "failed to build Prometheus gateway, no stats will be sent");
+        }
+    }
+}
 
 int main(const int argc, char **argv) {
     sctp_params_t sctpParams;
@@ -325,6 +400,13 @@ int main(const int argc, char **argv) {
         exit(-1);
     }
 
+    //auto registry = std::make_shared<Registry>();
+    sctpParams.prometheusRegistry = std::make_shared<Registry>();
+
+    //sctpParams.promtheusFamily = new Family<Counter>("E2T", "E2T message counter", {{"E", sctpParams.podName}});
+
+    startPrometheus(sctpParams);
+
     // start epoll
     sctpParams.epoll_fd = epoll_create1(0);
     if (sctpParams.epoll_fd == -1) {
@@ -370,8 +452,6 @@ int main(const int argc, char **argv) {
         }
     }
 
-    auto statFlag = false;
-    auto statThread = std::thread(statColectorThread, (void *)&statFlag);
 
     //loop over term_init until first message from xApp
     handleTermInit(sctpParams);
@@ -380,9 +460,6 @@ int main(const int argc, char **argv) {
         t.join();
     }
 
-    statFlag = true;
-    statThread.join();
-
     return 0;
 }
 
@@ -530,7 +607,6 @@ void listener(sctp_params_t *params) {
         mdclog_write(MDCLOG_DEBUG, "started thread number %s", tid);
     }
 
-
     RmrMessagesBuffer_t rmrMessageBuffer{};
     //create and init RMR
     rmrMessageBuffer.rmrCtx = params->rmrCtx;
@@ -557,23 +633,39 @@ void listener(sctp_params_t *params) {
 //        rmrMessageBuffer.sendBufferedMessages[i] = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
 //    }
 
-    message.statCollector = StatCollector::GetInstance();
-
+    bool gatewayflag = false;
+    // declared outside the loop so the future returned by AsyncPush() survives the
+    // continue after a timeout and can be collected once epoll reports new events
+    future<int> gateWay;
     while (true) {
+
         if (mdclog_level_get() >= MDCLOG_DEBUG) {
-            mdclog_write(MDCLOG_DEBUG, "Start EPOLL Wait");
+            mdclog_write(MDCLOG_DEBUG, "Start EPOLL Wait. Timeout = %d", params->epollTimeOut);
         }
-        auto numOfEvents = epoll_wait(params->epoll_fd, events, MAXEVENTS, -1);
-        if (numOfEvents < 0 && errno == EINTR) {
-            if (mdclog_level_get() >= MDCLOG_DEBUG) {
-                mdclog_write(MDCLOG_DEBUG, "got EINTR : %s", strerror(errno));
+        auto numOfEvents = epoll_wait(params->epoll_fd, events, MAXEVENTS, params->epollTimeOut);
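+        // numOfEvents == 0 means the push-mode timeout fired: start an async push to the
+        // gateway and collect its result on a later pass, once epoll reports real events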
+        if (numOfEvents == 0) {
+            if (params->prometheusGateway != nullptr) {
+                gateWay = params->prometheusGateway->AsyncPush();
+                gatewayflag = true;
             }
             continue;
-        }
-        if (numOfEvents < 0) {
+        } else if (numOfEvents < 0) {
+            if (errno == EINTR) {
+                if (mdclog_level_get() >= MDCLOG_DEBUG) {
+                    mdclog_write(MDCLOG_DEBUG, "got EINTR : %s", strerror(errno));
+                }
+                continue;
+            }
             mdclog_write(MDCLOG_ERR, "Epoll wait failed, errno = %s", strerror(errno));
             return;
         }
+        if (gatewayflag) {
+            gatewayflag = false;
+            auto rc = gateWay.get();
+            if (rc != 200) {
+                mdclog_write(MDCLOG_ERR, "Async Send to Prometheus failed with Return Code %d", rc);
+            } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
+                mdclog_write(MDCLOG_DEBUG, "Stats sent to Prometheus");
+            }
+        }
         for (auto i = 0; i < numOfEvents; i++) {
             if (mdclog_level_get() >= MDCLOG_DEBUG) {
                 mdclog_write(MDCLOG_DEBUG, "handling epoll event %d out of %d", i + 1, numOfEvents);
@@ -791,6 +883,17 @@ void handleConfigChange(sctp_params_t *sctpParams) {
                     sctpParams->trace = false;
                 }
                 jsonTrace = sctpParams->trace;
+
+                if (strcmp(sctpParams->prometheusMode.c_str(), "push") == 0) {
+                    auto timeout = conf.getIntValue("prometheusPushTimeOut");
+                    if (timeout >= 5 && timeout <= 300) {
+                        sctpParams->epollTimeOut = timeout * 1000;
+                    } else {
+                        mdclog_write(MDCLOG_ERR, "prometheusPushTimeOut set to invalid value %d, valid range is [5..300]",
+                                     timeout);
+                    }
+                }
+
                 endlessLoop = false;
             }
         }
@@ -983,8 +1086,6 @@ int sendSctpMsg(ConnectedCU_t *peerInfo, ReportingMessages_t &message, Sctp_Map_
             m->erase(key);
             return -1;
         }
-        // TODO remove stat update
-        //message.statCollector->incSentMessage(string(message.message.enodbName));
         message.message.direction = 'D';
         // send report.buffer of size
         buildJsonMessage(message);
@@ -1005,8 +1106,6 @@ int sendSctpMsg(ConnectedCU_t *peerInfo, ReportingMessages_t &message, Sctp_Map_
  * @param rmrMessageBuffer
  */
 void getRequestMetaData(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
-    rmr_get_meid(rmrMessageBuffer.rcvMessage, (unsigned char *) (message.message.enodbName));
-
     message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
     message.message.asnLength = rmrMessageBuffer.rcvMessage->len;
 
@@ -1042,14 +1141,12 @@ int receiveDataFromSctp(struct epoll_event *events,
     // get the identity of the interface
     message.peerInfo = (ConnectedCU_t *)events->data.ptr;
 
-    message.statCollector = StatCollector::GetInstance();
     struct timespec start{0, 0};
     struct timespec decodestart{0, 0};
     struct timespec end{0, 0};
 
     E2AP_PDU_t *pdu = nullptr;
 
-
     while (true) {
         if (loglevel >= MDCLOG_DEBUG) {
             mdclog_write(MDCLOG_DEBUG, "Start Read from SCTP %d fd", message.peerInfo->fileDescriptor);
@@ -1066,7 +1163,6 @@ int receiveDataFromSctp(struct epoll_event *events,
         }
 
         memcpy(message.message.enodbName, message.peerInfo->enodbName, sizeof(message.peerInfo->enodbName));
-        message.statCollector->incRecvMessage(string(message.message.enodbName));
         message.message.direction = 'U';
         message.message.time.tv_nsec = ts.tv_nsec;
         message.message.time.tv_sec = ts.tv_sec;
@@ -1115,7 +1211,6 @@ int receiveDataFromSctp(struct epoll_event *events,
         if (rval.code != RC_OK) {
             mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2AP PDU from RAN : %s", rval.code,
                          message.peerInfo->enodbName);
-            //todo may need reset to pdu
             break;
         }
 
@@ -1194,91 +1289,285 @@ int receiveDataFromSctp(struct epoll_event *events,
 
 static void buildAndsendSetupRequest(ReportingMessages_t &message,
                                      RmrMessagesBuffer_t &rmrMessageBuffer,
-                                     E2AP_PDU_t *pdu,
-                                     vector<string> &repValues) {
+                                     E2AP_PDU_t *pdu/*,
+                                     string const &messageName,
+                                     string const &ieName,
+                                     vector<string> &functionsToAdd_v,
+                                     vector<string> &functionsToModified_v*/) {
     auto logLevel = mdclog_level_get();
-
     // now we can send the data to e2Mgr
-    auto buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
-
-    auto *rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, buffer_size);
-    // add addrees to message
 
-
-    // unsigned char *buffer = &rmrMsg->payload[j];
+    asn_enc_rval_t er;
+    auto buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
     unsigned char buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
+    er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu, buffer, buffer_size);
+    if (er.encoded == -1) {
+        mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
+        return;
+    } else if (er.encoded > (ssize_t) buffer_size) {
+        // buffer is a fixed-size stack array, so retrying with a larger claimed size
+        // would only overrun it; give up on this PDU instead
+        mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, %zd bytes needed",
+                     (int) buffer_size,
+                     asn_DEF_E2AP_PDU.name, er.encoded);
+        return;
+    }
+    buffer[er.encoded] = '\0';
     // encode to xml
-    auto er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu, buffer, buffer_size);
-    if (er.encoded == -1) {
-        mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
-    } else if (er.encoded > (ssize_t) buffer_size) {
-        mdclog_write(MDCLOG_ERR, "Buffer of size %d is to small for %s, at %s line %d",
-                     (int) buffer_size,
-                     asn_DEF_E2AP_PDU.name, __func__, __LINE__);
-    } else {
 
-        buildXmlData(repValues, buffer);
-        // we have the XML
-        rmrMsg->len = snprintf((char *)rmrMsg->payload, RECEIVE_SCTP_BUFFER_SIZE * 2, "%s:%d|%s",
+    string res((char *)buffer);
+    res.erase(std::remove(res.begin(), res.end(), '\n'), res.end());
+    res.erase(std::remove(res.begin(), res.end(), '\t'), res.end());
+    res.erase(std::remove(res.begin(), res.end(), ' '), res.end());
+
+//    string res {};
+//    if (!functionsToAdd_v.empty() || !functionsToModified_v.empty()) {
+//        res = buildXmlData(messageName, ieName, functionsToAdd_v, functionsToModified_v, buffer, (size_t) er.encoded);
+//    }
+    rmr_mbuf_t *rmrMsg;
+//    if (res.length() == 0) {
+//        rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, buffer_size + 256);
+//        rmrMsg->len = snprintf((char *) rmrMsg->payload, RECEIVE_SCTP_BUFFER_SIZE * 2, "%s:%d|%s",
+//                               message.peerInfo->sctpParams->myIP.c_str(),
+//                               message.peerInfo->sctpParams->rmrPort,
+//                               buffer);
+//    } else {
+        rmrMsg = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, (int)res.length() + 256);
+        rmrMsg->len = snprintf((char *) rmrMsg->payload, res.length() + 256, "%s:%d|%s",
                                message.peerInfo->sctpParams->myIP.c_str(),
                                message.peerInfo->sctpParams->rmrPort,
-                               buffer);
-        if (logLevel >= MDCLOG_DEBUG) {
-            mdclog_write(MDCLOG_DEBUG, "Setup request of size %d :\n %s\n", rmrMsg->len, rmrMsg->payload);
-        }
-        // send to RMR
-        message.message.messageType = rmrMsg->mtype = RIC_E2_SETUP_REQ;
-        rmrMsg->state = 0;
-        rmr_bytes2meid(rmrMsg, (unsigned char *) message.message.enodbName, strlen(message.message.enodbName));
+                               res.c_str());
+//    }
 
-        static unsigned char tx[32];
-        snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
-        rmr_bytes2xact(rmrMsg, tx, strlen((const char *) tx));
-
-        rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
-        if (rmrMsg == nullptr) {
-            mdclog_write(MDCLOG_ERR, "RMR failed to send returned nullptr");
-        } else if (rmrMsg->state != 0) {
-            char meid[RMR_MAX_MEID]{};
-            if (rmrMsg->state == RMR_ERR_RETRY) {
-                usleep(5);
-                rmrMsg->state = 0;
-                mdclog_write(MDCLOG_INFO, "RETRY sending Message %d to Xapp from %s",
-                             rmrMsg->mtype, rmr_get_meid(rmrMsg, (unsigned char *) meid));
-                rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
-                if (rmrMsg == nullptr) {
-                    mdclog_write(MDCLOG_ERR, "RMR failed send returned nullptr");
-                } else if (rmrMsg->state != 0) {
-                    mdclog_write(MDCLOG_ERR,
-                                 "RMR Retry failed %s sending request %d to Xapp from %s",
-                                 translateRmrErrorMessages(rmrMsg->state).c_str(),
-                                 rmrMsg->mtype,
-                                 rmr_get_meid(rmrMsg, (unsigned char *) meid));
-                }
-            } else {
-                mdclog_write(MDCLOG_ERR, "RMR failed: %s. sending request %d to Xapp from %s",
+    if (logLevel >= MDCLOG_DEBUG) {
+        mdclog_write(MDCLOG_DEBUG, "Setup request of size %d :\n %s\n", rmrMsg->len, rmrMsg->payload);
+    }
+    // send to RMR
+    rmrMsg->mtype = message.message.messageType;
+    rmrMsg->state = 0;
+    rmr_bytes2meid(rmrMsg, (unsigned char *) message.message.enodbName, strlen(message.message.enodbName));
+
+    static unsigned char tx[32];
+    snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
+    rmr_bytes2xact(rmrMsg, tx, strlen((const char *) tx));
+
+    rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
+    if (rmrMsg == nullptr) {
+        mdclog_write(MDCLOG_ERR, "RMR failed to send returned nullptr");
+    } else if (rmrMsg->state != 0) {
+        char meid[RMR_MAX_MEID]{};
+        if (rmrMsg->state == RMR_ERR_RETRY) {
+            usleep(5);
+            rmrMsg->state = 0;
+            mdclog_write(MDCLOG_INFO, "RETRY sending Message %d to Xapp from %s",
+                         rmrMsg->mtype, rmr_get_meid(rmrMsg, (unsigned char *) meid));
+            rmrMsg = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMsg);
+            if (rmrMsg == nullptr) {
+                mdclog_write(MDCLOG_ERR, "RMR failed send returned nullptr");
+            } else if (rmrMsg->state != 0) {
+                mdclog_write(MDCLOG_ERR,
+                             "RMR Retry failed %s sending request %d to Xapp from %s",
                              translateRmrErrorMessages(rmrMsg->state).c_str(),
                              rmrMsg->mtype,
                              rmr_get_meid(rmrMsg, (unsigned char *) meid));
             }
+        } else {
+            mdclog_write(MDCLOG_ERR, "RMR failed: %s. sending request %d to Xapp from %s",
+                         translateRmrErrorMessages(rmrMsg->state).c_str(),
+                         rmrMsg->mtype,
+                         rmr_get_meid(rmrMsg, (unsigned char *) meid));
         }
-        message.peerInfo->gotSetup = true;
-        buildJsonMessage(message);
-        if (rmrMsg != nullptr) {
-            rmr_free_msg(rmrMsg);
+    }
+    message.peerInfo->gotSetup = true;
+    buildJsonMessage(message);
+    if (rmrMsg != nullptr) {
+        rmr_free_msg(rmrMsg);
+    }
+}
+
+#if 0
+int RAN_Function_list_To_Vector(RANfunctions_List_t& list, vector <string> &runFunXML_v) {
+    auto index = 0;
+    runFunXML_v.clear();
+    for (auto j = 0; j < list.list.count; j++) {
+        auto *raNfunctionItemIEs = (RANfunction_ItemIEs_t *)list.list.array[j];
+        if (raNfunctionItemIEs->id == ProtocolIE_ID_id_RANfunction_Item &&
+            (raNfunctionItemIEs->value.present == RANfunction_ItemIEs__value_PR_RANfunction_Item)) {
+            // encode to xml
+            E2SM_gNB_NRT_RANfunction_Definition_t *ranFunDef = nullptr;
+            auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER,
+                                   &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
+                                   (void **)&ranFunDef,
+                                   raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.buf,
+                                   raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.size);
+            if (rval.code != RC_OK) {
+                mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2SM message from : %s",
+                             rval.code,
+                             asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name);
+                return -1;
+            }
+
+            auto xml_buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
+            unsigned char xml_buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
+            memset(xml_buffer, 0, RECEIVE_SCTP_BUFFER_SIZE * 2);
+            // encode to xml
+            auto er = asn_encode_to_buffer(nullptr,
+                                           ATS_BASIC_XER,
+                                           &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
+                                           ranFunDef,
+                                           xml_buffer,
+                                           xml_buffer_size);
+            if (er.encoded == -1) {
+                mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s",
+                             asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
+                             strerror(errno));
+            } else if (er.encoded > (ssize_t)xml_buffer_size) {
+                mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, at %s line %d",
+                             (int) xml_buffer_size,
+                             asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name, __func__, __LINE__);
+            } else {
+                if (mdclog_level_get() >= MDCLOG_DEBUG) {
+                    mdclog_write(MDCLOG_DEBUG, "Encoding E2SM %s PDU number %d : %s",
+                                 asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
+                                 index++,
+                                 xml_buffer);
+                }
+
+                string runFuncs = (char *)(xml_buffer);
+                runFunXML_v.emplace_back(runFuncs);
+            }
         }
     }
+    return 0;
+}
 
+int collectServiceUpdate_RequestData(E2AP_PDU_t *pdu,
+                                     Sctp_Map_t *sctpMap,
+                                     ReportingMessages_t &message,
+                                     vector <string> &RANfunctionsAdded_v,
+                                     vector <string> &RANfunctionsModified_v) {
+    memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
+    for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.count; i++) {
+        auto *ie = pdu->choice.initiatingMessage->value.choice.RICserviceUpdate.protocolIEs.list.array[i];
+        if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
+            if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctionsID_List) {
+                if (mdclog_level_get() >= MDCLOG_DEBUG) {
+                    mdclog_write(MDCLOG_DEBUG, "Run function list has %d entries",
+                                 ie->value.choice.RANfunctions_List.list.count);
+                }
+                if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
+                    return -1;
+                }
+            }
+        } else if (ie->id == ProtocolIE_ID_id_RANfunctionsModified) {
+            if (ie->value.present == RICserviceUpdate_IEs__value_PR_RANfunctions_List) {
+                if (mdclog_level_get() >= MDCLOG_DEBUG) {
+                    mdclog_write(MDCLOG_DEBUG, "Run function list has %d entries",
+                                 ie->value.choice.RANfunctions_List.list.count);
+                }
+                if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsModified_v) != 0 ) {
+                    return -1;
+                }
+            }
+        }
+    }
+    if (mdclog_level_get() >= MDCLOG_DEBUG) {
+        mdclog_write(MDCLOG_DEBUG, "Run function vector has %ld entries",
+                     RANfunctionsAdded_v.size());
+    }
+    return 0;
 }
 
+#endif
+
+
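+// Pre-creates one {Messages, Bytes} counter pair per E2AP procedure and direction for
+// the given peer, indexed as counters[direction][counter type][ProcedureCode id].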
+void buildPrometheuslist(ConnectedCU_t *peerInfo, Family<Counter> *prometheusFamily) {
+    peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Messages"}});
+    peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"SetupRequest", "Bytes"}});
+
+    peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Messages"}});
+    peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ErrorIndication", "Bytes"}});
+
+    peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICindication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Messages"}});
+    peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICindication)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICindication", "Bytes"}});
+
+    peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Messages"}});
+    peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetRequest", "Bytes"}});
+
+    peerInfo->counters[IN_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Messages"}});
+    peerInfo->counters[IN_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICserviceUpdate", "Bytes"}});
+    // ---------------------------------------------
+    peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Messages"}});
+    peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"ResetACK", "Bytes"}});
+
+    peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Messages"}});
+    peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolACK", "Bytes"}});
+
+    peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Messages"}});
+    peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionACK", "Bytes"}});
+
+    peerInfo->counters[IN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Messages"}});
+    peerInfo->counters[IN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteACK", "Bytes"}});
+    //-------------------------------------------------------------
+
+    peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Messages"}});
+    peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICcontrolFailure", "Bytes"}});
+
+    peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Messages"}});
+    peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionFailure", "Bytes"}});
+
+    peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Messages"}});
+    peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "IN"}, {"RICsubscriptionDeleteFailure", "Bytes"}});
+
+    //====================================================================================
+    peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Messages"}});
+    peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_ErrorIndication)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ErrorIndication", "Bytes"}});
 
+    peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Messages"}});
+    peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetRequest", "Bytes"}});
 
+    peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Messages"}});
+    peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICcontrol)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICcontrol", "Bytes"}});
 
+    peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICserviceQuery)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Messages"}});
+    peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICserviceQuery)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceQuery", "Bytes"}});
+
+    peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Messages"}});
+    peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscription)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscription", "Bytes"}});
+
+    peerInfo->counters[OUT_INITI][MSG_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Messages"}});
+    peerInfo->counters[OUT_INITI][BYTES_COUNTER][(ProcedureCode_id_RICsubscriptionDelete)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICsubscriptionDelete", "Bytes"}});
+    //---------------------------------------------------------------------------------------------------------
+    peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Messages"}});
+    peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupResponse", "Bytes"}});
+
+    peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Messages"}});
+    peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_Reset)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"ResetACK", "Bytes"}});
+
+    peerInfo->counters[OUT_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Messages"}});
+    peerInfo->counters[OUT_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateResponse", "Bytes"}});
+    //----------------------------------------------------------------------------------------------------------------
+    peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Messages"}});
+    peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_E2setup)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"SetupRequestFailure", "Bytes"}});
+
+    peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Messages"}});
+    peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][(ProcedureCode_id_RICserviceUpdate)] = &prometheusFamily->Add({{peerInfo->enodbName, "OUT"}, {"RICserviceUpdateFailure", "Bytes"}});
+}
+/**
+ *
+ * @param pdu
+ * @param sctpMap
+ * @param message
+ * @param RANfunctionsAdded_v
+ * @return
+ */
 int collectSetupRequestData(E2AP_PDU_t *pdu,
-                            Sctp_Map_t *sctpMap,
-                            ReportingMessages_t &message,
-                            vector <string> &runFunDEFXML_v) {
-    auto index = 0;
+                            Sctp_Map_t *sctpMap,
+                            ReportingMessages_t &message /*, vector <string> &RANfunctionsAdded_v*/) {
     memset(message.peerInfo->enodbName, 0 , MAX_ENODB_NAME_SIZE);
     for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.count; i++) {
         auto *ie = pdu->choice.initiatingMessage->value.choice.E2setupRequest.protocolIEs.list.array[i];
@@ -1290,84 +1579,64 @@ int collectSetupRequestData(E2AP_PDU_t *pdu,
                     // no mesage will be sent
                     return -1;
                 }
+
                 memcpy(message.message.enodbName, message.peerInfo->enodbName, strlen(message.peerInfo->enodbName));
                 sctpMap->setkey(message.message.enodbName, message.peerInfo);
             }
-        }
-        // reformat RANFUNCTION Definition to XML
-        if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
+        } /*else if (ie->id == ProtocolIE_ID_id_RANfunctionsAdded) {
             if (ie->value.present == E2setupRequestIEs__value_PR_RANfunctions_List) {
                 if (mdclog_level_get() >= MDCLOG_DEBUG) {
-                    mdclog_write(MDCLOG_DEBUG, "Run function list have %d entries and size %d",
-                                 ie->value.choice.RANfunctions_List.list.count,
-                                 ie->value.choice.RANfunctions_List.list.size);
+                    mdclog_write(MDCLOG_DEBUG, "Run function list has %d entries",
+                                 ie->value.choice.RANfunctions_List.list.count);
                 }
-                for (auto j = 0; i < ie->value.choice.RANfunctions_List.list.count; i++) {
-                    auto *raNfunctionItemIEs = (RANfunction_ItemIEs_t *)ie->value.choice.RANfunctions_List.list.array[j];
-                    if (raNfunctionItemIEs->id == ProtocolIE_ID_id_RANfunction_Item &&
-                        (raNfunctionItemIEs->value.present == RANfunction_ItemIEs__value_PR_RANfunction_Item)) {
-                        // encode to xml
-                        E2SM_gNB_NRT_RANfunction_Definition_t *ranFunDef = nullptr;
-                        auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER,
-                                               &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
-                                               (void **)&ranFunDef,
-                                               raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.buf,
-                                               raNfunctionItemIEs->value.choice.RANfunction_Item.ranFunctionDefinition.size);
-                        if (rval.code != RC_OK) {
-                            mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) E2SM message from : %s",
-                                         rval.code,
-                                         asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name);
-                            return -1;
-                        }
-
-//                        if (mdclog_level_get() >= MDCLOG_DEBUG) {
-//                            char *printBuffer;
-//                            size_t size;
-//                            FILE *stream = open_memstream(&printBuffer, &size);
-//                            asn_fprint(stream, &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition, ranFunDef);
-//                            mdclog_write(MDCLOG_DEBUG, "Encoding E2SM %s PDU past : %s",
-//                                         asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
-//                                         printBuffer);
-//                        }
-                        auto xml_buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
-                        unsigned char xml_buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
-                        // encode to xml
-                        auto er = asn_encode_to_buffer(nullptr,
-                                                  ATS_BASIC_XER,
-                                                  &asn_DEF_E2SM_gNB_NRT_RANfunction_Definition,
-                                                  ranFunDef,
-                                                  xml_buffer,
-                                                  xml_buffer_size);
-                        if (er.encoded == -1) {
-                            mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s",
-                                         asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
-                                         strerror(errno));
-                        } else if (er.encoded > (ssize_t)xml_buffer_size) {
-                            mdclog_write(MDCLOG_ERR, "Buffer of size %d is to small for %s, at %s line %d",
-                                         (int) xml_buffer_size,
-                                         asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name, __func__, __LINE__);
-                        } else {
-                            if (mdclog_level_get() >= MDCLOG_DEBUG) {
-                                mdclog_write(MDCLOG_DEBUG, "Encoding E2SM %s PDU number %d : %s",
-                                             asn_DEF_E2SM_gNB_NRT_RANfunction_Definition.name,
-                                             index++,
-                                             xml_buffer);
-                            }
-                            string runFuncs = (char *)(xml_buffer);
-                            runFunDEFXML_v.emplace_back(runFuncs);
-                        }
-
-                    }
+                if (RAN_Function_list_To_Vector(ie->value.choice.RANfunctions_List, RANfunctionsAdded_v) != 0 ) {
+                    return -1;
                 }
             }
-        }
+        } */
     }
+//    if (mdclog_level_get() >= MDCLOG_DEBUG) {
+//        mdclog_write(MDCLOG_DEBUG, "Run function vector have %ld entries",
+//                     RANfunctionsAdded_v.size());
+//    }
+    return 0;
+}
+
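+// Decodes the aligned-PER E2AP payload held in rmrMessageBuffer.sendMessage and
+// re-encodes it in place as XER (XML), updating the message length before it is
+// forwarded over RMR.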
+int XML_From_PER(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
+    E2AP_PDU_t *pdu = nullptr;
+
     if (mdclog_level_get() >= MDCLOG_DEBUG) {
-        mdclog_write(MDCLOG_DEBUG, "Run function vector have %ld entries",
-                     runFunDEFXML_v.size());
+        mdclog_write(MDCLOG_DEBUG, "got PER message of size %d : %s",
+                     rmrMessageBuffer.sendMessage->len, rmrMessageBuffer.sendMessage->payload);
+    }
+    auto rval = asn_decode(nullptr, ATS_ALIGNED_BASIC_PER, &asn_DEF_E2AP_PDU, (void **) &pdu,
+                           rmrMessageBuffer.sendMessage->payload, rmrMessageBuffer.sendMessage->len);
+    if (rval.code != RC_OK) {
+        mdclog_write(MDCLOG_ERR, "Error %d Decoding (unpack) setup response  from E2MGR : %s",
+                     rval.code,
+                     message.message.enodbName);
+        return -1;
+    }
+
+    int buff_size = RECEIVE_XAPP_BUFFER_SIZE;
+    auto er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu,
+                                   rmrMessageBuffer.sendMessage->payload, buff_size);
+    if (er.encoded == -1) {
+        mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
+        return -1;
+    } else if (er.encoded > (ssize_t)buff_size) {
+        mdclog_write(MDCLOG_ERR, "Buffer of size %d is too small for %s, at %s line %d",
+                     buff_size,
+                     asn_DEF_E2AP_PDU.name,
+                     __func__,
+                     __LINE__);
+        return -1;
     }
+    rmrMessageBuffer.sendMessage->len = er.encoded;
     return 0;
+
 }
+
 /**
  *
  * @param pdu
@@ -1386,41 +1655,55 @@ void asnInitiatingRequest(E2AP_PDU_t *pdu,
     switch (procedureCode) {
         case ProcedureCode_id_E2setup: {
             if (logLevel >= MDCLOG_DEBUG) {
-                mdclog_write(MDCLOG_DEBUG, "Got E2setup\n");
+                mdclog_write(MDCLOG_DEBUG, "Got E2setup");
             }
 
-            // first get the message as XML buffer
-            auto setup_xml_buffer_size = RECEIVE_SCTP_BUFFER_SIZE * 2;
-            unsigned char setup_xml_buffer[RECEIVE_SCTP_BUFFER_SIZE * 2];
-            //unsigned char *tmp_buff_cursor = setup_xml_buffer;
-
-            auto er = asn_encode_to_buffer(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, pdu, setup_xml_buffer, setup_xml_buffer_size);
-            if (er.encoded == -1) {
-                mdclog_write(MDCLOG_ERR, "encoding of %s failed, %s", asn_DEF_E2AP_PDU.name, strerror(errno));
-            } else if (er.encoded > (ssize_t) setup_xml_buffer_size) {
-                mdclog_write(MDCLOG_ERR, "Buffer of size %d is to small for %s, at %s line %d",
-                             (int)setup_xml_buffer_size,
-                             asn_DEF_E2AP_PDU.name, __func__, __LINE__);
+//            vector <string> RANfunctionsAdded_v;
+//            vector <string> RANfunctionsModified_v;
+//            RANfunctionsAdded_v.clear();
+//            RANfunctionsModified_v.clear();
+            if (collectSetupRequestData(pdu, sctpMap, message) != 0) {
+                break;
             }
-            std::string xmlString(setup_xml_buffer_size,  setup_xml_buffer_size + er.encoded);
 
-            vector <string> runFunDEFXML_v;
-            runFunDEFXML_v.clear();
+            buildPrometheuslist(message.peerInfo, message.peerInfo->sctpParams->prometheusFamily);
 
-            auto ret = collectSetupRequestData(pdu, sctpMap,  message, runFunDEFXML_v);
-            if (ret != 0) {
-                break;
+            string messageName("E2setupRequest");
+            string ieName("E2setupRequestIEs");
+            message.message.messageType = RIC_E2_SETUP_REQ;
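+            // message counter goes up by one, the bytes counter by the ASN.1 payload length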
+            message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
+            message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment((double)message.message.asnLength);
+            buildAndsendSetupRequest(message, rmrMessageBuffer, pdu);
+            break;
+        }
+        case ProcedureCode_id_RICserviceUpdate: {
+            if (logLevel >= MDCLOG_DEBUG) {
+                mdclog_write(MDCLOG_DEBUG, "Got RICserviceUpdate %s", message.message.enodbName);
             }
+//            vector <string> RANfunctionsAdded_v;
+//            vector <string> RANfunctionsModified_v;
+//            RANfunctionsAdded_v.clear();
+//            RANfunctionsModified_v.clear();
+//            if (collectServiceUpdate_RequestData(pdu, sctpMap, message,
+//                                                 RANfunctionsAdded_v, RANfunctionsModified_v) != 0) {
+//                break;
+//            }
 
-            //build all parts and send the XML (need to copy the XML with the header to the rmrMessageBuffer payload
-            //TODO replace with new function
-            buildAndsendSetupRequest(message, rmrMessageBuffer, pdu, runFunDEFXML_v);
+            string messageName("RICserviceUpdate");
+            string ieName("RICserviceUpdateIEs");
+            message.message.messageType = RIC_SERVICE_UPDATE;
+            message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
+            message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment((double)message.message.asnLength);
+
+            buildAndsendSetupRequest(message, rmrMessageBuffer, pdu);
             break;
         }
         case ProcedureCode_id_ErrorIndication: {
             if (logLevel >= MDCLOG_DEBUG) {
                 mdclog_write(MDCLOG_DEBUG, "Got ErrorIndication %s", message.message.enodbName);
             }
+            message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication]->Increment();
+            message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication]->Increment((double)message.message.asnLength);
             if (sendRequestToXapp(message, RIC_ERROR_INDICATION, rmrMessageBuffer) != 0) {
                 mdclog_write(MDCLOG_ERR, "RIC_ERROR_INDICATION failed to send to xAPP");
             }
@@ -1430,14 +1713,15 @@ void asnInitiatingRequest(E2AP_PDU_t *pdu,
             if (logLevel >= MDCLOG_DEBUG) {
                 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
             }
-            if (sendRequestToXapp(message, RIC_X2_RESET, rmrMessageBuffer) != 0) {
-                mdclog_write(MDCLOG_ERR, "RIC_X2_RESET message failed to send to xAPP");
+
+            message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
+            message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment((double)message.message.asnLength);
+            if (XML_From_PER(message, rmrMessageBuffer) < 0) {
+                break;
             }
-            break;
-        }
-        case ProcedureCode_id_RICcontrol: {
-            if (logLevel >= MDCLOG_DEBUG) {
-                mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
+
+            if (sendRequestToXapp(message, RIC_E2_RESET_REQ, rmrMessageBuffer) != 0) {
+                mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_REQ message failed to send to xAPP");
             }
             break;
         }
@@ -1474,11 +1758,10 @@ void asnInitiatingRequest(E2AP_PDU_t *pdu,
                                          ie->value.choice.RICrequestID.ricInstanceID,
                                          ie->value.choice.RICrequestID.ricRequestorID);
                         }
+                        message.peerInfo->counters[IN_INITI][MSG_COUNTER][ProcedureCode_id_RICindication]->Increment();
+                        message.peerInfo->counters[IN_INITI][BYTES_COUNTER][ProcedureCode_id_RICindication]->Increment((double)message.message.asnLength);
                         sendRmrMessage(rmrMessageBuffer, message);
                         messageSent = true;
-                    } else if (ie->value.present == RICindication_IEs__value_PR_RICindicationSN) {
-                        rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICindicationSN;
-
                     } else {
                         mdclog_write(MDCLOG_ERR, "RIC request id missing illigal request");
                     }
@@ -1489,33 +1772,6 @@ void asnInitiatingRequest(E2AP_PDU_t *pdu,
             }
             break;
         }
-        case ProcedureCode_id_RICserviceQuery: {
-            if (logLevel >= MDCLOG_DEBUG) {
-                mdclog_write(MDCLOG_DEBUG, "Got RICserviceQuery %s", message.message.enodbName);
-            }
-            break;
-        }
-        case ProcedureCode_id_RICserviceUpdate: {
-            if (logLevel >= MDCLOG_DEBUG) {
-                mdclog_write(MDCLOG_DEBUG, "Got RICserviceUpdate %s", message.message.enodbName);
-            }
-            if (sendRequestToXapp(message, RIC_SERVICE_UPDATE, rmrMessageBuffer) != 0) {
-                mdclog_write(MDCLOG_ERR, "RIC_SERVICE_UPDATE message failed to send to xAPP");
-            }
-            break;
-        }
-        case ProcedureCode_id_RICsubscription: {
-            if (logLevel >= MDCLOG_DEBUG) {
-                mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
-            }
-            break;
-        }
-        case ProcedureCode_id_RICsubscriptionDelete: {
-            if (logLevel >= MDCLOG_DEBUG) {
-                mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
-            }
-            break;
-        }
         default: {
             mdclog_write(MDCLOG_ERR, "Undefined or not supported message = %ld", procedureCode);
             message.message.messageType = 0; // no RMR message type yet
@@ -1543,27 +1799,17 @@ void asnSuccsesfulMsg(E2AP_PDU_t *pdu,
         mdclog_write(MDCLOG_INFO, "Successful Outcome %ld", procedureCode);
     }
     switch (procedureCode) {
-        case ProcedureCode_id_E2setup: {
-            if (logLevel >= MDCLOG_DEBUG) {
-                mdclog_write(MDCLOG_DEBUG, "Got E2setup\n");
-            }
-            break;
-        }
-        case ProcedureCode_id_ErrorIndication: {
-            if (logLevel >= MDCLOG_DEBUG) {
-                mdclog_write(MDCLOG_DEBUG, "Got ErrorIndication %s", message.message.enodbName);
-            }
-            if (sendRequestToXapp(message, RIC_ERROR_INDICATION, rmrMessageBuffer) != 0) {
-                mdclog_write(MDCLOG_ERR, "RIC_ERROR_INDICATION failed to send to xAPP");
-            }
-            break;
-        }
         case ProcedureCode_id_Reset: {
             if (logLevel >= MDCLOG_DEBUG) {
                 mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
             }
-            if (sendRequestToXapp(message, RIC_X2_RESET, rmrMessageBuffer) != 0) {
-                mdclog_write(MDCLOG_ERR, "RIC_X2_RESET message failed to send to xAPP");
+            message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
+            message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment((double)message.message.asnLength);
+            if (XML_From_PER(message, rmrMessageBuffer) < 0) {
+                break;
+            }
+            if (sendRequestToXapp(message, RIC_E2_RESET_RESP, rmrMessageBuffer) != 0) {
+                mdclog_write(MDCLOG_ERR, "RIC_E2_RESET_RESP message failed to send to xAPP");
             }
             break;
         }
@@ -1585,7 +1831,9 @@ void asnSuccsesfulMsg(E2AP_PDU_t *pdu,
                     if (ie->value.present == RICcontrolAcknowledge_IEs__value_PR_RICrequestID) {
                         message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_ACK;
                         rmrMessageBuffer.sendMessage->state = 0;
-                        rmrMessageBuffer.sendMessage->sub_id = (int) ie->value.choice.RICrequestID.ricRequestorID;
+//                        rmrMessageBuffer.sendMessage->sub_id = (int) ie->value.choice.RICrequestID.ricRequestorID;
+                        rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
+
                         static unsigned char tx[32];
                         snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
                         rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
@@ -1593,6 +1841,8 @@ void asnSuccsesfulMsg(E2AP_PDU_t *pdu,
                                        (unsigned char *)message.message.enodbName,
                                        strlen(message.message.enodbName));
 
+                        message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
+                        message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment((double)message.message.asnLength);
                         sendRmrMessage(rmrMessageBuffer, message);
                         messageSent = true;
                     } else {
@@ -1606,66 +1856,12 @@ void asnSuccsesfulMsg(E2AP_PDU_t *pdu,
 
             break;
         }
-        case ProcedureCode_id_RICindication: {
-            if (logLevel >= MDCLOG_DEBUG) {
-                mdclog_write(MDCLOG_DEBUG, "Got RICindication %s", message.message.enodbName);
-            }
-            for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.count; i++) {
-                auto messageSent = false;
-                RICindication_IEs_t *ie = pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.array[i];
-                if (logLevel >= MDCLOG_DEBUG) {
-                    mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
-                }
-                if (ie->id == ProtocolIE_ID_id_RICrequestID) {
-                    if (logLevel >= MDCLOG_DEBUG) {
-                        mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
-                    }
-                    if (ie->value.present == RICindication_IEs__value_PR_RICrequestID) {
-                        static unsigned char tx[32];
-                        message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_INDICATION;
-                        snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
-                        rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
-                        rmr_bytes2meid(rmrMessageBuffer.sendMessage,
-                                       (unsigned char *)message.message.enodbName,
-                                       strlen(message.message.enodbName));
-                        rmrMessageBuffer.sendMessage->state = 0;
-                        rmrMessageBuffer.sendMessage->sub_id = (int) ie->value.choice.RICrequestID.ricRequestorID;
-                        if (mdclog_level_get() >= MDCLOG_DEBUG) {
-                            mdclog_write(MDCLOG_DEBUG, "RIC sub id = %d, message type = %d",
-                                         rmrMessageBuffer.sendMessage->sub_id,
-                                         rmrMessageBuffer.sendMessage->mtype);
-                        }
-                        sendRmrMessage(rmrMessageBuffer, message);
-                        messageSent = true;
-                    } else {
-                        mdclog_write(MDCLOG_ERR, "RIC request id missing illigal request");
-                    }
-                }
-                if (messageSent) {
-                    break;
-                }
-            }
-            break;
-        }
-        case ProcedureCode_id_RICserviceQuery: {
-            if (logLevel >= MDCLOG_DEBUG) {
-                mdclog_write(MDCLOG_DEBUG, "Got RICserviceQuery %s", message.message.enodbName);
-            }
-            break;
-        }
-        case ProcedureCode_id_RICserviceUpdate: {
-            if (logLevel >= MDCLOG_DEBUG) {
-                mdclog_write(MDCLOG_DEBUG, "Got RICserviceUpdate %s", message.message.enodbName);
-            }
-            if (sendRequestToXapp(message, RIC_SERVICE_UPDATE, rmrMessageBuffer) != 0) {
-                mdclog_write(MDCLOG_ERR, "RIC_SERVICE_UPDATE message failed to send to xAPP");
-            }
-            break;
-        }
         case ProcedureCode_id_RICsubscription: {
             if (logLevel >= MDCLOG_DEBUG) {
                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
             }
+            message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
+            message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment((double)message.message.asnLength);
             if (sendRequestToXapp(message, RIC_SUB_RESP, rmrMessageBuffer) != 0) {
                 mdclog_write(MDCLOG_ERR, "Subscription successful message failed to send to xAPP");
             }
@@ -1675,6 +1871,8 @@ void asnSuccsesfulMsg(E2AP_PDU_t *pdu,
             if (logLevel >= MDCLOG_DEBUG) {
                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
             }
+            message.peerInfo->counters[IN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
+            message.peerInfo->counters[IN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment((double)message.message.asnLength);
             if (sendRequestToXapp(message, RIC_SUB_DEL_RESP, rmrMessageBuffer) != 0) {
                 mdclog_write(MDCLOG_ERR, "Subscription delete successful message failed to send to xAPP");
             }
@@ -1706,30 +1904,6 @@ void asnUnSuccsesfulMsg(E2AP_PDU_t *pdu,
         mdclog_write(MDCLOG_INFO, "Unsuccessful Outcome %ld", procedureCode);
     }
     switch (procedureCode) {
-        case ProcedureCode_id_E2setup: {
-            if (logLevel >= MDCLOG_DEBUG) {
-                mdclog_write(MDCLOG_DEBUG, "Got E2setup\n");
-            }
-            break;
-        }
-        case ProcedureCode_id_ErrorIndication: {
-            if (logLevel >= MDCLOG_DEBUG) {
-                mdclog_write(MDCLOG_DEBUG, "Got ErrorIndication %s", message.message.enodbName);
-            }
-            if (sendRequestToXapp(message, RIC_ERROR_INDICATION, rmrMessageBuffer) != 0) {
-                mdclog_write(MDCLOG_ERR, "RIC_ERROR_INDICATION failed to send to xAPP");
-            }
-            break;
-        }
-        case ProcedureCode_id_Reset: {
-            if (logLevel >= MDCLOG_DEBUG) {
-                mdclog_write(MDCLOG_DEBUG, "Got Reset %s", message.message.enodbName);
-            }
-            if (sendRequestToXapp(message, RIC_X2_RESET, rmrMessageBuffer) != 0) {
-                mdclog_write(MDCLOG_ERR, "RIC_X2_RESET message failed to send to xAPP");
-            }
-            break;
-        }
         case ProcedureCode_id_RICcontrol: {
             if (logLevel >= MDCLOG_DEBUG) {
                 mdclog_write(MDCLOG_DEBUG, "Got RICcontrol %s", message.message.enodbName);
@@ -1748,12 +1922,15 @@ void asnUnSuccsesfulMsg(E2AP_PDU_t *pdu,
                     if (ie->value.present == RICcontrolFailure_IEs__value_PR_RICrequestID) {
                         message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_CONTROL_FAILURE;
                         rmrMessageBuffer.sendMessage->state = 0;
-                        rmrMessageBuffer.sendMessage->sub_id = (int) ie->value.choice.RICrequestID.ricRequestorID;
+//                        rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricRequestorID;
+                        rmrMessageBuffer.sendMessage->sub_id = (int)ie->value.choice.RICrequestID.ricInstanceID;
                         static unsigned char tx[32];
                         snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
                         rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
                         rmr_bytes2meid(rmrMessageBuffer.sendMessage, (unsigned char *) message.message.enodbName,
                                        strlen(message.message.enodbName));
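+                        // Prometheus: count the RICcontrol failure (unsuccessful outcome) received from the E2 node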
+                        message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
+                        message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment((double)message.message.asnLength);
                         sendRmrMessage(rmrMessageBuffer, message);
                         messageSent = true;
                     } else {
@@ -1766,66 +1943,12 @@ void asnUnSuccsesfulMsg(E2AP_PDU_t *pdu,
             }
             break;
         }
-        case ProcedureCode_id_RICindication: {
-            if (logLevel >= MDCLOG_DEBUG) {
-                mdclog_write(MDCLOG_DEBUG, "Got RICindication %s", message.message.enodbName);
-            }
-            for (auto i = 0; i < pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.count; i++) {
-                auto messageSent = false;
-                RICindication_IEs_t *ie = pdu->choice.initiatingMessage->value.choice.RICindication.protocolIEs.list.array[i];
-                if (logLevel >= MDCLOG_DEBUG) {
-                    mdclog_write(MDCLOG_DEBUG, "ie type (ProtocolIE_ID) = %ld", ie->id);
-                }
-                if (ie->id == ProtocolIE_ID_id_RICrequestID) {
-                    if (logLevel >= MDCLOG_DEBUG) {
-                        mdclog_write(MDCLOG_DEBUG, "Got RIC requestId entry, ie type (ProtocolIE_ID) = %ld", ie->id);
-                    }
-                    if (ie->value.present == RICindication_IEs__value_PR_RICrequestID) {
-                        static unsigned char tx[32];
-                        message.message.messageType = rmrMessageBuffer.sendMessage->mtype = RIC_INDICATION;
-                        snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
-                        rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, strlen((const char *) tx));
-                        rmr_bytes2meid(rmrMessageBuffer.sendMessage,
-                                       (unsigned char *)message.message.enodbName,
-                                       strlen(message.message.enodbName));
-                        rmrMessageBuffer.sendMessage->state = 0;
-                        rmrMessageBuffer.sendMessage->sub_id = (int) ie->value.choice.RICrequestID.ricRequestorID;
-                        if (mdclog_level_get() >= MDCLOG_DEBUG) {
-                            mdclog_write(MDCLOG_DEBUG, "RIC sub id = %d, message type = %d",
-                                         rmrMessageBuffer.sendMessage->sub_id,
-                                         rmrMessageBuffer.sendMessage->mtype);
-                        }
-                        sendRmrMessage(rmrMessageBuffer, message);
-                        messageSent = true;
-                    } else {
-                        mdclog_write(MDCLOG_ERR, "RIC request id missing illigal request");
-                    }
-                }
-                if (messageSent) {
-                    break;
-                }
-            }
-            break;
-        }
-        case ProcedureCode_id_RICserviceQuery: {
-            if (logLevel >= MDCLOG_DEBUG) {
-                mdclog_write(MDCLOG_DEBUG, "Got RICserviceQuery %s", message.message.enodbName);
-            }
-            break;
-        }
-        case ProcedureCode_id_RICserviceUpdate: {
-            if (logLevel >= MDCLOG_DEBUG) {
-                mdclog_write(MDCLOG_DEBUG, "Got RICserviceUpdate %s", message.message.enodbName);
-            }
-            if (sendRequestToXapp(message, RIC_SERVICE_UPDATE, rmrMessageBuffer) != 0) {
-                mdclog_write(MDCLOG_ERR, "RIC_SERVICE_UPDATE message failed to send to xAPP");
-            }
-            break;
-        }
         case ProcedureCode_id_RICsubscription: {
             if (logLevel >= MDCLOG_DEBUG) {
                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscription %s", message.message.enodbName);
             }
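+            // Prometheus: count the RICsubscription failure (unsuccessful outcome) and its byte length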
+            message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
+            message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment((double)message.message.asnLength);
             if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
                 mdclog_write(MDCLOG_ERR, "Subscription unsuccessful message failed to send to xAPP");
             }
@@ -1835,7 +1958,9 @@ void asnUnSuccsesfulMsg(E2AP_PDU_t *pdu,
             if (logLevel >= MDCLOG_DEBUG) {
                 mdclog_write(MDCLOG_DEBUG, "Got RICsubscriptionDelete %s", message.message.enodbName);
             }
-            if (sendRequestToXapp(message, RIC_SUB_DEL_FAILURE, rmrMessageBuffer) != 0) {
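+            // Prometheus: count the RICsubscriptionDelete failure (unsuccessful outcome) and its byte length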
+            message.peerInfo->counters[IN_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
+            message.peerInfo->counters[IN_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment((double)message.message.asnLength);
+            if (sendRequestToXapp(message, RIC_SUB_FAILURE, rmrMessageBuffer) != 0) {
                 mdclog_write(MDCLOG_ERR, "Subscription Delete unsuccessful message failed to send to xAPP");
             }
             break;
@@ -1874,10 +1999,13 @@ int sendRequestToXapp(ReportingMessages_t &message,
     return rc;
 }
 
-
+/**
+ * Initialize the RMR context used to exchange messages with the xApps.
+ * On failure the context is left as nullptr and an error is logged.
+ *
+ * @param pSctpParams global SCTP parameters; rmrCtx is populated on success
+ */
 void getRmrContext(sctp_params_t &pSctpParams) {
     pSctpParams.rmrCtx = nullptr;
-    pSctpParams.rmrCtx = rmr_init(pSctpParams.rmrAddress, RMR_MAX_RCV_BYTES, RMRFL_NONE);
+    pSctpParams.rmrCtx = rmr_init(pSctpParams.rmrAddress, RECEIVE_XAPP_BUFFER_SIZE, RMRFL_NONE);
     if (pSctpParams.rmrCtx == nullptr) {
         mdclog_write(MDCLOG_ERR, "Failed to initialize RMR");
         return;
@@ -1918,11 +2046,18 @@ void getRmrContext(sctp_params_t &pSctpParams) {
     }
 }
 
-int BuildPERSetupResponseMessaeFromXML(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
-    E2AP_PDU_t *pdu;
+/**
+ * Convert an XML (XER) encoded E2AP PDU received from an xApp into PER
+ * encoding before it is forwarded to the E2 node.
+ *
+ * @param message reporting context of the message being handled
+ * @param rmrMessageBuffer RMR buffers holding the received payload
+ * @return 0 on success, non-zero on failure
+ */
+int PER_FromXML(ReportingMessages_t &message, RmrMessagesBuffer_t &rmrMessageBuffer) {
+    E2AP_PDU_t *pdu = nullptr;
 
     if (mdclog_level_get() >= MDCLOG_DEBUG) {
-        mdclog_write(MDCLOG_DEBUG, "got xml setup response \n %s\n", rmrMessageBuffer.rcvMessage->payload);
+        mdclog_write(MDCLOG_DEBUG, "got xml Format  data from xApp of size %d is:%s",
+                rmrMessageBuffer.rcvMessage->len, rmrMessageBuffer.rcvMessage->payload);
     }
     auto rval = asn_decode(nullptr, ATS_BASIC_XER, &asn_DEF_E2AP_PDU, (void **) &pdu,
                            rmrMessageBuffer.rcvMessage->payload, rmrMessageBuffer.rcvMessage->len);
@@ -1988,12 +2123,27 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
         return -1;
     }
     rmr_get_meid(rmrMessageBuffer.rcvMessage, (unsigned char *)message.message.enodbName);
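+    // look up the E2 node this xApp message refers to; only a few message types are allowed without a known peer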
+    message.peerInfo = (ConnectedCU_t *) sctpMap->find(message.message.enodbName);
+    if (message.peerInfo == nullptr) {
+        auto type = rmrMessageBuffer.rcvMessage->mtype;
+        switch (type) {
+            case RIC_SCTP_CLEAR_ALL:
+            case E2_TERM_KEEP_ALIVE_REQ:
+            case RIC_HEALTH_CHECK_REQ:
+                break;
+            default:
+                mdclog_write(MDCLOG_ERR, "Failed to send message no CU entry %s", message.message.enodbName);
+                return -1;
+        }
+    }
+
     switch (rmrMessageBuffer.rcvMessage->mtype) {
         case RIC_E2_SETUP_RESP : {
-            if (BuildPERSetupResponseMessaeFromXML(message, rmrMessageBuffer) != 0) {
+            if (PER_FromXML(message, rmrMessageBuffer) != 0) {
                 break;
             }
-
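+            // Prometheus: count the outgoing E2 Setup Response (successful outcome) and its RMR payload length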
+            message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
+            message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_RESP");
                 return -6;
@@ -2001,9 +2151,11 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
             break;
         }
         case RIC_E2_SETUP_FAILURE : {
-            if (BuildPERSetupResponseMessaeFromXML(message, rmrMessageBuffer) != 0) {
+            if (PER_FromXML(message, rmrMessageBuffer) != 0) {
                 break;
             }
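+            // Prometheus: count the outgoing E2 Setup Failure (unsuccessful outcome) and its RMR payload length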
+            message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_E2setup]->Increment();
+            message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_E2setup]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_SETUP_FAILURE");
                 return -6;
@@ -2011,6 +2163,8 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
             break;
         }
         case RIC_ERROR_INDICATION: {
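+            // Prometheus: count the outgoing Error Indication initiated toward the E2 node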
+            message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_ErrorIndication]->Increment();
+            message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_ErrorIndication]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_ERROR_INDICATION");
                 return -6;
@@ -2018,6 +2172,8 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
             break;
         }
         case RIC_SUB_REQ: {
+            message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscription]->Increment();
+            message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscription]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_REQ");
                 return -6;
@@ -2025,6 +2181,8 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
             break;
         }
         case RIC_SUB_DEL_REQ: {
+            message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment();
+            message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICsubscriptionDelete]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SUB_DEL_REQ");
                 return -6;
@@ -2032,6 +2190,8 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
             break;
         }
         case RIC_CONTROL_REQ: {
+            message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICcontrol]->Increment();
+            message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICcontrol]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_CONTROL_REQ");
                 return -6;
@@ -2039,6 +2199,11 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
             break;
         }
         case RIC_SERVICE_QUERY: {
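+            // RIC Service Query arrives from the xApp as XML; convert it to PER before counting and sending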
+            if (PER_FromXML(message, rmrMessageBuffer) != 0) {
+                break;
+            }
+            message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment();
+            message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_RICserviceQuery]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_QUERY");
                 return -6;
@@ -2046,6 +2211,11 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
             break;
         }
         case RIC_SERVICE_UPDATE_ACK: {
+            if (PER_FromXML(message, rmrMessageBuffer) != 0) {
+                break;
+            }
+            message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
+            message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_ACK");
                 return -6;
@@ -2053,22 +2223,37 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
             break;
         }
         case RIC_SERVICE_UPDATE_FAILURE: {
+            if (PER_FromXML(message, rmrMessageBuffer) != 0) {
+                break;
+            }
+            message.peerInfo->counters[OUT_UN_SUCC][MSG_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment();
+            message.peerInfo->counters[OUT_UN_SUCC][BYTES_COUNTER][ProcedureCode_id_RICserviceUpdate]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
                 mdclog_write(MDCLOG_ERR, "Failed to send RIC_SERVICE_UPDATE_FAILURE");
                 return -6;
             }
             break;
         }
-        case RIC_X2_RESET: {
+        case RIC_E2_RESET_REQ: {
+            if (PER_FromXML(message, rmrMessageBuffer) != 0) {
+                break;
+            }
+            message.peerInfo->counters[OUT_INITI][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
+            message.peerInfo->counters[OUT_INITI][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
-                mdclog_write(MDCLOG_ERR, "Failed to send RIC_X2_RESET");
+                mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET");
                 return -6;
             }
             break;
         }
-        case RIC_X2_RESET_RESP: {
+        case RIC_E2_RESET_RESP: {
+            if (PER_FromXML(message, rmrMessageBuffer) != 0) {
+                break;
+            }
+            message.peerInfo->counters[OUT_SUCC][MSG_COUNTER][ProcedureCode_id_Reset]->Increment();
+            message.peerInfo->counters[OUT_SUCC][BYTES_COUNTER][ProcedureCode_id_Reset]->Increment(rmrMessageBuffer.rcvMessage->len);
             if (sendDirectionalSctpMsg(rmrMessageBuffer, message, 0, sctpMap) != 0) {
-                mdclog_write(MDCLOG_ERR, "Failed to send RIC_X2_RESET_RESP");
+                mdclog_write(MDCLOG_ERR, "Failed to send RIC_E2_RESET_RESP");
                 return -6;
             }
             break;
@@ -2131,6 +2316,30 @@ int receiveXappMessages(Sctp_Map_t *sctpMap,
 
             break;
         }
+        case RIC_HEALTH_CHECK_REQ: {
+            // answer the health check: send RIC_HEALTH_CHECK_RESP with an "OK" payload back over RMR
+            rmr_bytes2payload(rmrMessageBuffer.sendMessage,
+                              (unsigned char *)"OK",
+                              2);
+            rmrMessageBuffer.sendMessage->mtype = RIC_HEALTH_CHECK_RESP;
+            rmrMessageBuffer.sendMessage->state = 0;
+            static unsigned char tx[32];
+            auto txLen = snprintf((char *) tx, sizeof tx, "%15ld", transactionCounter++);
+            rmr_bytes2xact(rmrMessageBuffer.sendMessage, tx, txLen);
+            rmrMessageBuffer.sendMessage = rmr_send_msg(rmrMessageBuffer.rmrCtx, rmrMessageBuffer.sendMessage);
+            if (rmrMessageBuffer.sendMessage == nullptr) {
+                rmrMessageBuffer.sendMessage = rmr_alloc_msg(rmrMessageBuffer.rmrCtx, RECEIVE_XAPP_BUFFER_SIZE);
+                mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP RMR message returned NULL");
+            } else if (rmrMessageBuffer.sendMessage->state != 0)  {
+                mdclog_write(MDCLOG_ERR, "Failed to send RIC_HEALTH_CHECK_RESP, on RMR state = %d ( %s)",
+                             rmrMessageBuffer.sendMessage->state, translateRmrErrorMessages(rmrMessageBuffer.sendMessage->state).c_str());
+            } else if (mdclog_level_get() >= MDCLOG_DEBUG) {
+                mdclog_write(MDCLOG_DEBUG, "Got RIC_HEALTH_CHECK_REQ Request send : OK");
+            }
+
+            break;
+        }
+
         default:
             mdclog_write(MDCLOG_WARN, "Message Type : %d is not seported", rmrMessageBuffer.rcvMessage->mtype);
             message.message.asndata = rmrMessageBuffer.rcvMessage->payload;
@@ -2184,19 +2393,9 @@ int sendMessagetoCu(Sctp_Map_t *sctpMap,
                     RmrMessagesBuffer_t &messageBuffer,
                     ReportingMessages_t &message,
                     int failedMesgId) {
-    auto *peerInfo = (ConnectedCU_t *) sctpMap->find(message.message.enodbName);
-    if (peerInfo == nullptr) {
-        if (failedMesgId != 0) {
-            sendFailedSendingMessagetoXapp(messageBuffer, message, failedMesgId);
-        } else {
-            mdclog_write(MDCLOG_ERR, "Failed to send message no CU entry %s", message.message.enodbName);
-        }
-        return -1;
-    }
-
     // get the FD
     message.message.messageType = messageBuffer.rcvMessage->mtype;
-    auto rc = sendSctpMsg(peerInfo, message, sctpMap);
+    auto rc = sendSctpMsg(message.peerInfo, message, sctpMap);
     return rc;
 }