Implement a version of the O1/VES interface for SMO 75/6075/12 dawn
author santanude <santanu.de@xoriant.com>
Thu, 13 May 2021 14:41:38 +0000 (20:11 +0530)
committer santanude <santanu.de@xoriant.com>
Thu, 27 May 2021 07:31:32 +0000 (13:01 +0530)
 
Developed Ves-Collector to support different types of events. Ves-Collector validates incoming requests against the schema and persists valid event data in InfluxDB.

Issue-Id: SMO-8
Signed-off-by: santanude <santanu.de@xoriant.com>
Change-Id: I07d5bc027ef36cd924ec3bd97fdb6fb724ea4444
Signed-off-by: santanude <santanu.de@xoriant.com>
23 files changed:
README [new file with mode: 0644]
collector/Dashboard.json [new file with mode: 0644]
collector/Dockerfile [new file with mode: 0644]
collector/Makefile [new file with mode: 0644]
collector/datasource.json [new file with mode: 0644]
collector/evel-test-collector/LICENSE.md [new file with mode: 0644]
collector/evel-test-collector/code/README.md [new file with mode: 0644]
collector/evel-test-collector/code/collector/collector.py [new file with mode: 0644]
collector/evel-test-collector/code/collector/monitor.py [new file with mode: 0755]
collector/evel-test-collector/code/collector/rest_dispatcher.py [new file with mode: 0644]
collector/evel-test-collector/code/collector/test_control.py [new file with mode: 0644]
collector/evel-test-collector/config/README.md [new file with mode: 0644]
collector/evel-test-collector/config/collector.conf [new file with mode: 0644]
collector/evel-test-collector/docs/att_interface_definition/CommonEventFormat_30.2.1_ONAP.json [new file with mode: 0644]
collector/evel-test-collector/docs/att_interface_definition/README.md [new file with mode: 0644]
collector/evel-test-collector/docs/test_collector_user_guide/README.md [new file with mode: 0644]
collector/evel-test-collector/docs/test_collector_user_guide/test_collector_user_guide.md [new file with mode: 0644]
collector/evel-test-collector/scripts/README.md [new file with mode: 0644]
collector/evel-test-collector/scripts/linux/go-collector.sh [new file with mode: 0755]
collector/evel-test-collector/scripts/windows/go-collector.bat [new file with mode: 0644]
collector/start.sh [new file with mode: 0755]
collector/ves-start.sh [new file with mode: 0755]
collector/ves-stop.sh [new file with mode: 0755]

diff --git a/README b/README
new file mode 100644 (file)
index 0000000..9f2dc35
--- /dev/null
+++ b/README
@@ -0,0 +1,24 @@
+This repository supports the VES collector interface in O-RAN. It
+makes use of three containers, the ves-collector container that
+collects VES events posted by other parts of the O-RAN solution,
+grafana, which is used to display measurement (PM) data posted
+by other entities and influxdb which is used to persist the data
+received by the collector.
+
+PREREQUISITES:
+
+The prerequisite to use this solution is that you need Docker
+running on the machine, where you want to run these containers.
+
+BUILD:
+
+To build the solution, you need to do the following in the collector
+folder.
+
+% make
+
+RUN:
+
+There are two scripts in the collector folder: a ves-start.sh script,
+which starts the VES collector and the other containers, and a
+ves-stop.sh script, which can be used to stop them.
\ No newline at end of file
diff --git a/collector/Dashboard.json b/collector/Dashboard.json
new file mode 100644 (file)
index 0000000..b88646c
--- /dev/null
@@ -0,0 +1,996 @@
+{
+"dashboard": {
+  "description": "This Dashboard provides a general overview of a host, with templating to select the hostname.",
+  "editable": true,
+  "gnetId": null,
+  "graphTooltip": 0,
+  "hideControls": false,
+  "id": null,
+  "links": [],
+  "refresh": "10s",
+  "rows": [
+    {
+      "collapse": false,
+      "height": 401,
+      "panels": [
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "VESEvents",
+          "fill": 1,
+          "grid": {
+            "leftLogBase": 1,
+            "leftMax": null,
+            "leftMin": null,
+            "rightLogBase": 1,
+            "rightMax": null,
+            "rightMin": null
+          },
+          "id": 3,
+          "interval": "30s",
+          "legend": {
+            "alignAsTable": true,
+            "avg": true,
+            "current": true,
+            "max": true,
+            "min": true,
+            "rightSide": false,
+            "show": true,
+            "sort": "current",
+            "sortDesc": true,
+            "total": false,
+            "values": true
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "span": 6,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "alias": "",
+              "dsType": "influxdb",
+              "groupBy": [
+                {
+                  "params": [
+                    "system"
+                  ],
+                  "type": "tag"
+                }
+              ],
+              "measurement": "load",
+              "orderByTime": "ASC",
+              "policy": "default",
+              "query": "SELECT mean(\"cpusystem\") FROM \"cpu\" WHERE $timeFilter GROUP BY time(1m) fill(null)",
+              "refId": "B",
+              "resultFormat": "time_series",
+              "select": [
+                [
+                  {
+                    "params": [
+                      "load-shortterm"
+                    ],
+                    "type": "field"
+                  },
+                  {
+                    "params": [
+                      "5"
+                    ],
+                    "type": "moving_average"
+                  }
+                ]
+              ],
+              "tags": [
+                {
+                  "key": "system",
+                  "operator": "=~",
+                  "value": "/^$host$/"
+                }
+              ]
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "host load",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "x-axis": true,
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "y-axis": true,
+          "y_formats": [
+            "short",
+            "short"
+          ],
+          "yaxes": [
+            {
+              "format": "short",
+              "label": "Percent",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ]
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "VESEvents",
+          "fill": 1,
+          "grid": {
+            "leftLogBase": 1,
+            "leftMax": null,
+            "leftMin": null,
+            "rightLogBase": 1,
+            "rightMax": null,
+            "rightMin": null
+          },
+          "id": 6,
+          "interval": "30s",
+          "legend": {
+            "alignAsTable": true,
+            "avg": true,
+            "current": true,
+            "max": true,
+            "min": true,
+            "rightSide": false,
+            "show": true,
+            "sort": "current",
+            "sortDesc": true,
+            "total": false,
+            "values": true
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "span": 6,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "alias": "",
+              "dsType": "influxdb",
+              "groupBy": [
+                {
+                  "params": [
+                    "system"
+                  ],
+                  "type": "tag"
+                },
+                {
+                  "params": [
+                    "cpu"
+                  ],
+                  "type": "tag"
+                }
+              ],
+              "measurement": "cpuUsage",
+              "orderByTime": "ASC",
+              "policy": "default",
+              "query": "SELECT mean(\"cpusystem\") FROM \"cpu\" WHERE $timeFilter GROUP BY time(1m) fill(null)",
+              "refId": "B",
+              "resultFormat": "time_series",
+              "select": [
+                [
+                  {
+                    "params": [
+                      "cpuUsageUser"
+                    ],
+                    "type": "field"
+                  },
+                  {
+                    "params": [
+                      "5"
+                    ],
+                    "type": "moving_average"
+                  }
+                ]
+              ],
+              "tags": [
+                {
+                  "key": "system",
+                  "operator": "=~",
+                  "value": "/^$host$/"
+                }
+              ]
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "host CPU Usage User",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "x-axis": true,
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "y-axis": true,
+          "y_formats": [
+            "short",
+            "short"
+          ],
+          "yaxes": [
+            {
+              "format": "short",
+              "label": "Percent",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ]
+        }
+      ],
+      "repeat": null,
+      "repeatIteration": null,
+      "repeatRowId": null,
+      "showTitle": false,
+      "title": "Dashboard Row",
+      "titleSize": "h6"
+    },
+    {
+      "collapse": false,
+      "height": 442,
+      "panels": [
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "VESEvents",
+          "fill": 1,
+          "grid": {
+            "leftLogBase": 1,
+            "leftMax": null,
+            "leftMin": null,
+            "rightLogBase": 1,
+            "rightMax": null,
+            "rightMin": null
+          },
+          "id": 2,
+          "interval": "30s",
+          "legend": {
+            "alignAsTable": true,
+            "avg": true,
+            "current": true,
+            "max": true,
+            "min": true,
+            "rightSide": false,
+            "show": true,
+            "sort": "current",
+            "sortDesc": true,
+            "total": false,
+            "values": true
+          },
+          "lines": true,
+          "linewidth": 2,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "span": 6,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "alias": "",
+              "dsType": "influxdb",
+              "groupBy": [
+                {
+                  "params": [
+                    "system"
+                  ],
+                  "type": "tag"
+                },
+                {
+                  "params": [
+                    "vnic"
+                  ],
+                  "type": "tag"
+                }
+              ],
+              "measurement": "vNicPerformance",
+              "orderByTime": "ASC",
+              "policy": "default",
+              "query": "SELECT derivative(mean(\"rxoctetsacc\"), 10s) FROM \"vnic\" WHERE \"system\" = 'computehost' AND $timeFilter GROUP BY time(1m) fill(null)",
+              "refId": "A",
+              "resultFormat": "time_series",
+              "select": [
+                [
+                  {
+                    "params": [
+                      "receivedTotalPacketsAccumulated"
+                    ],
+                    "type": "field"
+                  },
+                  {
+                    "params": [
+                      "5"
+                    ],
+                    "type": "moving_average"
+                  }
+                ]
+              ],
+              "tags": [
+                {
+                  "key": "system",
+                  "operator": "=~",
+                  "value": "/^$host$/"
+                }
+              ]
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Received Octets",
+          "tooltip": {
+            "shared": true,
+            "sort": 2,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "x-axis": true,
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "y-axis": true,
+          "y_formats": [
+            "short",
+            "short"
+          ],
+          "yaxes": [
+            {
+              "format": "short",
+              "label": "Octets/Packets",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ]
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "VESEvents",
+          "fill": 1,
+          "grid": {
+            "leftLogBase": 1,
+            "leftMax": null,
+            "leftMin": null,
+            "rightLogBase": 1,
+            "rightMax": null,
+            "rightMin": null
+          },
+          "id": 4,
+          "interval": "30s",
+          "legend": {
+            "alignAsTable": true,
+            "avg": true,
+            "current": true,
+            "max": true,
+            "min": true,
+            "rightSide": false,
+            "show": true,
+            "sort": "current",
+            "sortDesc": true,
+            "total": false,
+            "values": true
+          },
+          "lines": true,
+          "linewidth": 2,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "span": 6,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "alias": "",
+              "dsType": "influxdb",
+              "groupBy": [
+                {
+                  "params": [
+                    "system"
+                  ],
+                  "type": "tag"
+                },
+                {
+                  "params": [
+                    "vnic"
+                  ],
+                  "type": "tag"
+                }
+              ],
+              "measurement": "vNicPerformance",
+              "orderByTime": "ASC",
+              "policy": "default",
+              "refId": "B",
+              "resultFormat": "time_series",
+              "select": [
+                [
+                  {
+                    "params": [
+                      "receivedOctetsAccumulated"
+                    ],
+                    "type": "field"
+                  },
+                  {
+                    "params": [
+                      "5"
+                    ],
+                    "type": "moving_average"
+                  }
+                ]
+              ],
+              "tags": [
+                {
+                  "key": "system",
+                  "operator": "=~",
+                  "value": "/^$host$/"
+                }
+              ]
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Transmitted Octets",
+          "tooltip": {
+            "shared": true,
+            "sort": 2,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "x-axis": true,
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "y-axis": true,
+          "y_formats": [
+            "short",
+            "short"
+          ],
+          "yaxes": [
+            {
+              "format": "short",
+              "label": "Octets/Packets",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ]
+        }
+      ],
+      "repeat": null,
+      "repeatIteration": null,
+      "repeatRowId": null,
+      "showTitle": false,
+      "title": "Dashboard Row",
+      "titleSize": "h6"
+    },
+    {
+      "collapse": false,
+      "height": 362,
+      "panels": [
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "VESEvents",
+          "fill": 1,
+          "id": 7,
+          "legend": {
+            "alignAsTable": true,
+            "avg": true,
+            "current": true,
+            "max": true,
+            "min": true,
+            "show": true,
+            "sort": "current",
+            "sortDesc": true,
+            "total": false,
+            "values": true
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "span": 6,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "alias": "",
+              "dsType": "influxdb",
+              "groupBy": [
+                {
+                  "params": [
+                    "system"
+                  ],
+                  "type": "tag"
+                },
+                {
+                  "params": [
+                    "disk"
+                  ],
+                  "type": "tag"
+                }
+              ],
+              "measurement": "diskUsage",
+              "orderByTime": "ASC",
+              "policy": "autogen",
+              "refId": "A",
+              "resultFormat": "time_series",
+              "select": [
+                [
+                  {
+                    "params": [
+                      "diskOpsWriteLast"
+                    ],
+                    "type": "field"
+                  },
+                  {
+                    "params": [
+                      "5"
+                    ],
+                    "type": "moving_average"
+                  }
+                ]
+              ],
+              "tags": [
+                {
+                  "key": "system",
+                  "operator": "=~",
+                  "value": "/^$host$/"
+                },
+                {
+                  "condition": "AND",
+                  "key": "disk",
+                  "operator": "=",
+                  "value": "sda"
+                }
+              ]
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Disk Usage SDA",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 10,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ]
+        },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "VESEvents",
+          "fill": 1,
+          "id": 8,
+          "legend": {
+            "alignAsTable": true,
+            "avg": true,
+            "current": true,
+            "max": true,
+            "min": true,
+            "show": true,
+            "sort": "current",
+            "sortDesc": true,
+            "total": false,
+            "values": true
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "span": 6,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "alias": "",
+              "dsType": "influxdb",
+              "groupBy": [
+                {
+                  "params": [
+                    "system"
+                  ],
+                  "type": "tag"
+                },
+                {
+                  "params": [
+                    "disk"
+                  ],
+                  "type": "tag"
+                }
+              ],
+              "measurement": "diskUsage",
+              "orderByTime": "ASC",
+              "policy": "autogen",
+              "refId": "A",
+              "resultFormat": "time_series",
+              "select": [
+                [
+                  {
+                    "params": [
+                      "diskOpsWriteLast"
+                    ],
+                    "type": "field"
+                  },
+                  {
+                    "params": [
+                      "5"
+                    ],
+                    "type": "moving_average"
+                  }
+                ]
+              ],
+              "tags": [
+                {
+                  "key": "system",
+                  "operator": "=~",
+                  "value": "/^$host$/"
+                },
+                {
+                  "condition": "AND",
+                  "key": "disk",
+                  "operator": "=",
+                  "value": "sdb"
+                }
+              ]
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Disk Usage SDB",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 10,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ]
+        }
+      ],
+      "repeat": null,
+      "repeatIteration": null,
+      "repeatRowId": null,
+      "showTitle": false,
+      "title": "Dashboard Row",
+      "titleSize": "h6"
+    },
+    {
+      "collapse": false,
+      "height": 250,
+      "panels": [
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "VESEvents",
+          "fill": 1,
+          "id": 5,
+          "legend": {
+            "alignAsTable": true,
+            "avg": true,
+            "current": true,
+            "max": true,
+            "min": true,
+            "show": true,
+            "sort": "current",
+            "sortDesc": true,
+            "total": false,
+            "values": true
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "span": 12,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "alias": "",
+              "dsType": "influxdb",
+              "groupBy": [
+                {
+                  "params": [
+                    "system"
+                  ],
+                  "type": "tag"
+                }
+              ],
+              "measurement": "memoryUsage",
+              "orderByTime": "ASC",
+              "policy": "autogen",
+              "refId": "A",
+              "resultFormat": "time_series",
+              "select": [
+                [
+                  {
+                    "params": [
+                      "memoryUsed"
+                    ],
+                    "type": "field"
+                  }
+                ]
+              ],
+              "tags": [
+                {
+                  "key": "system",
+                  "operator": "=~",
+                  "value": "/^$host$/"
+                }
+              ]
+            }
+          ],
+          "thresholds": [],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Memory",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 10,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ]
+        }
+      ],
+      "repeat": null,
+      "repeatIteration": null,
+      "repeatRowId": null,
+      "showTitle": false,
+      "title": "Dashboard Row",
+      "titleSize": "h6"
+    }
+  ],
+  "schemaVersion": 14,
+  "style": "dark",
+  "tags": [],
+  "templating": {
+    "list": [
+      {
+        "allValue": null,
+        "current": {},
+        "datasource": "VESEvents",
+        "hide": 0,
+        "includeAll": true,
+        "label": "host",
+        "multi": true,
+        "name": "host",
+        "options": [],
+        "query": "SHOW TAG VALUES WITH KEY=system",
+        "refresh": 1,
+        "regex": "",
+        "sort": 0,
+        "tagValuesQuery": "",
+        "tags": [],
+        "tagsQuery": "",
+        "type": "query",
+        "useTags": false
+      }
+    ]
+  },
+  "time": {
+    "from": "now-30m",
+    "to": "now"
+  },
+  "timepicker": {
+    "now": true,
+    "refresh_intervals": [
+      "10s",
+      "20s",
+      "30s",
+      "1m"
+    ],
+    "time_options": [
+      "5m",
+      "15m",
+      "1h",
+      "6h",
+      "12h",
+      "24h",
+      "2d",
+      "7d",
+      "30d"
+    ]
+  },
+  "timezone": "browser",
+  "title": "VES Demo",
+  "version": 4
+}
+}
diff --git a/collector/Dockerfile b/collector/Dockerfile
new file mode 100644 (file)
index 0000000..27bf113
--- /dev/null
@@ -0,0 +1,35 @@
+# Copyright 2017-2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: A Dockerfile for building an OPNFV VES Collector container image.
+#
+# Status: this is a work in progress, under test.
+#
+
+FROM ubuntu:xenial
+
+RUN apt-get update && apt-get install -y apt-utils
+RUN apt-get -y upgrade
+RUN apt-get update && apt-get install -y git python-pip python-jsonschema curl
+RUN pip install requests pytz tzlocal
+
+RUN mkdir /opt/ves
+
+# copy VES Collector over to the Docker
+RUN mkdir /opt/ves/evel-test-collector
+ADD evel-test-collector /opt/ves/evel-test-collector
+
+COPY Dashboard.json /opt/ves/Dashboard.json
+COPY start.sh /opt/ves/start.sh
+ENTRYPOINT ["/bin/bash", "/opt/ves/start.sh"]
diff --git a/collector/Makefile b/collector/Makefile
new file mode 100644 (file)
index 0000000..1e4060f
--- /dev/null
@@ -0,0 +1,5 @@
+default: all
+
+all:
+       docker build -t ves-collector .
+
diff --git a/collector/datasource.json b/collector/datasource.json
new file mode 100644 (file)
index 0000000..4f439e8
--- /dev/null
@@ -0,0 +1,14 @@
+{ "name":"VESEvents",
+  "type":"influxdb",
+  "access":"direct",
+  "url":"http://127.0.0.1:3330",
+  "password":"root",
+  "user":"root",
+  "database":"veseventsdb",
+  "basicAuth":false,
+  "basicAuthUser":"",
+  "basicAuthPassword":"",
+  "withCredentials":false,
+  "isDefault":false,
+  "jsonData":null
+}
diff --git a/collector/evel-test-collector/LICENSE.md b/collector/evel-test-collector/LICENSE.md
new file mode 100644 (file)
index 0000000..f131384
--- /dev/null
@@ -0,0 +1,27 @@
+BSD License
+
+Copyright (c) 2016, AT&T Intellectual Property.  All other rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted
+provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions
+   and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice, this list of
+   conditions and the following disclaimer in the documentation and/or other materials provided
+   with the distribution.
+3. All advertising materials mentioning features or use of this software must display the
+   following acknowledgement:  This product includes software developed by the AT&T.
+4. Neither the name of AT&T nor the names of its contributors may be used to endorse or
+   promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY AT&T INTELLECTUAL PROPERTY ''AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+SHALL AT&T INTELLECTUAL PROPERTY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;  LOSS OF USE, DATA, OR PROFITS;
+OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
diff --git a/collector/evel-test-collector/code/README.md b/collector/evel-test-collector/code/README.md
new file mode 100644 (file)
index 0000000..6ccd58d
--- /dev/null
@@ -0,0 +1 @@
+NOTE: This folder and subfolders have not been updated since the 2016-11-23 update release. Compatibility with the current VES specification and code has not been verified.
\ No newline at end of file
diff --git a/collector/evel-test-collector/code/collector/collector.py b/collector/evel-test-collector/code/collector/collector.py
new file mode 100644 (file)
index 0000000..27509f2
--- /dev/null
@@ -0,0 +1,656 @@
+#!/usr/bin/env python
+'''
+Program which acts as the collector for the Vendor Event Listener REST API.
+
+Only intended for test purposes.
+
+License
+-------
+
+Copyright(c) <2016>, AT&T Intellectual Property.  All other rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+3. All advertising materials mentioning features or use of this software
+   must display the following acknowledgement:  This product includes
+   software developed by the AT&T.
+4. Neither the name of AT&T nor the names of its contributors may be used to
+   endorse or promote products derived from this software without specific
+   prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY AT&T INTELLECTUAL PROPERTY ''AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL AT&T INTELLECTUAL PROPERTY BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+'''
+
+from rest_dispatcher import PathDispatcher, set_404_content
+from wsgiref.simple_server import make_server
+import sys
+import os
+import platform
+import traceback
+import time
+from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
+import ConfigParser
+import logging.handlers
+from base64 import b64decode
+import string
+import json
+import jsonschema
+from functools import partial
+
+_hello_resp = '''\
+<html>
+  <head>
+     <title>Hello {name}</title>
+   </head>
+   <body>
+     <h1>Hello {name}!</h1>
+   </body>
+</html>'''
+
+_localtime_resp = '''\
+<?xml version="1.0"?>
+<time>
+  <year>{t.tm_year}</year>
+  <month>{t.tm_mon}</month>
+  <day>{t.tm_mday}</day>
+  <hour>{t.tm_hour}</hour>
+  <minute>{t.tm_min}</minute>
+  <second>{t.tm_sec}</second>
+</time>'''
+
+__all__ = []
+__version__ = 0.1
+__date__ = '2015-12-04'
+__updated__ = '2015-12-04'
+
+TESTRUN = False
+DEBUG = False
+PROFILE = False
+
+#------------------------------------------------------------------------------
+# Credentials we expect clients to authenticate themselves with.
+#------------------------------------------------------------------------------
+vel_username = ''
+vel_password = ''
+
+#------------------------------------------------------------------------------
+# The JSON schema which we will use to validate events.
+#------------------------------------------------------------------------------
+vel_schema = None
+
+#------------------------------------------------------------------------------
+# The JSON schema which we will use to validate client throttle state.
+#------------------------------------------------------------------------------
+throttle_schema = None
+
+#------------------------------------------------------------------------------
+# The JSON schema which we will use to provoke throttling commands for testing.
+#------------------------------------------------------------------------------
+test_control_schema = None
+
+#------------------------------------------------------------------------------
+# Pending command list from the testControl API
+# This is sent as a response commandList to the next received event.
+#------------------------------------------------------------------------------
+pending_command_list = None
+
+#------------------------------------------------------------------------------
+# Logger for this module.
+#------------------------------------------------------------------------------
+logger = None
+
+def listener(environ, start_response, schema):
+    '''
+    Handler for the Vendor Event Listener REST API.
+
+    Extract headers and the body and check that:
+
+      1)  The client authenticated themselves correctly.
+      2)  The body validates against the provided schema for the API.
+
+    '''
+    logger.info('Got a Vendor Event request')
+    print('==== ' + time.asctime() + ' ' + '=' * 49)
+
+    #--------------------------------------------------------------------------
+    # Extract the content from the request.
+    #--------------------------------------------------------------------------
+    length = int(environ.get('CONTENT_LENGTH', '0'))
+    logger.debug('Content Length: {0}'.format(length))
+    body = environ['wsgi.input'].read(length)
+    logger.debug('Content Body: {0}'.format(body))
+
+    mode, b64_credentials = string.split(environ.get('HTTP_AUTHORIZATION',
+                                                     'None None'))
+    # logger.debug('Auth. Mode: {0} Credentials: {1}'.format(mode,
+    #                                                     b64_credentials))
+    logger.debug('Auth. Mode: {0} Credentials: ****'.format(mode))
+    if (b64_credentials != 'None'):
+        credentials = b64decode(b64_credentials)
+    else:
+        credentials = None
+
+    # logger.debug('Credentials: {0}'.format(credentials))
+    logger.debug('Credentials: ****')
+
+    #--------------------------------------------------------------------------
+    # If we have a schema file then check that the event matches that expected.
+    #--------------------------------------------------------------------------
+    if (schema is not None):
+        logger.debug('Attempting to validate data: {0}\n'
+                     'Against schema: {1}'.format(body, schema))
+        try:
+            decoded_body = json.loads(body)
+            jsonschema.validate(decoded_body, schema)
+            logger.info('Event is valid!')
+            print('Valid body decoded & checked against schema OK:\n'
+                  '{0}'.format(json.dumps(decoded_body,
+                                          sort_keys=True,
+                                          indent=4,
+                                          separators=(',', ': '))))
+
+        except jsonschema.SchemaError as e:
+            logger.error('Schema is not valid! {0}'.format(e))
+            print('Schema is not valid! {0}'.format(e))
+
+        except jsonschema.ValidationError as e:
+            logger.warn('Event is not valid against schema! {0}'.format(e))
+            print('Event is not valid against schema! {0}'.format(e))
+            print('Bad JSON body decoded:\n'
+                  '{0}'.format(json.dumps(decoded_body,
+                                         sort_keys=True,
+                                         indent=4,
+                                         separators=(',', ': '))))
+
+        except Exception as e:
+            logger.error('Event invalid for unexpected reason! {0}'.format(e))
+            print('Schema is not valid for unexpected reason! {0}'.format(e))
+    else:
+        logger.debug('No schema so just decode JSON: {0}'.format(body))
+        try:
+            decoded_body = json.loads(body)
+            print('Valid JSON body (no schema checking) decoded:\n'
+                  '{0}'.format(json.dumps(decoded_body,
+                                         sort_keys=True,
+                                         indent=4,
+                                         separators=(',', ': '))))
+            logger.info('Event is valid JSON but not checked against schema!')
+
+        except Exception as e:
+            logger.error('Event invalid for unexpected reason! {0}'.format(e))
+            print('JSON body not valid for unexpected reason! {0}'.format(e))
+
+    #--------------------------------------------------------------------------
+    # See whether the user authenticated themselves correctly.
+    #--------------------------------------------------------------------------
+    if (credentials == (vel_username + ':' + vel_password)):
+        logger.debug('Authenticated OK')
+        print('Authenticated OK')
+
+        #----------------------------------------------------------------------
+        # Respond to the caller. If we have a pending commandList from the
+        # testControl API, send it in response.
+        #----------------------------------------------------------------------
+        global pending_command_list
+        if pending_command_list is not None:
+            start_response('202 Accepted',
+                           [('Content-type', 'application/json')])
+            response = pending_command_list
+            pending_command_list = None
+
+            print('\n'+ '='*80)
+            print('Sending pending commandList in the response:\n'
+                  '{0}'.format(json.dumps(response,
+                                          sort_keys=True,
+                                          indent=4,
+                                          separators=(',', ': '))))
+            print('='*80 + '\n')
+            yield json.dumps(response)
+        else:
+            start_response('202 Accepted', [])
+            yield ''
+    else:
+        logger.warn('Failed to authenticate OK')
+        print('Failed to authenticate OK')
+
+        #----------------------------------------------------------------------
+        # Respond to the caller.
+        #----------------------------------------------------------------------
+        start_response('401 Unauthorized', [ ('Content-type',
+                                              'application/json')])
+        req_error = { 'requestError': {
+                        'policyException': {
+                            'messageId': 'POL0001',
+                            'text': 'Failed to authenticate'
+                            }
+                        }
+                    }
+        yield json.dumps(req_error)
+
+def test_listener(environ, start_response, schema):
+    '''
+    Handler for the Test Collector Test Control API.
+
+    There is no authentication on this interface.
+
+    This simply stores a commandList which will be sent in response to the next
+    incoming event on the EVEL interface.
+    '''
+    global pending_command_list
+    logger.info('Got a Test Control input')
+    print('============================')
+    print('==== TEST CONTROL INPUT ====')
+
+    #--------------------------------------------------------------------------
+    # GET allows us to get the current pending request.
+    #--------------------------------------------------------------------------
+    if environ.get('REQUEST_METHOD') == 'GET':
+        start_response('200 OK', [('Content-type', 'application/json')])
+        yield json.dumps(pending_command_list)
+        return
+
+    #--------------------------------------------------------------------------
+    # Extract the content from the request.
+    #--------------------------------------------------------------------------
+    length = int(environ.get('CONTENT_LENGTH', '0'))
+    logger.debug('TestControl Content Length: {0}'.format(length))
+    body = environ['wsgi.input'].read(length)
+    logger.debug('TestControl Content Body: {0}'.format(body))
+
+    #--------------------------------------------------------------------------
+    # If we have a schema file then check that the event matches that expected.
+    #--------------------------------------------------------------------------
+    if (schema is not None):
+        logger.debug('Attempting to validate data: {0}\n'
+                     'Against schema: {1}'.format(body, schema))
+        try:
+            decoded_body = json.loads(body)
+            jsonschema.validate(decoded_body, schema)
+            logger.info('TestControl is valid!')
+            print('TestControl:\n'
+                  '{0}'.format(json.dumps(decoded_body,
+                                          sort_keys=True,
+                                          indent=4,
+                                          separators=(',', ': '))))
+
+        except jsonschema.SchemaError as e:
+            logger.error('TestControl Schema is not valid: {0}'.format(e))
+            print('TestControl Schema is not valid: {0}'.format(e))
+
+        except jsonschema.ValidationError as e:
+            logger.warn('TestControl input not valid: {0}'.format(e))
+            print('TestControl input not valid: {0}'.format(e))
+            print('Bad JSON body decoded:\n'
+                  '{0}'.format(json.dumps(decoded_body,
+                                          sort_keys=True,
+                                          indent=4,
+                                          separators=(',', ': '))))
+
+        except Exception as e:
+            logger.error('TestControl input not valid: {0}'.format(e))
+            print('TestControl Schema not valid: {0}'.format(e))
+    else:
+        logger.debug('Missing schema just decode JSON: {0}'.format(body))
+        try:
+            decoded_body = json.loads(body)
+            print('Valid JSON body (no schema checking) decoded:\n'
+                  '{0}'.format(json.dumps(decoded_body,
+                                          sort_keys=True,
+                                          indent=4,
+                                          separators=(',', ': '))))
+            logger.info('TestControl input not checked against schema!')
+
+        except Exception as e:
+            logger.error('TestControl input not valid: {0}'.format(e))
+            print('TestControl input not valid: {0}'.format(e))
+
+    #--------------------------------------------------------------------------
+    # Store the decoded commandList so that it is sent as the response to the
+    # next event received on the EVEL interface, then acknowledge the caller.
+    #--------------------------------------------------------------------------
+    pending_command_list = decoded_body
+    print('===== TEST CONTROL END =====')
+    print('============================')
+    start_response('202 Accepted', [])
+    yield ''
+
+def main(argv=None):
+    '''
+    Main function for the collector start-up.
+
+    Called with command-line arguments:
+        *    --config *<file>*
+        *    --section *<section>*
+        *    --verbose
+
+    Where:
+
+        *<file>* specifies the path to the configuration file.
+
+        *<section>* specifies the section within that config file.
+
+        *verbose* generates more information in the log files.
+
+    The process listens for REST API invocations and checks them. Errors are
+    displayed to stdout and logged.
+    '''
+
+    if argv is None:
+        argv = sys.argv
+    else:
+        sys.argv.extend(argv)
+
+    program_name = os.path.basename(sys.argv[0])
+    program_version = 'v{0}'.format(__version__)
+    program_build_date = str(__updated__)
+    program_version_message = '%%(prog)s {0} ({1})'.format(program_version,
+                                                         program_build_date)
+    if (__import__('__main__').__doc__ is not None):
+        program_shortdesc = __import__('__main__').__doc__.split('\n')[1]
+    else:
+        program_shortdesc = 'Running in test harness'
+    program_license = '''{0}
+
+  Created  on {1}.
+  Copyright 2015 Metaswitch Networks Ltd. All rights reserved.
+
+  Distributed on an "AS IS" basis without warranties
+  or conditions of any kind, either express or implied.
+
+USAGE
+'''.format(program_shortdesc, str(__date__))
+
+    try:
+        #----------------------------------------------------------------------
+        # Setup argument parser so we can parse the command-line.
+        #----------------------------------------------------------------------
+        parser = ArgumentParser(description=program_license,
+                                formatter_class=ArgumentDefaultsHelpFormatter)
+        parser.add_argument('-v', '--verbose',
+                            dest='verbose',
+                            action='count',
+                            help='set verbosity level')
+        parser.add_argument('-V', '--version',
+                            action='version',
+                            version=program_version_message,
+                            help='Display version information')
+        parser.add_argument('-a', '--api-version',
+                            dest='api_version',
+                            default='3',
+                            help='set API version')
+        parser.add_argument('-c', '--config',
+                            dest='config',
+                            default='/etc/opt/att/collector.conf',
+                            help='Use this config file.',
+                            metavar='<file>')
+        parser.add_argument('-s', '--section',
+                            dest='section',
+                            default='default',
+                            metavar='<section>',
+                            help='section to use in the config file')
+
+        #----------------------------------------------------------------------
+        # Process arguments received.
+        #----------------------------------------------------------------------
+        args = parser.parse_args()
+        verbose = args.verbose
+        api_version = args.api_version
+        config_file = args.config
+        config_section = args.section
+
+        #----------------------------------------------------------------------
+        # Now read the config file, using command-line supplied values as
+        # overrides.
+        #----------------------------------------------------------------------
+        defaults = {'log_file': 'collector.log',
+                    'vel_port': '12233',
+                    'vel_path': '',
+                    'vel_topic_name': ''
+                   }
+        overrides = {}
+        config = ConfigParser.SafeConfigParser(defaults)
+        config.read(config_file)
+
+        #----------------------------------------------------------------------
+        # extract the values we want.
+        #----------------------------------------------------------------------
+        log_file = config.get(config_section, 'log_file', vars=overrides)
+        vel_port = config.get(config_section, 'vel_port', vars=overrides)
+        vel_path = config.get(config_section, 'vel_path', vars=overrides)
+        vel_topic_name = config.get(config_section,
+                                    'vel_topic_name',
+                                    vars=overrides)
+        global vel_username
+        global vel_password
+        vel_username = config.get(config_section,
+                                  'vel_username',
+                                  vars=overrides)
+        vel_password = config.get(config_section,
+                                  'vel_password',
+                                  vars=overrides)
+        vel_schema_file = config.get(config_section,
+                                     'schema_file',
+                                     vars=overrides)
+        base_schema_file = config.get(config_section,
+                                      'base_schema_file',
+                                      vars=overrides)
+        throttle_schema_file = config.get(config_section,
+                                          'throttle_schema_file',
+                                          vars=overrides)
+        test_control_schema_file = config.get(config_section,
+                                           'test_control_schema_file',
+                                           vars=overrides)
+
+        #----------------------------------------------------------------------
+        # Finally we have enough info to start a proper flow trace.
+        #----------------------------------------------------------------------
+        global logger
+        print('Logfile: {0}'.format(log_file))
+        logger = logging.getLogger('collector')
+        if verbose > 0:
+            print('Verbose mode on')
+            logger.setLevel(logging.DEBUG)
+        else:
+            logger.setLevel(logging.INFO)
+        handler = logging.handlers.RotatingFileHandler(log_file,
+                                                       maxBytes=1000000,
+                                                       backupCount=10)
+        if (platform.system() == 'Windows'):
+            date_format = '%Y-%m-%d %H:%M:%S'
+        else:
+            date_format = '%Y-%m-%d %H:%M:%S.%f %z'
+        formatter = logging.Formatter('%(asctime)s %(name)s - '
+                                      '%(levelname)s - %(message)s',
+                                      date_format)
+        handler.setFormatter(formatter)
+        logger.addHandler(handler)
+        logger.info('Started')
+
+        #----------------------------------------------------------------------
+        # Log the details of the configuration.
+        #----------------------------------------------------------------------
+        logger.debug('Log file = {0}'.format(log_file))
+        logger.debug('Event Listener Port = {0}'.format(vel_port))
+        logger.debug('Event Listener Path = {0}'.format(vel_path))
+        logger.debug('Event Listener Topic = {0}'.format(vel_topic_name))
+        logger.debug('Event Listener Username = {0}'.format(vel_username))
+        # logger.debug('Event Listener Password = {0}'.format(vel_password))
+        logger.debug('Event Listener JSON Schema File = {0}'.format(
+                                                              vel_schema_file))
+        logger.debug('Base JSON Schema File = {0}'.format(base_schema_file))
+        logger.debug('Throttle JSON Schema File = {0}'.format(
+                                                         throttle_schema_file))
+        logger.debug('Test Control JSON Schema File = {0}'.format(
+                                                     test_control_schema_file))
+
+        #----------------------------------------------------------------------
+        # Perform some basic error checking on the config.
+        #----------------------------------------------------------------------
+        if (int(vel_port) < 1024 or int(vel_port) > 65535):
+            logger.error('Invalid Vendor Event Listener port ({0}) '
+                         'specified'.format(vel_port))
+            raise RuntimeError('Invalid Vendor Event Listener port ({0}) '
+                               'specified'.format(vel_port))
+
+        if (len(vel_path) > 0 and vel_path[-1] != '/'):
+            logger.warning('Event Listener Path ({0}) should have terminating '
+                           '"/"!  Adding one on to configured string.'.format(
+                                                                     vel_path))
+            vel_path += '/'
+
+        #----------------------------------------------------------------------
+        # Load up the vel_schema, if it exists.
+        #----------------------------------------------------------------------
+        if not os.path.exists(vel_schema_file):
+            logger.warning('Event Listener Schema File ({0}) not found. '
+                           'No validation will be undertaken.'.format(
+                                                              vel_schema_file))
+        else:
+            global vel_schema
+            global throttle_schema
+            global test_control_schema
+            vel_schema = json.load(open(vel_schema_file, 'r'))
+            logger.debug('Loaded the JSON schema file')
+
+            #------------------------------------------------------------------
+            # Load up the throttle_schema, if it exists.
+            #------------------------------------------------------------------
+            if (os.path.exists(throttle_schema_file)):
+                logger.debug('Loading throttle schema')
+                throttle_fragment = json.load(open(throttle_schema_file, 'r'))
+                throttle_schema = {}
+                throttle_schema.update(vel_schema)
+                throttle_schema.update(throttle_fragment)
+                logger.debug('Loaded the throttle schema')
+
+            #------------------------------------------------------------------
+            # Load up the test control _schema, if it exists.
+            #------------------------------------------------------------------
+            if (os.path.exists(test_control_schema_file)):
+                logger.debug('Loading test control schema')
+                test_control_fragment = json.load(
+                    open(test_control_schema_file, 'r'))
+                test_control_schema = {}
+                test_control_schema.update(vel_schema)
+                test_control_schema.update(test_control_fragment)
+                logger.debug('Loaded the test control schema')
+
+            #------------------------------------------------------------------
+            # Load up the base_schema, if it exists.
+            #------------------------------------------------------------------
+            if (os.path.exists(base_schema_file)):
+                logger.debug('Updating the schema with base definition')
+                base_schema = json.load(open(base_schema_file, 'r'))
+                vel_schema.update(base_schema)
+                logger.debug('Updated the JSON schema file')
+
+        #----------------------------------------------------------------------
+        # We are now ready to get started with processing. Start-up the various
+        # components of the system in order:
+        #
+        #  1) Create the dispatcher.
+        #  2) Register the functions for the URLs of interest.
+        #  3) Run the webserver.
+        #----------------------------------------------------------------------
+        root_url = '/{0}eventListener/v{1}{2}'.\
+                   format(vel_path,
+                          api_version,
+                          '/' + vel_topic_name
+                          if len(vel_topic_name) > 0
+                          else '')
+        throttle_url = '/{0}eventListener/v{1}/clientThrottlingState'.\
+                       format(vel_path, api_version)
+        set_404_content(root_url)
+        dispatcher = PathDispatcher()
+        vendor_event_listener = partial(listener, schema = vel_schema)
+        dispatcher.register('GET', root_url, vendor_event_listener)
+        dispatcher.register('POST', root_url, vendor_event_listener)
+        vendor_throttle_listener = partial(listener, schema = throttle_schema)
+        dispatcher.register('GET', throttle_url, vendor_throttle_listener)
+        dispatcher.register('POST', throttle_url, vendor_throttle_listener)
+
+        #----------------------------------------------------------------------
+        # We also add a POST-only mechanism for test control, so that we can
+        # send commands to a single attached client.
+        #----------------------------------------------------------------------
+        test_control_url = '/testControl/v{0}/commandList'.format(api_version)
+        test_control_listener = partial(test_listener,
+                                        schema = test_control_schema)
+        dispatcher.register('POST', test_control_url, test_control_listener)
+        dispatcher.register('GET', test_control_url, test_control_listener)
+
+        httpd = make_server('', int(vel_port), dispatcher)
+        print('Serving on port {0}...'.format(vel_port))
+        httpd.serve_forever()
+
+        logger.error('Main loop exited unexpectedly!')
+        return 0
+
+    except KeyboardInterrupt:
+        #----------------------------------------------------------------------
+        # handle keyboard interrupt
+        #----------------------------------------------------------------------
+        logger.info('Exiting on keyboard interrupt!')
+        return 0
+
+    except Exception as e:
+        #----------------------------------------------------------------------
+        # Handle unexpected exceptions.
+        #----------------------------------------------------------------------
+        if DEBUG or TESTRUN:
+            raise(e)
+        indent = len(program_name) * ' '
+        sys.stderr.write(program_name + ': ' + repr(e) + '\n')
+        sys.stderr.write(indent + '  for help use --help\n')
+        sys.stderr.write(traceback.format_exc())
+        logger.critical('Exiting because of exception: {0}'.format(e))
+        logger.critical(traceback.format_exc())
+        return 2
+
+#------------------------------------------------------------------------------
+# MAIN SCRIPT ENTRY POINT.
+#------------------------------------------------------------------------------
+if __name__ == '__main__':
+    if TESTRUN:
+        #----------------------------------------------------------------------
+        # Running tests - note that doctest comments haven't been included so
+        # this is a hook for future improvements.
+        #----------------------------------------------------------------------
+        import doctest
+        doctest.testmod()
+
+    if PROFILE:
+        #----------------------------------------------------------------------
+        # Profiling performance.  Performance isn't expected to be a major
+        # issue, but this should all work as expected.
+        #----------------------------------------------------------------------
+        import cProfile
+        import pstats
+        profile_filename = 'collector_profile.txt'
+        cProfile.run('main()', profile_filename)
+        statsfile = open('collector_profile_stats.txt', 'wb')
+        p = pstats.Stats(profile_filename, stream=statsfile)
+        stats = p.strip_dirs().sort_stats('cumulative')
+        stats.print_stats()
+        statsfile.close()
+        sys.exit(0)
+
+    #--------------------------------------------------------------------------
+    # Normal operation - call through to the main function.
+    #--------------------------------------------------------------------------
+    sys.exit(main())
diff --git a/collector/evel-test-collector/code/collector/monitor.py b/collector/evel-test-collector/code/collector/monitor.py
new file mode 100755 (executable)
index 0000000..98763c5
--- /dev/null
@@ -0,0 +1,997 @@
+#!/usr/bin/env python
+#
+#Original work Copyright 2016-2017 AT&T Intellectual Property, Inc
+#Modified work Copyright 2021 Xoriant Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from rest_dispatcher import PathDispatcher, set_404_content
+from wsgiref.simple_server import make_server
+import sys
+import os
+import platform
+import traceback
+import time
+from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
+import ConfigParser
+import logging.handlers
+from base64 import b64decode
+import string
+import json
+import jsonschema
+from functools import partial
+import requests
+from datetime import datetime, date, time
+import calendar
+import datetime
+import time
+import tzlocal
+import pytz
+
+# Module-level monitor state.  NOTE(review): monitor_mode, vdu_id, summary_e,
+# summary_c, status, columns and rows are not referenced anywhere in the
+# visible code - presumably leftover display state; confirm before removing.
+monitor_mode = "f"
+vdu_id = ['','','','','','']
+summary_e = ['***** Summary of key stats *****','','','']
+summary_c = ['Collectd agents:']
+status = ['','Started','Started','Started']
+base_url = ''
+# 404 response body template - presumably installed via set_404_content()
+# from rest_dispatcher (imported above); verify against the dispatcher.
+template_404 = b'''POST {0}'''
+columns = 0
+rows = 0
+
+class JSONObject:
+  '''Expose a decoded JSON dict's keys as object attributes.
+
+  Used as the object_hook for json.loads() so events can be navigated
+  with dotted notation (e.g. e.event.measurementsForVfScalingFields).
+  '''
+  def __init__(self, d):
+    self.__dict__ = d
+
+__all__ = []
+__version__ = 0.1
+__date__ = '2015-12-04'
+__updated__ = '2015-12-04'
+
+# Build-time switches for the doctest / cProfile hooks in the __main__ entry.
+TESTRUN = False
+DEBUG = False
+PROFILE = False
+
+#------------------------------------------------------------------------------
+# Address of influxdb server.
+# Default only; overwritten in main() from the 'influxdb' config option.
+#------------------------------------------------------------------------------
+
+influxdb = '127.0.0.1'
+
+#------------------------------------------------------------------------------
+# Credentials we expect clients to authenticate themselves with.
+# Populated from the config file in main(); compared in listener().
+#------------------------------------------------------------------------------
+vel_username = ''
+vel_password = ''
+
+#------------------------------------------------------------------------------
+# The JSON schema which we will use to validate events.
+#------------------------------------------------------------------------------
+vel_schema = None
+
+#------------------------------------------------------------------------------
+# The JSON schema which we will use to validate client throttle state.
+#------------------------------------------------------------------------------
+throttle_schema = None
+
+#------------------------------------------------------------------------------
+# The JSON schema which we will use to provoke throttling commands for testing.
+#------------------------------------------------------------------------------
+test_control_schema = None
+
+#------------------------------------------------------------------------------
+# Pending command list from the testControl API
+# This is sent as a response commandList to the next received event.
+#------------------------------------------------------------------------------
+pending_command_list = None
+
+#------------------------------------------------------------------------------
+# Logger for this module.
+#------------------------------------------------------------------------------
+logger = None
+
+def listener(environ, start_response, schema):
+    '''
+    Handler for the Vendor Event Listener REST API.
+
+    Extract headers and the body and check that:
+
+      1)  The client authenticated themselves correctly.
+      2)  The body validates against the provided schema for the API.
+
+    Implemented as a WSGI generator: the response body is produced with
+    yield after start_response() has been called.
+    '''
+    logger.info('Got a Vendor Event request')
+    logger.info('==== ' + time.asctime() + ' ' + '=' * 49)
+
+    #--------------------------------------------------------------------------
+    # Extract the content from the request.
+    #--------------------------------------------------------------------------
+    length = int(environ.get('CONTENT_LENGTH', '0'))
+    logger.debug('Content Length: {0}'.format(length))
+    body = environ['wsgi.input'].read(length)
+    logger.debug('Content Body: {0}'.format(body))
+
+    # NOTE(review): string.split() is Python 2 only, and this unpack raises
+    # ValueError if the Authorization header does not split into exactly
+    # two tokens.
+    mode, b64_credentials = string.split(environ.get('HTTP_AUTHORIZATION',
+                                                     'None None'))
+    # logger.debug('Auth. Mode: {0} Credentials: {1}'.format(mode,
+    #                                                     b64_credentials))
+    logger.debug('Auth. Mode: {0} Credentials: ****'.format(mode))
+    if (b64_credentials != 'None'):
+        credentials = b64decode(b64_credentials)
+    else:
+        credentials = None
+
+    # logger.debug('Credentials: {0}'.format(credentials))
+    logger.debug('Credentials: ****')
+
+    #--------------------------------------------------------------------------
+    # If we have a schema file then check that the event matches that expected.
+    #--------------------------------------------------------------------------
+    if (schema is not None):
+        logger.debug('Attempting to validate data: {0}\n'
+                     'Against schema: {1}'.format(body, schema))
+        try:
+            decoded_body = json.loads(body)
+            jsonschema.validate(decoded_body, schema)
+            logger.info('Event is valid!')
+            logger.debug('Valid body decoded & checked against schema OK:\n'
+                  '{0}'.format(json.dumps(decoded_body,
+                                          sort_keys=True,
+                                          indent=4,
+                                          separators=(',', ': '))))
+    #--------------------------------------------------------------------------
+    # See whether the user authenticated themselves correctly.
+    #--------------------------------------------------------------------------
+            # Decoded credentials are compared against the plain
+            # "username:password" pair loaded from config in main().
+            if (credentials == (vel_username + ':' + vel_password)):
+                logger.debug('Authenticated OK')
+
+        #----------------------------------------------------------------------
+        # Respond to the caller. If we have a pending commandList from the
+        # testControl API, send it in response.
+        #----------------------------------------------------------------------
+                global pending_command_list
+                if pending_command_list is not None:
+                    start_response('202 Accepted',
+                           [('Content-type', 'application/json')])
+                    response = pending_command_list
+                    # Command list is one-shot: cleared after delivery.
+                    pending_command_list = None
+
+                    logger.debug('\n'+ '='*80)
+                    logger.debug('Sending pending commandList in the response:\n'
+                          '{0}'.format(json.dumps(response,
+                                          sort_keys=True,
+                                          indent=4,
+                                          separators=(',', ': '))))
+                    logger.debug('='*80 + '\n')
+                    yield json.dumps(response)
+                else:
+                    start_response('202 Accepted', [])
+                    yield ''
+            else:
+                # NOTE(review): both warn() calls below log the raw agent
+                # credentials (secret leak); the second also passes extra
+                # positional args with no %-placeholders in the format
+                # string, which makes the logging module raise a formatting
+                # error, and the first raises TypeError when credentials
+                # is None - confirm and fix.
+                logger.warn('Failed to authenticate OK; creds: ' +  credentials)
+                logger.warn('Failed to authenticate agent credentials: ', credentials, 
+                            'against expected ', vel_username, ':', vel_password)
+
+        #----------------------------------------------------------------------
+        # Respond to the caller.
+        #----------------------------------------------------------------------
+                start_response('401 Unauthorized', [ ('Content-type',
+                                              'application/json')])
+                req_error = { 'requestError': {
+                                 'policyException': {
+                                     'messageId': 'POL0001',
+                                      'text': 'Failed to authenticate'
+                            }
+                        }
+                    }
+                yield json.dumps(req_error)
+
+            # data_storage is a module global read from the config file in
+            # main(); only InfluxDB persistence is implemented here.
+            logger.info("data_storage ={}".format(data_storage))
+            if(data_storage == 'influxdb'):
+                save_event_in_db(body)
+
+        except jsonschema.SchemaError as e:
+            logger.error('Schema is not valid! {0}'.format(e))
+
+        except jsonschema.ValidationError as e:
+            logger.warn('Event is not valid against schema! {0}'.format(e))
+            logger.warn('Bad JSON body decoded:\n'
+                  '{0}'.format(json.dumps(decoded_body,
+                                         sort_keys=True,
+                                         indent=4,
+                                         separators=(',', ': '))))
+
+        except Exception as e:
+            logger.error('Event invalid for unexpected reason! {0}'.format(e))
+    else:
+        # No schema supplied: accept any JSON-decodable body without
+        # validation or persistence.
+        logger.debug('No schema so just decode JSON: {0}'.format(body))
+        try:
+            decoded_body = json.loads(body)
+            logger.warn('Valid JSON body (no schema checking) decoded:\n'
+                  '{0}'.format(json.dumps(decoded_body,
+                                         sort_keys=True,
+                                         indent=4,
+                                         separators=(',', ': '))))
+            logger.warn('Event is valid JSON but not checked against schema!')
+
+        except Exception as e:
+            logger.error('Event invalid for unexpected reason! {0}'.format(e))
+
+#--------------------------------------------------------------------------
+# Send event to influxdb
+#--------------------------------------------------------------------------
+def send_to_influxdb(event,pdata):
+  '''POST one line-protocol record to the "veseventsdb" InfluxDB database.
+
+  event: measurement/domain name - used only in the log messages here.
+  pdata: the InfluxDB line-protocol payload to write.
+  InfluxDB answers 204 No Content on success; any other status is logged
+  at debug level and the write is otherwise silently dropped.
+  '''
+  url = 'http://{}/write?db=veseventsdb'.format(influxdb)
+  logger.debug('Send {} to influxdb at {}: {}'.format(event,influxdb,pdata))
+  r = requests.post(url, data=pdata, headers={'Content-Type': 'text/plain'})
+  logger.debug('influxdb return code {}'.format(r.status_code))
+  if r.status_code != 204:
+    logger.debug('*** Influxdb save failed, return code {} ***'.format(r.status_code))
+
+#--------------------------------------------------------------------------
+# Convert timestamp to integer
+#--------------------------------------------------------------------------
+def convertTimestampToInt(timestamp, timeFormat="%Y-%m-%dT%H:%M:%S.%fz"):
+  '''Convert a UTC timestamp string to integer epoch nanoseconds.
+
+  The string is parsed with timeFormat (note the trailing "z" in the
+  default format is matched as a literal character, not a timezone
+  directive), tagged as UTC and shifted to the local timezone so that
+  time.mktime() - which assumes local time - yields the right epoch
+  seconds, then scaled to nanoseconds for InfluxDB timestamps.
+  '''
+  date_time_obj = datetime.datetime.strptime(timestamp, timeFormat)
+  local_timezone = tzlocal.get_localzone();
+  local_timestamp = date_time_obj.replace(tzinfo=pytz.utc).astimezone(local_timezone).strftime(timeFormat)
+  date_time_obj_new = datetime.datetime.strptime(local_timestamp, timeFormat)
+  unixtime = time.mktime(date_time_obj_new.timetuple())
+  return int(float(unixtime) * float(1000000000))
+
+#--------------------------------------------------------------------------
+# Save event data
+#--------------------------------------------------------------------------
+def save_event_in_db(body):
+  '''Flatten a VES event into InfluxDB line protocol and persist it.
+
+  body: raw JSON request body (string).  The common event header is folded
+  into every record; each recognised per-domain section (pnfRegistration,
+  thresholdCrossingAlert, fault, heartbeat, measurement,
+  measurementsForVfScalingFields) is written as its own measurement via
+  send_to_influxdb().  Throughout, string (unicode) values are appended to
+  pdata as InfluxDB tags (spaces replaced with '-'), while non-string
+  values are accumulated in nonstringpdata as fields.
+  '''
+  jobj = json.loads(body)
+  # Second decode exposes the event with dotted attribute access (see the
+  # memoryUsageArray handling below).
+  e = json.loads(body, object_hook=JSONObject)
+
+  domain = jobj['event']['commonEventHeader']['domain']
+  # NOTE(review): timestamp and agent are computed but never referenced
+  # later in this function - confirm they are still needed.
+  timestamp = jobj['event']['commonEventHeader']['lastEpochMicrosec']
+  agent = jobj['event']['commonEventHeader']['reportingEntityName'].upper(     )
+  if "LOCALHOST" in agent:
+    agent = "computehost"
+  source = jobj['event']['commonEventHeader']['sourceId'].upper(       )
+
+###################################################
+  ## processing common header part
+  pdata = domain
+  nonstringpdata = " "
+  commonHeaderObj = jobj['event']['commonEventHeader'].items()
+  for key,val in commonHeaderObj:
+     if val != "" :
+      # NOTE: isinstance(val, unicode) is Python 2 only syntax.
+      if isinstance(val, unicode):
+        pdata = pdata + ',{}={}'.format(key,val.replace(' ','-'))
+      else:
+        nonstringpdata = nonstringpdata + '{}={}'.format(key,val) + ','
+
+
+  ## processing pnfRegistration events
+  if 'pnfRegistrationFields' in jobj['event']:
+    logger.debug('Found pnfRegistrationFields')
+
+    d = jobj['event']['pnfRegistrationFields'].items()
+    for key,val in d:
+      if key != 'additionalFields' and val != "" :
+        if isinstance(val, unicode):
+          pdata = pdata + ',{}={}'.format(key,val.replace(' ','-'))
+        else:
+          nonstringpdata = nonstringpdata + '{}={}'.format(key,val) + ','
+      elif key == 'additionalFields':
+          for key2,val2 in val.items():
+            if val2 != "" and isinstance(val2, unicode):
+              pdata = pdata + ',{}={}'.format(key2,val2.replace(' ','-'))
+            elif val2 != "" :
+              nonstringpdata = nonstringpdata + '{}={}'.format(key2,val2) + ','
+
+    # [:-1] strips the trailing comma left by the field accumulation above.
+    send_to_influxdb(domain, pdata + nonstringpdata[:-1])
+
+
+  ## processing thresholdCrossingAlert events
+  if 'thresholdCrossingAlertFields' in jobj['event']:
+    logger.debug('Found thresholdCrossingAlertFields')
+
+    d = jobj['event']['thresholdCrossingAlertFields'].items()
+    for key,val in d:
+      if (key != 'additionalFields' and key != 'additionalParameters' and key != 'associatedAlertIdList') and val != "" :
+        if isinstance(val, unicode):
+          # Timestamps like "Thu, 13 May 2021 ... +0000" are converted to
+          # epoch ns; val[:-6] drops the trailing " +0000" offset.
+          if key == "collectionTimestamp" or key == "eventStartTimestamp" :
+            nonstringpdata = nonstringpdata + '{}={}'.format(key,convertTimestampToInt(val[:-6], "%a, %d %b %Y %H:%M:%S"))+ ','
+          else:
+            pdata = pdata + ',{}={}'.format(key,val.replace(' ','-'))
+        else:
+          nonstringpdata = nonstringpdata + '{}={}'.format(key,val) + ','
+      elif key == 'additionalFields':
+        for key2,val2 in val.items():
+          if key2 == 'eventTime' :
+            eventTime = convertTimestampToInt(val2)
+          else:
+            if val2 != "" and isinstance(val2, unicode):
+              pdata = pdata + ',{}={}'.format(key2,val2.replace(' ','-'))
+            elif val2 != "" :
+              nonstringpdata = nonstringpdata + '{}={}'.format(key2,val2) + ','
+      elif key == 'additionalParameters':
+        for addParameter in val:
+          for key2,val2 in addParameter.items():
+            if key2 != "hashMap" :
+              if val2 != "" and isinstance(val2, unicode):
+                pdata = pdata + ',{}={}'.format(key2,val2.replace(' ','-'))
+              elif val2 != "" :
+                nonstringpdata = nonstringpdata + '{}={}'.format(key2,val2) + ','
+            elif key2 == "hashMap" :
+              for key3,val3 in val2.items():
+                if val3 != "" and isinstance(val3, unicode):
+                  pdata = pdata + ',{}={}'.format(key3,val3.replace(' ','-'))
+                elif val3 != "" :
+                  nonstringpdata = nonstringpdata + '{}={}'.format(key3,val3) + ','
+      elif key == 'associatedAlertIdList':
+        # Alert ids are joined with '|'; the trailing separator is trimmed.
+        associatedAlertIdList = ""
+        for associatedAlertId in val:
+            associatedAlertIdList = associatedAlertIdList + associatedAlertId + "|"
+        if(associatedAlertIdList != ""):
+          pdata = pdata + ',{}={}'.format("associatedAlertIdList",associatedAlertIdList.replace(' ','-')[:-1])
+
+    # NOTE(review): eventTime is only bound if additionalFields carried an
+    # 'eventTime' key above; otherwise this line raises NameError - confirm
+    # all senders include it.
+    send_to_influxdb(domain, pdata + nonstringpdata[:-1] + ' ' + format(eventTime))
+
+
+  ## processing fault events
+  if 'faultFields' in jobj['event']:
+    logger.debug('Found faultFields')
+
+    d = jobj['event']['faultFields'].items()
+    for key,val in d:
+      if key != 'alarmAdditionalInformation' and val != "" :
+        if isinstance(val, unicode):
+          pdata = pdata + ',{}={}'.format(key,val.replace(' ','-'))
+        else:
+          nonstringpdata = nonstringpdata + '{}={}'.format(key,val) + ','
+      elif key == 'alarmAdditionalInformation':
+          for key2,val2 in val.items():
+            if key2 == 'eventTime' :
+              eventTime = convertTimestampToInt(val2)
+            else:
+              if val2 != "" and isinstance(val2, unicode):
+                pdata = pdata + ',{}={}'.format(key2,val2.replace(' ','-'))
+              elif val2 != "" :
+                nonstringpdata = nonstringpdata + '{}={}'.format(key2,val2) + ','
+
+    # NOTE(review): same eventTime NameError risk as the TCA branch above.
+    send_to_influxdb(domain, pdata + nonstringpdata[:-1] + ' ' + format(eventTime))
+
+
+  ###process heartbeat events
+  if 'heartbeatFields' in jobj['event']:
+    logger.debug('Found Heartbeat')
+
+    d = jobj['event']['heartbeatFields'].items()
+    for key,val in d:
+      if key != 'additionalFields' and val != "" :
+        if isinstance(val, unicode):
+          pdata = pdata + ',{}={}'.format(key,val.replace(' ','-'))
+        else:
+          nonstringpdata = nonstringpdata + '{}={}'.format(key,val) + ','
+      elif key == 'additionalFields':
+          for key2,val2 in val.items():
+            if key2 == 'eventTime' :
+              eventTime = convertTimestampToInt(val2)
+            else:
+              if val2 != "" and isinstance(val2, unicode):
+                pdata = pdata + ',{}={}'.format(key2,val2.replace(' ','-'))
+              elif val2 != "" :
+                nonstringpdata = nonstringpdata + '{}={}'.format(key2,val2) + ','
+
+    # NOTE(review): same eventTime NameError risk as the TCA branch above.
+    send_to_influxdb(domain, pdata + nonstringpdata[:-1] + ' ' + format(eventTime))
+
+
+  ## processing measurement events
+  if 'measurementFields' in jobj['event']:
+    logger.debug('Found measurementFields')
+    d = jobj['event']['measurementFields'].items()
+    # Only this whitelist of numeric keys is persisted for measurements.
+    nonstringKey = ["concurrentSessions","configuredEntities","meanRequestLatency","measurementFieldsVersion","measurementInterval",
+    "nfcScalingMetric","numberOfMediaPortsInUse","requestRate"]
+
+    pdata = pdata + ' '
+    for key,val in d:
+      for nonstrKey in nonstringKey:
+        if key == nonstrKey:
+          pdata = pdata + '{}={}'.format(key,val) + ','
+
+    # NOTE(review): measurement events are written to a measurement named
+    # "fault" - confirm this is intended and not a copy/paste slip.
+    send_to_influxdb("fault", pdata[:-1])
+
+
+  if 'measurementsForVfScalingFields' in jobj['event']:
+    logger.debug('Found measurementsForVfScalingFields')
+
+#        "measurementsForVfScalingFields": {
+#            "additionalMeasurements": [
+#                {
+#                    "arrayOfFields": [
+#                        {
+#                            "name": "load-longterm",
+#                            "value": "0.34"
+#                        },
+#                        {
+#                            "name": "load-shortterm",
+#                            "value": "0.32"
+#                        },
+#                        {
+#                            "name": "load-midterm",
+#                            "value": "0.34"
+#                        }
+#                    ],
+#                    "name": "load"
+#                }
+#            ],
+
+    if 'additionalMeasurements' in jobj['event']['measurementsForVfScalingFields']:
+      for meas in jobj['event']['measurementsForVfScalingFields']['additionalMeasurements']:
+        name = meas['name']
+        # eventTime arrives as float seconds; scale to integer nanoseconds.
+        eventTime = int(float(meas['eventTime']) * float(1000000000))
+
+        # iptables accounting measurements are split out so the packet
+        # counter becomes the field and everything else a tag.
+        if name =="kernel4-filterAccounting":
+            data = '{},system={}'.format(name,source)
+            for field in meas['arrayOfFields']:
+               if field['name'] =="ipt-packets-value":
+                 val=field['value']
+               else:
+                 data = data + ",{}={}".format(field['name'],field['value'])
+
+            data = data + ' ' + "ipt-packets-value=" + val + ' ' + format(eventTime)
+            send_to_influxdb("iptables", data)
+        else:
+            pdata = '{},system={}'.format(name,source)
+
+            for field in meas['arrayOfFields']:
+              pdata = pdata + ",{}={}".format(field['name'],field['value'])
+            #pdata = pdata + ",{}={}".format("eventTime",meas['eventTime'])
+            # Replace the comma after the 'system' tag with a space so the
+            # first value becomes the line-protocol field set separator.
+            i=pdata.find(',', pdata.find('system'))
+            pdata = pdata[:i] + ' ' + pdata[i+1:] + ' ' + format(eventTime)
+            send_to_influxdb("systemLoad", pdata)
+
+#            "cpuUsageArray": [
+#                {
+#                    "cpuIdentifier": "15",
+#                    "cpuIdle": 99.8998998999,
+#                    "cpuUsageInterrupt": 0,
+#                    "cpuUsageNice": 0,
+#                    "cpuUsageSoftIrq": 0,
+#                    "cpuUsageSteal": 0,
+#                    "cpuUsageSystem": 0,
+#                    "cpuUsageUser": 0.1001001001,
+#                    "cpuWait": 0,
+#                    "percentUsage": 0.0
+#                },
+
+
+
+    if 'cpuUsageArray' in jobj['event']['measurementsForVfScalingFields']:
+      logger.debug('Found cpuUsageArray')
+      for disk in jobj['event']['measurementsForVfScalingFields']['cpuUsageArray']:
+        id=disk['cpuIdentifier']
+        pdata = 'cpuUsage,system={},cpu={}'.format(source,id)
+        d = disk.items()
+        for key,val in d:
+          if key == 'eventTime':
+            eventTime = int(float(val) * float(1000000000))
+          elif key != 'cpuIdentifier':
+            pdata = pdata + ',{}={}'.format(key,val)
+
+        i=pdata.find(',', pdata.find('cpu='))
+        pdata = pdata[:i] + ' ' + pdata[i+1:] + ' ' + format(eventTime)
+        send_to_influxdb("cpuUsage", pdata)
+
+#            "diskUsageArray": [
+#                {
+#                    "diskIdentifier": "sda",
+#                    "diskIoTimeLast": 0.3996139893,
+#                    "diskMergedReadLast": 0,
+#                    "diskMergedWriteLast": 26.1747155344,
+#                    "diskOctetsReadLast": 0,
+#                    "diskOctetsWriteLast": 309767.93302,
+#                    "diskOpsReadLast": 0,
+#                    "diskOpsWriteLast": 10.9893839563,
+#                    "diskTimeReadLast": 0,
+#                    "diskTimeWriteLast": 0.699324445683
+#                },
+
+    if 'diskUsageArray' in jobj['event']['measurementsForVfScalingFields']:
+      logger.debug('Found diskUsageArray')
+      for disk in jobj['event']['measurementsForVfScalingFields']['diskUsageArray']:
+        id=disk['diskIdentifier']
+        pdata = 'diskUsage,system={},disk={}'.format(source,id)
+        d = disk.items()
+        for key,val in d:
+          if key == 'eventTime':
+            eventTime = int(float(val) * float(1000000000))
+          elif key != 'diskIdentifier':
+            pdata = pdata + ',{}={}'.format(key,val)
+
+        i=pdata.find(',', pdata.find('disk='))
+        pdata = pdata[:i] + ' ' + pdata[i+1:] + ' ' + format(eventTime)
+        send_to_influxdb("diskUsage", pdata)
+
+#            "memoryUsageArray": [
+#                {
+#                    "memoryBuffered": 269056.0,
+#                    "memoryCached": 17636956.0,
+#                    "memoryFree": 244731658240,
+#                    "memorySlabRecl": 753160.0,
+#                    "memorySlabUnrecl": 210800.0,
+#                    "memoryUsed": 6240064.0,
+#                    "vmIdentifier": "opnfv01"
+#                }
+#            ],
+
+    if 'memoryUsageArray' in jobj['event']['measurementsForVfScalingFields']:
+      logger.debug('Found memoryUsageArray')
+      pdata = 'memoryUsage,system={}'.format(source)
+      # vmid is read via the attribute-style decode; it is reused by the
+      # vNicPerformanceArray branch below.
+      vmid=e.event.measurementsForVfScalingFields.memoryUsageArray[0].vmIdentifier
+      d = jobj['event']['measurementsForVfScalingFields']['memoryUsageArray'][0].items()
+      for key,val in d:
+        if key == 'eventTime':
+          eventTime = int(float(val) * float(1000000000))
+        elif key != 'vmIdentifier':
+          pdata = pdata + ',{}={}'.format(key,val)
+
+      i=pdata.find(',', pdata.find('system'))
+      pdata = pdata[:i] + ' ' + pdata[i+1:] + ' ' + format(eventTime)
+      send_to_influxdb("memoryUsage", pdata)
+
+#            "vNicPerformanceArray": [
+#                {
+#                    "receivedDiscardedPacketsAccumulated": 0,
+#                    "receivedErrorPacketsAccumulated": 0,
+#                    "receivedOctetsAccumulated": 476.801524578,
+#                    "receivedTotalPacketsAccumulated": 2.90000899705,
+#                    "transmittedDiscardedPacketsAccumulated": 0,
+#                    "transmittedErrorPacketsAccumulated": 0,
+#                    "transmittedOctetsAccumulated": 230.100735749,
+#                    "transmittedTotalPacketsAccumulated": 1.20000372292,
+#                    "vNicIdentifier": "eno4",
+#                    "valuesAreSuspect": "true"
+#                },
+
+    if 'vNicPerformanceArray' in jobj['event']['measurementsForVfScalingFields']:
+      logger.debug('Found vNicPerformanceArray')
+      for vnic in jobj['event']['measurementsForVfScalingFields']['vNicPerformanceArray']:
+        vnid=vnic['vNicIdentifier']
+        # NOTE(review): vmid is only assigned in the memoryUsageArray branch
+        # above; an event with vNicPerformanceArray but no memoryUsageArray
+        # raises NameError here - confirm events always carry both.
+        pdata = 'vNicPerformance,system={},vnic={}'.format(vmid,vnid)
+        d = vnic.items()
+        for key,val in d:
+          if key == 'eventTime':
+            eventTime = int(float(val) * float(1000000000))
+          elif key != 'vNicIdentifier':
+            pdata = pdata + ',{}={}'.format(key,val)
+
+        i=pdata.find(',', pdata.find('vnic'))
+        pdata = pdata[:i] + ' ' + pdata[i+1:] + ' ' + format(eventTime)
+        send_to_influxdb("vNicPerformance", pdata)
+
+def test_listener(environ, start_response, schema):
+    '''
+    Handler for the Test Collector Test Control API.
+
+    There is no authentication on this interface.
+
+    This simply stores a commandList which will be sent in response to the next
+    incoming event on the EVEL interface.
+
+    Implemented as a WSGI generator; GET returns the currently pending
+    commandList, any other method replaces it.
+    '''
+    global pending_command_list
+    logger.info('Got a Test Control input')
+    logger.info('============================')
+    logger.info('==== TEST CONTROL INPUT ====')
+
+    #--------------------------------------------------------------------------
+    # GET allows us to get the current pending request.
+    #--------------------------------------------------------------------------
+    if environ.get('REQUEST_METHOD') == 'GET':
+        start_response('200 OK', [('Content-type', 'application/json')])
+        yield json.dumps(pending_command_list)
+        return
+
+    #--------------------------------------------------------------------------
+    # Extract the content from the request.
+    #--------------------------------------------------------------------------
+    length = int(environ.get('CONTENT_LENGTH', '0'))
+    logger.debug('TestControl Content Length: {0}'.format(length))
+    body = environ['wsgi.input'].read(length)
+    logger.debug('TestControl Content Body: {0}'.format(body))
+
+    #--------------------------------------------------------------------------
+    # If we have a schema file then check that the event matches that expected.
+    #--------------------------------------------------------------------------
+    if (schema is not None):
+        logger.debug('Attempting to validate data: {0}\n'
+                     'Against schema: {1}'.format(body, schema))
+        try:
+            decoded_body = json.loads(body)
+            jsonschema.validate(decoded_body, schema)
+            logger.info('TestControl is valid!')
+            logger.info('TestControl:\n'
+                  '{0}'.format(json.dumps(decoded_body,
+                                          sort_keys=True,
+                                          indent=4,
+                                          separators=(',', ': '))))
+
+        except jsonschema.SchemaError as e:
+            logger.error('TestControl Schema is not valid: {0}'.format(e))
+
+        except jsonschema.ValidationError as e:
+            logger.error('TestControl input not valid: {0}'.format(e))
+            logger.error('Bad JSON body decoded:\n'
+                  '{0}'.format(json.dumps(decoded_body,
+                                          sort_keys=True,
+                                          indent=4,
+                                          separators=(',', ': '))))
+
+        except Exception as e:
+            logger.error('TestControl input not valid: {0}'.format(e))
+    else:
+        logger.debug('Missing schema just decode JSON: {0}'.format(body))
+        try:
+            decoded_body = json.loads(body)
+            logger.info('Valid JSON body (no schema checking) decoded:\n'
+                  '{0}'.format(json.dumps(decoded_body,
+                                          sort_keys=True,
+                                          indent=4,
+                                          separators=(',', ': '))))
+            logger.info('TestControl input not checked against schema!')
+
+        except Exception as e:
+            logger.error('TestControl input not valid: {0}'.format(e))
+
+    #--------------------------------------------------------------------------
+    # Respond to the caller. If we received otherField 'ThrottleRequest',
+    # generate the appropriate canned response.
+    #--------------------------------------------------------------------------
+    # NOTE(review): decoded_body is unbound when json.loads() failed above,
+    # so this assignment would raise NameError - confirm intended handling.
+    # Note the stored commandList is kept even if schema validation failed.
+    pending_command_list = decoded_body
+    logger.debug('===== TEST CONTROL END =====')
+    logger.debug('============================')
+    start_response('202 Accepted', [])
+    yield ''
+
+def main(argv=None):
+    '''
+    Main function for the collector start-up.
+
+    Called with command-line arguments:
+        *    --config *<file>*
+        *    --section *<section>*
+        *    --verbose
+
+    Where:
+
+        *<file>* specifies the path to the configuration file.
+
+        *<section>* specifies the section within that config file.
+
+        *verbose* generates more information in the log files.
+
+    The process listens for REST API invocations and checks them. Errors are
+    displayed to stdout and logged.
+    '''
+
+    if argv is None:
+        argv = sys.argv
+    else:
+        sys.argv.extend(argv)
+
+    program_name = os.path.basename(sys.argv[0])
+    program_version = 'v{0}'.format(__version__)
+    program_build_date = str(__updated__)
+    program_version_message = '%%(prog)s {0} ({1})'.format(program_version,
+                                                         program_build_date)
+    if (__import__('__main__').__doc__ is not None):
+        program_shortdesc = __import__('__main__').__doc__.split('\n')[1]
+    else:
+        program_shortdesc = 'Running in test harness'
+    program_license = '''{0}
+
+  Created  on {1}.
+  Copyright 2015 Metaswitch Networks Ltd. All rights reserved.
+
+  Distributed on an "AS IS" basis without warranties
+  or conditions of any kind, either express or implied.
+
+USAGE
+'''.format(program_shortdesc, str(__date__))
+
+    try:
+        #----------------------------------------------------------------------
+        # Setup argument parser so we can parse the command-line.
+        #----------------------------------------------------------------------
+        parser = ArgumentParser(description=program_license,
+                                formatter_class=ArgumentDefaultsHelpFormatter)
+        parser.add_argument('-i', '--influxdb',
+                            dest='influxdb',
+                            default='localhost',
+                            help='InfluxDB server addresss')
+        parser.add_argument('-v', '--verbose',
+                            dest='verbose',
+                            action='count',
+                            help='set verbosity level')
+        parser.add_argument('-V', '--version',
+                            action='version',
+                            version=program_version_message,
+                            help='Display version information')
+        parser.add_argument('-a', '--api-version',
+                            dest='api_version',
+                            default='7',
+                            help='set API version')
+        parser.add_argument('-c', '--config',
+                            dest='config',
+                            default='/etc/opt/att/collector.conf',
+                            help='Use this config file.',
+                            metavar='<file>')
+        parser.add_argument('-s', '--section',
+                            dest='section',
+                            default='default',
+                            metavar='<section>',
+                            help='section to use in the config file')
+
+        #----------------------------------------------------------------------
+        # Process arguments received.
+        #----------------------------------------------------------------------
+        args = parser.parse_args()
+        verbose = args.verbose
+        api_version = args.api_version
+        config_file = args.config
+        config_section = args.section
+
+        #----------------------------------------------------------------------
+        # Now read the config file, using command-line supplied values as
+        # overrides.
+        #----------------------------------------------------------------------
+        defaults = {'log_file': 'collector.log',
+                    'vel_port': '12233',
+                    'vel_path': '',
+                    'vel_topic_name': ''
+                   }
+        overrides = {}
+        config = ConfigParser.SafeConfigParser(defaults)
+        config.read(config_file)
+
+        #----------------------------------------------------------------------
+        # extract the values we want.
+        #----------------------------------------------------------------------
+        global influxdb
+        global vel_username
+        global vel_password
+        global vel_topic_name
+        global data_storage
+        
+        influxdb = config.get(config_section, 'influxdb', vars=overrides)
+        log_file = config.get(config_section, 'log_file', vars=overrides)
+        vel_port = config.get(config_section, 'vel_port', vars=overrides)
+        vel_path = config.get(config_section, 'vel_path', vars=overrides)
+        data_storage = config.get(config_section, 'data_storage', vars=overrides)
+
+        vel_topic_name = config.get(config_section,
+                                    'vel_topic_name',
+                                    vars=overrides)
+        vel_username = config.get(config_section,
+                                  'vel_username',
+                                  vars=overrides)
+        vel_password = config.get(config_section,
+                                  'vel_password',
+                                  vars=overrides)
+        vel_schema_file = config.get(config_section,
+                                     'schema_file',
+                                     vars=overrides)
+        base_schema_file = config.get(config_section,
+                                      'base_schema_file',
+                                      vars=overrides)
+        throttle_schema_file = config.get(config_section,
+                                          'throttle_schema_file',
+                                          vars=overrides)
+        test_control_schema_file = config.get(config_section,
+                                           'test_control_schema_file',
+                                           vars=overrides)
+
+        #----------------------------------------------------------------------
+        # Finally we have enough info to start a proper flow trace.
+        #----------------------------------------------------------------------
+        global logger
+        logger = logging.getLogger('monitor')
+        if verbose > 0:
+            logger.info('Verbose mode on')
+            logger.setLevel(logging.DEBUG)
+        else:
+            logger.setLevel(logging.INFO)
+        handler = logging.handlers.RotatingFileHandler(log_file,
+                                                       maxBytes=1000000,
+                                                       backupCount=10)
+        if (platform.system() == 'Windows'):
+            date_format = '%Y-%m-%d %H:%M:%S'
+        else:
+            date_format = '%Y-%m-%d %H:%M:%S.%f %z'
+        formatter = logging.Formatter('%(asctime)s %(name)s - '
+                                      '%(levelname)s - %(message)s',
+                                      date_format)
+        handler.setFormatter(formatter)
+        logger.addHandler(handler)
+        logger.info('Started') 
+
+        #----------------------------------------------------------------------
+        # Log the details of the configuration.
+        #----------------------------------------------------------------------
+        logger.debug('Log file = {0}'.format(log_file))
+        logger.debug('Influxdb server = {0}'.format(influxdb))
+        logger.debug('Event Listener Port = {0}'.format(vel_port))
+        logger.debug('Event Listener Path = {0}'.format(vel_path))
+        logger.debug('Event Listener Topic = {0}'.format(vel_topic_name))
+        logger.debug('Event Listener Username = {0}'.format(vel_username))
+        # logger.debug('Event Listener Password = {0}'.format(vel_password))
+        logger.debug('Event Listener JSON Schema File = {0}'.format(
+                                                              vel_schema_file))
+        logger.debug('Base JSON Schema File = {0}'.format(base_schema_file))
+        logger.debug('Throttle JSON Schema File = {0}'.format(
+                                                         throttle_schema_file))
+        logger.debug('Test Control JSON Schema File = {0}'.format(
+                                                     test_control_schema_file))
+
+        #----------------------------------------------------------------------
+        # Perform some basic error checking on the config.
+        #----------------------------------------------------------------------
+        if (int(vel_port) < 1024 or int(vel_port) > 65535):
+            logger.error('Invalid Vendor Event Listener port ({0}) '
+                         'specified'.format(vel_port))
+            raise RuntimeError('Invalid Vendor Event Listener port ({0}) '
+                               'specified'.format(vel_port))
+
+        if (len(vel_path) > 0 and vel_path[-1] != '/'):
+            logger.warning('Event Listener Path ({0}) should have terminating '
+                           '"/"!  Adding one on to configured string.'.format(
+                                                                     vel_path))
+            vel_path += '/'
+
+        #----------------------------------------------------------------------
+        # Load up the vel_schema, if it exists.
+        #----------------------------------------------------------------------
+        if not os.path.exists(vel_schema_file):     
+            logger.warning('Event Listener Schema File ({0}) not found. '
+                           'No validation will be undertaken.'.format(
+                                                              vel_schema_file))
+        else:
+            global vel_schema
+            global throttle_schema
+            global test_control_schema
+            vel_schema = json.load(open(vel_schema_file, 'r'))
+            logger.debug('Loaded the JSON schema file')
+
+            #------------------------------------------------------------------
+            # Load up the throttle_schema, if it exists.
+            #------------------------------------------------------------------
+            if (os.path.exists(throttle_schema_file)):
+                logger.debug('Loading throttle schema')
+                throttle_fragment = json.load(open(throttle_schema_file, 'r'))
+                throttle_schema = {}
+                throttle_schema.update(vel_schema)
+                throttle_schema.update(throttle_fragment)
+                logger.debug('Loaded the throttle schema')
+
+            #------------------------------------------------------------------
+            # Load up the test control _schema, if it exists.
+            #------------------------------------------------------------------
+            if (os.path.exists(test_control_schema_file)):
+                logger.debug('Loading test control schema')
+                test_control_fragment = json.load(
+                    open(test_control_schema_file, 'r'))
+                test_control_schema = {}
+                test_control_schema.update(vel_schema)
+                test_control_schema.update(test_control_fragment)
+                logger.debug('Loaded the test control schema')
+
+            #------------------------------------------------------------------
+            # Load up the base_schema, if it exists.
+            #------------------------------------------------------------------
+            if (os.path.exists(base_schema_file)):
+                logger.debug('Updating the schema with base definition')
+                base_schema = json.load(open(base_schema_file, 'r'))
+                vel_schema.update(base_schema)
+                logger.debug('Updated the JSON schema file')
+
+        #----------------------------------------------------------------------
+        # We are now ready to get started with processing. Start-up the various
+        # components of the system in order:
+        #
+        #  1) Create the dispatcher.
+        #  2) Register the functions for the URLs of interest.
+        #  3) Run the webserver.
+        #----------------------------------------------------------------------
+        root_url = '/{0}eventListener/v{1}{2}'.\
+                   format(vel_path,
+                          api_version,
+                          '/' + vel_topic_name
+                          if len(vel_topic_name) > 0
+                          else '')
+        throttle_url = '/{0}eventListener/v{1}/clientThrottlingState'.\
+                       format(vel_path, api_version)
+        set_404_content(root_url)
+        dispatcher = PathDispatcher()
+        vendor_event_listener = partial(listener, schema = vel_schema)
+        dispatcher.register('GET', root_url, vendor_event_listener)
+        dispatcher.register('POST', root_url, vendor_event_listener)
+        vendor_throttle_listener = partial(listener, schema = throttle_schema)
+        dispatcher.register('GET', throttle_url, vendor_throttle_listener)
+        dispatcher.register('POST', throttle_url, vendor_throttle_listener)
+
+        #----------------------------------------------------------------------
+        # We also add a POST-only mechanism for test control, so that we can
+        # send commands to a single attached client.
+        #----------------------------------------------------------------------
+        test_control_url = '/testControl/v{0}/commandList'.format(api_version)
+        test_control_listener = partial(test_listener,
+                                        schema = test_control_schema)
+        dispatcher.register('POST', test_control_url, test_control_listener)
+        dispatcher.register('GET', test_control_url, test_control_listener)
+
+        httpd = make_server('', int(vel_port), dispatcher)
+        logger.info('Serving on port {0}...'.format(vel_port))
+        httpd.serve_forever()
+
+        logger.error('Main loop exited unexpectedly!')
+        return 0
+
+    except KeyboardInterrupt:
+        #----------------------------------------------------------------------
+        # handle keyboard interrupt
+        #----------------------------------------------------------------------
+        logger.info('Exiting on keyboard interrupt!')
+        return 0
+
+    except Exception as e:
+        #----------------------------------------------------------------------
+        # Handle unexpected exceptions.
+        #----------------------------------------------------------------------
+        if DEBUG or TESTRUN:
+            raise(e)
+        indent = len(program_name) * ' '
+        sys.stderr.write(program_name + ': ' + repr(e) + '\n')
+        sys.stderr.write(indent + '  for help use --help\n')
+        sys.stderr.write(traceback.format_exc())
+        logger.critical('Exiting because of exception: {0}'.format(e))
+        logger.critical(traceback.format_exc())
+        return 2
+
+#------------------------------------------------------------------------------
+# MAIN SCRIPT ENTRY POINT.
+#------------------------------------------------------------------------------
+if __name__ == '__main__':
+    if TESTRUN:
+        #----------------------------------------------------------------------
+        # Running tests - note that doctest comments haven't been included so
+        # this is a hook for future improvements.
+        #----------------------------------------------------------------------
+        import doctest
+        doctest.testmod()
+
+    if PROFILE:
+        #----------------------------------------------------------------------
+        # Profiling performance.  Performance isn't expected to be a major
+        # issue, but this should all work as expected.
+        #----------------------------------------------------------------------
+        import cProfile
+        import pstats
+        profile_filename = 'collector_profile.txt'
+        cProfile.run('main()', profile_filename)
+        statsfile = open('collector_profile_stats.txt', 'wb')
+        p = pstats.Stats(profile_filename, stream=statsfile)
+        stats = p.strip_dirs().sort_stats('cumulative')
+        stats.print_stats()
+        statsfile.close()
+        sys.exit(0)
+
+    #--------------------------------------------------------------------------
+    # Normal operation - call through to the main function.
+    #--------------------------------------------------------------------------
+    sys.exit(main())
diff --git a/collector/evel-test-collector/code/collector/rest_dispatcher.py b/collector/evel-test-collector/code/collector/rest_dispatcher.py
new file mode 100644 (file)
index 0000000..6911d5e
--- /dev/null
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+'''
+Simple dispatcher for the REST API.
+
+Only intended for test purposes.
+
+License
+-------
+
+Copyright(c) <2016>, AT&T Intellectual Property.  All other rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+3. All advertising materials mentioning features or use of this software
+   must display the following acknowledgement:  This product includes
+   software developed by the AT&T.
+4. Neither the name of AT&T nor the names of its contributors may be used to
+   endorse or promote products derived from this software without specific
+   prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY AT&T INTELLECTUAL PROPERTY ''AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL AT&T INTELLECTUAL PROPERTY BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+'''
+
+import logging
+logger = logging.getLogger('collector.disp')
+
+base_url = ''
+
+template_404 = b'''POST {0}'''
+
+def set_404_content(url):
+    '''
+    Called at initialization to set the base URL so that we can serve helpful
+    diagnostics as part of the 404 response. 
+    '''
+    global base_url
+    base_url = url
+    return
+
+def notfound_404(environ, start_response):
+    '''
+    Serve the 404 Not Found response.
+    
+    Provides diagnostics in the 404 response showing the hierarchy of valid
+    REST resources.
+    '''
+    logger.warning('Unexpected URL/Method: {0} {1}'.format(
+                                             environ['REQUEST_METHOD'].upper(),
+                                             environ['PATH_INFO']))
+    start_response('404 Not Found', [ ('Content-type', 'text/plain') ])
+    return [template_404.format(base_url)]
+
+class PathDispatcher:
+    '''
+    A dispatcher which can take HTTP requests in a WSGI environment and invoke
+    appropriate methods for each request.
+    '''
+    def __init__(self):
+        '''Constructor: initialize the pathmap to be empty.'''
+        self.pathmap = { }
+
+    def __call__(self, environ, start_response):
+        '''
+        The main callable that the WSGI app will invoke with each request.
+        '''
+        #----------------------------------------------------------------------
+        # Extract the method and path from the environment.
+        #----------------------------------------------------------------------
+        method = environ['REQUEST_METHOD'].lower()
+        path = environ['PATH_INFO']
+        logger.info('Dispatcher called for: {0} {1}'.format(method, path))
+        logger.debug('Dispatcher environment is: {0}'.format(environ))
+
+        #----------------------------------------------------------------------
+        # See if we have a handler for this path, and if so invoke it.
+        # Otherwise, return a 404.
+        #----------------------------------------------------------------------
+        handler = self.pathmap.get((method, path), notfound_404)
+        logger.debug('Dispatcher will use handler: {0}'.format(handler))
+        return handler(environ, start_response)
+
+    def register(self, method, path, function):
+        '''
+        Register a handler for a method/path, adding it to the pathmap.
+        '''
+        logger.debug('Registering for {0} at {1}'.format(method, path))
+        print('Registering for {0} at {1}'.format(method, path))
+        self.pathmap[method.lower(), path] = function
+        return function
diff --git a/collector/evel-test-collector/code/collector/test_control.py b/collector/evel-test-collector/code/collector/test_control.py
new file mode 100644 (file)
index 0000000..e2ce816
--- /dev/null
@@ -0,0 +1,192 @@
+#!/usr/bin/env python
+'''
+Example script to inject a throttling command list to the test_collector.
+
+Only intended for test purposes.
+
+License
+-------
+
+Copyright(c) <2016>, AT&T Intellectual Property.  All other rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+3. All advertising materials mentioning features or use of this software
+   must display the following acknowledgement:  This product includes
+   software developed by the AT&T.
+4. Neither the name of AT&T nor the names of its contributors may be used to
+   endorse or promote products derived from this software without specific
+   prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY AT&T INTELLECTUAL PROPERTY ''AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL AT&T INTELLECTUAL PROPERTY BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+'''
+import argparse
+import json
+import optparse
+import requests
+
+###############################################################################
+# Functions to build up commandList contents
+###############################################################################
+def command_state():
+    "return a provideThrottlingState command"
+    return {'command':
+            {'commandType': 'provideThrottlingState'}}
+
+def command_interval(interval):
+    "return a measurementIntervalChange command"
+    return {'command':
+            {'commandType': 'measurementIntervalChange',
+             'measurementInterval': interval}}
+
+def command_throttle(domain, fields, pairs):
+    "return a throttlingSpecification"
+    throttle_spec = {'eventDomain' : domain}
+    if len(fields):
+        throttle_spec['suppressedFieldNames'] = fields
+    if len(pairs):
+        throttle_spec['suppressedNvPairsList'] = pairs
+    return {'command':
+            {'commandType': 'throttlingSpecification',
+             'eventDomainThrottleSpecification': throttle_spec}}
+
+def command_nvpairs(field_name, pair_names):
+    "return a suppressedNvPairs"
+    return {'nvPairFieldName' : field_name,
+            'suppressedNvPairNames' : pair_names}
+
+###############################################################################
+# Example functions to build up commandLists for various domains.
+###############################################################################
+def command_list_empty():
+    return {'commandList' : []}
+
+def command_list_provide():
+    return {'commandList' : [command_state()]}
+
+def command_list_interval(interval):
+    return {'commandList' : [command_interval(interval)]}
+
+def command_list_fault_suppress_fields():
+    "Throttling Specification - two suppressedFieldNames"
+    fields = ['alarmInterfaceA', 'alarmAdditionalInformation']
+    pairs = []
+    command_list = [command_throttle('fault', fields, pairs)]
+    return {'commandList' : command_list}
+
+def command_list_fault_suppress_nothing():
+    "Throttling Specification - no suppression"
+    fields = []
+    pairs = []
+    command_list = [command_throttle('fault', fields, pairs)]
+    return {'commandList' : command_list}
+
+def command_list_fault_suppress_pairs():
+    "Throttling Specification - two suppressedNvPairNames"
+    fields = []
+    pairs = [command_nvpairs('alarmAdditionalInformation',
+                                   ['name1', 'name2'])]
+    command_list = [command_throttle('fault', fields, pairs)]
+    return {'commandList' : command_list}
+
+def command_list_fault_suppress_fields_and_pairs():
+    "Throttling Specification - a mixture of fields and pairs"
+    fields = ['alarmInterfaceA']
+    pairs = [command_nvpairs('alarmAdditionalInformation',
+                                   ['name1', 'name2'])]
+    command_list = [command_throttle('fault', fields, pairs)]
+    return {'commandList' : command_list}
+
+def command_list_measurements_suppress_example():
+    "Throttling Specification - measurements"
+    fields = ['numberOfMediaPortsInUse', 'aggregateCpuUsage']
+    pairs = [command_nvpairs('cpuUsageArray',
+                             ['cpu1', 'cpu3'])]
+    command_list = [command_throttle('measurementsForVfScaling',
+                                     fields, pairs)]
+    return {'commandList' : command_list}
+
+def command_list_mobile_flow_suppress_example():
+    "Throttling Specification - mobile flow"
+    fields = ['radioAccessTechnology', 'samplingAlgorithm']
+    pairs = []
+    command_list = [command_throttle('mobileFlow', fields, pairs)]
+    return {'commandList' : command_list}
+
+def command_list_state_change_suppress_example():
+    "Throttling Specification - state change"
+    fields = ['reportingEntityId', 'eventType', 'sourceId']
+    pairs = [command_nvpairs('additionalFields', ['Name1'])]
+    command_list = [command_throttle('stateChange', fields, pairs)]
+    return {'commandList' : command_list}
+
+def command_list_syslog_suppress_example():
+    "Throttling Specification - syslog"
+    fields = ['syslogFacility', 'syslogProc', 'syslogProcId']
+    pairs = [command_nvpairs('additionalFields', ['Name1', 'Name4'])]
+    command_list = [command_throttle('syslog', fields, pairs)]
+    return {'commandList' : command_list}
+
+def command_list_reset_all_domains():
+    "Throttling Specification - reset all domains"
+    command_list = [command_throttle('fault', [], []),
+                    command_throttle('measurementsForVfScaling', [], []),
+                    command_throttle('mobileFlow', [], []),
+                    command_throttle('stateChange', [], []),
+                    command_throttle('syslog', [], [])]
+    return {'commandList' : command_list}
+
+def mixed_example():
+    fields = ['alarmInterfaceA']
+    pairs = [command_nvpairs('alarmAdditionalInformation',
+                             ['name1', 'name2'])]
+    command_list = [command_throttle('fault', fields, pairs),
+                    command_interval(10),
+                    command_state()]
+    return {'commandList' : command_list}
+
+###############################################################################
+# Default command line values
+###############################################################################
+DEFAULT_FQDN = "127.0.0.1"
+DEFAULT_PORT = 30000
+
+###############################################################################
+# Command Line Parsing
+###############################################################################
+parser = optparse.OptionParser()
+parser.add_option('--fqdn',
+                  action="store",
+                  dest="fqdn",
+                  default=DEFAULT_FQDN)
+parser.add_option('--port',
+                  action="store",
+                  dest="port",
+                  default=DEFAULT_PORT,
+                  type="int")
+options, remainder = parser.parse_args()
+
+###############################################################################
+# Derive the Test Control URL
+###############################################################################
+url = 'http://%s:%d/testControl/v1.1/commandList'%(options.fqdn, options.port)
+
+###############################################################################
+# Create JSON and POST it to the Test Control URL.
+###############################################################################
+command_list = command_list_fault_suppress_fields_and_pairs()
+requests.post(url, json = command_list)
diff --git a/collector/evel-test-collector/config/README.md b/collector/evel-test-collector/config/README.md
new file mode 100644 (file)
index 0000000..32e59d0
--- /dev/null
@@ -0,0 +1 @@
+NOTE: This folder has not been updated since the 2016-11-23 update release. Compatibility with the current VES specification and code has not been verified.
\ No newline at end of file
diff --git a/collector/evel-test-collector/config/collector.conf b/collector/evel-test-collector/config/collector.conf
new file mode 100644 (file)
index 0000000..5f05f50
--- /dev/null
@@ -0,0 +1,85 @@
+#------------------------------------------------------------------------------
+# This is a config file for the collector test harness.
+#
+# On the target platform it is expected to be located in:
+#
+#   /etc/opt/att/collector.conf
+#
+# However, the location can be overridden by setting the --config <file>
+# argument to the collector code.
+#------------------------------------------------------------------------------
+
+#------------------------------------------------------------------------------
+# The default section to be used. Alternative configuration sections can be
+# provided by using the --section <name> command-line parameter.
+#------------------------------------------------------------------------------
+[default]
+log_file = collector.log
+schema_file =  evel-test-collector/docs/att_interface_definition/CommonEventFormat_30.2.1_ONAP.json
+base_schema_file = evel-test-collector/docs/att_interface_definition/base_schema.json
+throttle_schema_file = evel-test-collector/docs/att_interface_definition/throttle_schema.json
+test_control_schema_file = evel-test-collector/docs/att_interface_definition/test_control_schema.json
+
+#------------------------------------------------------------------------------
+# Details of the Vendor Event Listener REST service.
+#
+# REST resources are defined with respect to a ServerRoot:
+#    ServerRoot = https://{Domain}:{Port}/{optionalRoutingPath}
+#
+# REST resources are of the form:
+#   *   {ServerRoot}/eventListener/v{apiVersion}
+#   *   {ServerRoot}/eventListener/v{apiVersion}/{topicName}
+#   *   {ServerRoot}/eventListener/v{apiVersion}/eventBatch
+#   *   {ServerRoot}/eventListener/v{apiVersion}/clientThrottlingState
+#
+# The "vel_topic_name" parameter is used as the "topicName" element in the path
+# and may be empty.
+#
+# Note that the path, if present, should have no leading "/" but should have a
+# trailing "/".
+#------------------------------------------------------------------------------
+vel_domain = 127.0.0.1
+vel_port = 9999
+vel_path = 
+vel_username = 
+vel_password = 
+vel_topic_name = events
+data_storage = influxdb
+
+
+#------------------------------------------------------------------------------
+# Settings to be used when running in a windows test environment rather than
+# the target environment.
+#------------------------------------------------------------------------------
+[windows]
+log_file = ../../logs/collector.log
+schema_file = ../../docs/att_interface_definition/event_format_updated.json
+base_schema_file =
+throttle_schema_file = ../../docs/att_interface_definition/throttle_schema.json
+test_control_schema_file = ../../docs/att_interface_definition/test_control_schema.json
+
+#------------------------------------------------------------------------------
+# Details of the Vendor Event Listener REST service.
+#
+# REST resources are defined with respect to a ServerRoot:
+#    ServerRoot = https://{Domain}:{Port}/{optionalRoutingPath}
+#
+# REST resources are of the form:
+#   *   {ServerRoot}/eventListener/v{apiVersion}
+#   *   {ServerRoot}/eventListener/v{apiVersion}/{topicName}
+#   *   {ServerRoot}/eventListener/v{apiVersion}/eventBatch
+#   *   {ServerRoot}/eventListener/v{apiVersion}/clientThrottlingState
+#
+# The "vel_topic_name" parameter is used as the "topicName" element in the path
+# and may be empty.
+#
+# Note that the path, if present, should have no leading "/" but should have a
+# trailing "/".
+#------------------------------------------------------------------------------
+vel_domain = 127.0.0.1
+vel_port = 9999
+vel_path =
+vel_username = user user user user
+vel_password = password password password password
+vel_topic_name =
+
diff --git a/collector/evel-test-collector/docs/att_interface_definition/CommonEventFormat_30.2.1_ONAP.json b/collector/evel-test-collector/docs/att_interface_definition/CommonEventFormat_30.2.1_ONAP.json
new file mode 100644 (file)
index 0000000..6ee76cb
--- /dev/null
@@ -0,0 +1,3091 @@
+{
+  "$schema": "http://json-schema.org/draft-04/schema#",
+  "title": "VES Event Listener Common Event Format",
+  "type": "object",
+  "properties": {
+    "event": {
+      "$ref": "#/definitions/event"
+    },
+    "eventList": {
+      "$ref": "#/definitions/eventList"
+    }
+  },
+  "definitions": {
+    "schemaHeaderBlock": {
+      "description": "schema date, version, author and associated API",
+      "type": "object",
+      "properties": {
+        "associatedApi": {
+          "description": "VES Event Listener",
+          "type": "string"
+        },
+        "lastUpdatedBy": {
+          "description": "damian.nowak@nokia.com",
+          "type": "string"
+        },
+        "schemaDate": {
+          "description": "Jan 04, 2021",
+          "type": "string"
+        },
+        "schemaVersion": {
+          "description": "30.2.1",
+          "type": "number"
+        }
+      }
+    },
+    "schemaLicenseAndCopyrightNotice": {
+      "description": "Copyright (c) 2020, AT&T Intellectual Property.  All rights reserved. Modification Copyright (c) 2021, Nokia Solutions and Networks.",
+      "type": "object",
+      "properties": {
+        "apacheLicense2.0": {
+          "description": "Licensed under the Apache License, Version 2.0 (the 'License'); you may not use this file except in compliance with the License. You may obtain a copy of the License at:",
+          "type": "string"
+        },
+        "licenseUrl": {
+          "description": "http://www.apache.org/licenses/LICENSE-2.0",
+          "type": "string"
+        },
+        "asIsClause": {
+          "description": "Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
+          "type": "string"
+        },
+        "permissionsAndLimitations": {
+          "description": "See the License for the specific language governing permissions and limitations under the License.",
+          "type": "string"
+        }
+      }
+    },
+    "arrayOfJsonObject": {
+      "description": "array of json objects described by name, schema and other meta-information",
+      "type": "array",
+      "items": {
+        "$ref": "#/definitions/jsonObject"
+      }
+    },
+    "arrayOfNamedHashMap": {
+      "description": "array of named hashMaps",
+      "type": "array",
+      "items": {
+        "$ref": "#/definitions/namedHashMap"
+      }
+    },
+    "codecsInUse": {
+      "description": "number of times an identified codec was used over the measurementInterval",
+      "type": "object",
+      "properties": {
+        "codecIdentifier": {
+          "type": "string"
+        },
+        "numberInUse": {
+          "type": "integer"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "codecIdentifier",
+        "numberInUse"
+      ]
+    },
+    "commonEventHeader": {
+      "description": "fields common to all events",
+      "type": "object",
+      "properties": {
+        "domain": {
+          "description": "the eventing domain associated with the event",
+          "type": "string",
+          "enum": [
+            "fault",
+            "heartbeat",
+            "measurement",
+            "mobileFlow",
+            "notification",
+            "other",
+            "perf3gpp",
+            "pnfRegistration",
+            "sipSignaling",
+            "stateChange",
+            "stndDefined",
+            "syslog",
+            "thresholdCrossingAlert",
+            "voiceQuality"
+          ]
+        },
+        "eventId": {
+          "description": "event key that is unique to the event source",
+          "type": "string"
+        },
+        "eventName": {
+          "description": "unique event name",
+          "type": "string"
+        },
+        "eventType": {
+          "description": "for example - applicationNf, guestOS, hostOS, platform",
+          "type": "string"
+        },
+        "internalHeaderFields": {
+          "$ref": "#/definitions/internalHeaderFields"
+        },
+        "lastEpochMicrosec": {
+          "description": "the latest unix time aka epoch time associated with the event from any component--as microseconds elapsed since 1 Jan 1970 not including leap seconds",
+          "type": "number"
+        },
+        "nfcNamingCode": {
+          "description": "3 character network function component type, aligned with vfc naming standards",
+          "type": "string"
+        },
+        "nfNamingCode": {
+          "description": "4 character network function type, aligned with nf naming standards",
+          "type": "string"
+        },
+        "nfVendorName": {
+          "description": "network function vendor name",
+          "type": "string"
+        },
+        "priority": {
+          "description": "processing priority",
+          "type": "string",
+          "enum": [
+            "High",
+            "Medium",
+            "Normal",
+            "Low"
+          ]
+        },
+        "reportingEntityId": {
+          "description": "UUID identifying the entity reporting the event, for example an OAM VM; must be populated by the ATT enrichment process",
+          "type": "string"
+        },
+        "reportingEntityName": {
+          "description": "name of the entity reporting the event, for example, an EMS name; may be the same as sourceName",
+          "type": "string"
+        },
+        "sequence": {
+          "description": "ordering of events communicated by an event source instance or 0 if not needed",
+          "type": "integer"
+        },
+        "sourceId": {
+          "description": "UUID identifying the entity experiencing the event issue; must be populated by the ATT enrichment process",
+          "type": "string"
+        },
+        "sourceName": {
+          "description": "name of the entity experiencing the event issue",
+          "type": "string"
+        },
+        "startEpochMicrosec": {
+          "description": "the earliest unix time aka epoch time associated with the event from any component--as microseconds elapsed since 1 Jan 1970 not including leap seconds",
+          "type": "number"
+        },
+        "stndDefinedNamespace": {
+          "description": "Standards organization defined event namespace; expected usage includes event routing by the event listener",
+          "type": "string"
+        },
+        "timeZoneOffset": {
+          "description": "UTC offset for the local time zone of the device as UTC+/-hh.mm",
+          "type": "string"
+        },
+        "version": {
+          "description": "version of the event header",
+          "type": "string",
+          "enum": [
+            "4.0",
+            "4.0.1",
+            "4.1"
+          ]
+        },
+        "vesEventListenerVersion": {
+          "description": "version of the VES Event Listener API",
+          "type": "string",
+          "enum": [
+            "7.0",
+            "7.0.1",
+            "7.1",
+            "7.1.1",
+            "7.2",
+            "7.2.1"
+          ]
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "domain",
+        "eventId",
+        "eventName",
+        "lastEpochMicrosec",
+        "priority",
+        "reportingEntityName",
+        "sequence",
+        "sourceName",
+        "startEpochMicrosec",
+        "version",
+        "vesEventListenerVersion"
+      ]
+    },
+    "counter": {
+      "description": "performance counter",
+      "type": "object",
+      "properties": {
+        "criticality": {
+          "type": "string",
+          "enum": [
+            "CRIT",
+            "MAJ"
+          ]
+        },
+        "hashMap": {
+          "$ref": "#/definitions/hashMap"
+        },
+        "thresholdCrossed": {
+          "type": "string"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "criticality",
+        "hashMap",
+        "thresholdCrossed"
+      ]
+    },
+    "cpuUsage": {
+      "description": "usage of an identified CPU",
+      "type": "object",
+      "properties": {
+        "cpuCapacityContention": {
+          "description": "the amount of time the CPU cannot run due to contention, in milliseconds over the measurementInterval",
+          "type": "number"
+        },
+        "cpuDemandAvg": {
+          "description": "the total CPU time that the NF/NFC/VM could use if there was no contention, in milliseconds over the measurementInterval",
+          "type": "number"
+        },
+        "cpuDemandMhz": {
+          "description": "CPU demand in megahertz",
+          "type": "number"
+        },
+        "cpuDemandPct": {
+          "description": "CPU demand as a percentage of the provisioned capacity",
+          "type": "number"
+        },
+        "cpuIdentifier": {
+          "description": "cpu identifier",
+          "type": "string"
+        },
+        "cpuIdle": {
+          "description": "percentage of CPU time spent in the idle task",
+          "type": "number"
+        },
+        "cpuLatencyAvg": {
+          "description": "percentage of time the VM is unable to run because it is contending for access to the physical CPUs",
+          "type": "number"
+        },
+        "cpuOverheadAvg": {
+          "description": "the overhead demand above available allocations and reservations, in milliseconds over the measurementInterval",
+          "type": "number"
+        },
+        "cpuSwapWaitTime": {
+          "description": "swap wait time. in milliseconds over the measurementInterval",
+          "type": "number"
+        },
+        "cpuUsageInterrupt": {
+          "description": "percentage of time spent servicing interrupts",
+          "type": "number"
+        },
+        "cpuUsageNice": {
+          "description": "percentage of time spent running user space processes that have been niced",
+          "type": "number"
+        },
+        "cpuUsageSoftIrq": {
+          "description": "percentage of time spent handling soft irq interrupts",
+          "type": "number"
+        },
+        "cpuUsageSteal": {
+          "description": "percentage of time spent in involuntary wait which is neither user, system or idle time and is effectively time that went missing",
+          "type": "number"
+        },
+        "cpuUsageSystem": {
+          "description": "percentage of time spent on system tasks running the kernel",
+          "type": "number"
+        },
+        "cpuUsageUser": {
+          "description": "percentage of time spent running un-niced user space processes",
+          "type": "number"
+        },
+        "cpuWait": {
+          "description": "percentage of CPU time spent waiting for I/O operations to complete",
+          "type": "number"
+        },
+        "percentUsage": {
+          "description": "aggregate cpu usage of the virtual machine on which the xNFC reporting the event is running",
+          "type": "number"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "cpuIdentifier",
+        "percentUsage"
+      ]
+    },
+    "diskUsage": {
+      "description": "usage of an identified disk",
+      "type": "object",
+      "properties": {
+        "diskBusResets": {
+          "description": "number of bus resets over the measurementInterval",
+          "type": "number"
+        },
+        "diskCommandsAborted": {
+          "description": "number of disk commands aborted over the measurementInterval",
+          "type": "number"
+        },
+        "diskCommandsAvg": {
+          "description": "average number of commands per second over the measurementInterval",
+          "type": "number"
+        },
+        "diskFlushRequests": {
+          "description": "total flush requests of the disk cache over the measurementInterval",
+          "type": "number"
+        },
+        "diskFlushTime": {
+          "description": "milliseconds spent on disk cache flushing over the measurementInterval",
+          "type": "number"
+        },
+        "diskIdentifier": {
+          "description": "disk identifier",
+          "type": "string"
+        },
+        "diskIoTimeAvg": {
+          "description": "milliseconds spent doing input/output operations over 1 sec; treat this metric as a device load percentage where 1000ms  matches 100% load; provide the average over the measurement interval",
+          "type": "number"
+        },
+        "diskIoTimeLast": {
+          "description": "milliseconds spent doing input/output operations over 1 sec; treat this metric as a device load percentage where 1000ms  matches 100% load; provide the last value measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskIoTimeMax": {
+          "description": "milliseconds spent doing input/output operations over 1 sec; treat this metric as a device load percentage where 1000ms  matches 100% load; provide the maximum value measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskIoTimeMin": {
+          "description": "milliseconds spent doing input/output operations over 1 sec; treat this metric as a device load percentage where 1000ms  matches 100% load; provide the minimum value measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskMergedReadAvg": {
+          "description": "number of logical read operations that were merged into physical read operations, e.g., two logical reads were served by one physical disk access; provide the average measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskMergedReadLast": {
+          "description": "number of logical read operations that were merged into physical read operations, e.g., two logical reads were served by one physical disk access; provide the last value measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskMergedReadMax": {
+          "description": "number of logical read operations that were merged into physical read operations, e.g., two logical reads were served by one physical disk access; provide the maximum value measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskMergedReadMin": {
+          "description": "number of logical read operations that were merged into physical read operations, e.g., two logical reads were served by one physical disk access; provide the minimum value measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskMergedWriteAvg": {
+          "description": "number of logical write operations that were merged into physical write operations, e.g., two logical writes were served by one physical disk access; provide the average measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskMergedWriteLast": {
+          "description": "number of logical write operations that were merged into physical write operations, e.g., two logical writes were served by one physical disk access; provide the last value measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskMergedWriteMax": {
+          "description": "number of logical write operations that were merged into physical write operations, e.g., two logical writes were served by one physical disk access; provide the maximum value measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskMergedWriteMin": {
+          "description": "number of logical write operations that were merged into physical write operations, e.g., two logical writes were served by one physical disk access; provide the minimum value measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskOctetsReadAvg": {
+          "description": "number of octets per second read from a disk or partition; provide the average measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskOctetsReadLast": {
+          "description": "number of octets per second read from a disk or partition; provide the last measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskOctetsReadMax": {
+          "description": "number of octets per second read from a disk or partition; provide the maximum measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskOctetsReadMin": {
+          "description": "number of octets per second read from a disk or partition; provide the minimum measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskOctetsWriteAvg": {
+          "description": "number of octets per second written to a disk or partition; provide the average measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskOctetsWriteLast": {
+          "description": "number of octets per second written to a disk or partition; provide the last measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskOctetsWriteMax": {
+          "description": "number of octets per second written to a disk or partition; provide the maximum measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskOctetsWriteMin": {
+          "description": "number of octets per second written to a disk or partition; provide the minimum measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskOpsReadAvg": {
+          "description": "number of read operations per second issued to the disk; provide the average measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskOpsReadLast": {
+          "description": "number of read operations per second issued to the disk; provide the last measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskOpsReadMax": {
+          "description": "number of read operations per second issued to the disk; provide the maximum measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskOpsReadMin": {
+          "description": "number of read operations per second issued to the disk; provide the minimum measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskOpsWriteAvg": {
+          "description": "number of write operations per second issued to the disk; provide the average measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskOpsWriteLast": {
+          "description": "number of write operations per second issued to the disk; provide the last measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskOpsWriteMax": {
+          "description": "number of write operations per second issued to the disk; provide the maximum measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskOpsWriteMin": {
+          "description": "number of write operations per second issued to the disk; provide the minimum measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskPendingOperationsAvg": {
+          "description": "queue size of pending I/O operations per second; provide the average measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskPendingOperationsLast": {
+          "description": "queue size of pending I/O operations per second; provide the last measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskPendingOperationsMax": {
+          "description": "queue size of pending I/O operations per second; provide the maximum measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskPendingOperationsMin": {
+          "description": "queue size of pending I/O operations per second; provide the minimum measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskReadCommandsAvg": {
+          "description": "average number of read commands issued per second to the disk over the measurementInterval",
+          "type": "number"
+        },
+        "diskTime": {
+          "description": "nanoseconds spent on disk cache reads/writes within the measurement interval",
+          "type": "number"
+        },
+        "diskTimeReadAvg": {
+          "description": "milliseconds a read operation took to complete; provide the average measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskTimeReadLast": {
+          "description": "milliseconds a read operation took to complete; provide the last measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskTimeReadMax": {
+          "description": "milliseconds a read operation took to complete; provide the maximum measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskTimeReadMin": {
+          "description": "milliseconds a read operation took to complete; provide the minimum measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskTimeWriteAvg": {
+          "description": "milliseconds a write operation took to complete; provide the average measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskTimeWriteLast": {
+          "description": "milliseconds a write operation took to complete; provide the last measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskTimeWriteMax": {
+          "description": "milliseconds a write operation took to complete; provide the maximum measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskTimeWriteMin": {
+          "description": "milliseconds a write operation took to complete; provide the minimum measurement within the measurement interval",
+          "type": "number"
+        },
+        "diskTotalReadLatencyAvg": {
+          "description": "average read time from the perspective of a Guest OS: sum of the Kernel Read Latency and Physical Device Read Latency in milliseconds over the measurement interval",
+          "type": "number"
+        },
+        "diskTotalWriteLatencyAvg": {
+          "description": "average write time from the perspective of a Guest OS: sum of the Kernel Write Latency and Physical Device Write Latency in milliseconds over the measurement interval",
+          "type": "number"
+        },
+        "diskWeightedIoTimeAvg": {
+          "description": "measure in ms over 1 sec of both I/O completion time and the backlog that may be accumulating; value is the average within the collection interval",
+          "type": "number"
+        },
+        "diskWeightedIoTimeLast": {
+          "description": "measure in ms over 1 sec of both I/O completion time and the backlog that may be accumulating; value is the last within the collection interval",
+          "type": "number"
+        },
+        "diskWeightedIoTimeMax": {
+          "description": "measure in ms over 1 sec of both I/O completion time and the backlog that may be accumulating; value is the maximum within the collection interval",
+          "type": "number"
+        },
+        "diskWeightedIoTimeMin": {
+          "description": "measure in ms over 1 sec of both I/O completion time and the backlog that may be accumulating; value is the minimum within the collection interval",
+          "type": "number"
+        },
+        "diskWriteCommandsAvg": {
+          "description": "average number of write commands issued per second to the disk over the measurementInterval",
+          "type": "number"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "diskIdentifier"
+      ]
+    },
+    "endOfCallVqmSummaries": {
+      "description": "provides end of call voice quality metrics",
+      "type": "object",
+      "properties": {
+        "adjacencyName": {
+          "description": "adjacency name",
+          "type": "string"
+        },
+        "endpointAverageJitter": {
+          "description": "endpoint average jitter",
+          "type": "number"
+        },
+        "endpointDescription": {
+          "description": "either Caller or Callee",
+          "type": "string",
+          "enum": [
+            "Caller",
+            "Callee"
+          ]
+        },
+        "endpointMaxJitter": {
+          "description": "endpoint maximum jitter",
+          "type": "number"
+        },
+        "endpointRtpOctetsDiscarded": {
+          "description": "",
+          "type": "number"
+        },
+        "endpointRtpOctetsLost": {
+          "description": "endpoint RTP octets lost",
+          "type": "number"
+        },
+        "endpointRtpOctetsReceived": {
+          "description": "",
+          "type": "number"
+        },
+        "endpointRtpOctetsSent": {
+          "description": "",
+          "type": "number"
+        },
+        "endpointRtpPacketsDiscarded": {
+          "description": "",
+          "type": "number"
+        },
+        "endpointRtpPacketsLost": {
+          "description": "endpoint RTP packets lost",
+          "type": "number"
+        },
+        "endpointRtpPacketsReceived": {
+          "description": "",
+          "type": "number"
+        },
+        "endpointRtpPacketsSent": {
+          "description": "",
+          "type": "number"
+        },
+        "localAverageJitter": {
+          "description": "Local average jitter",
+          "type": "number"
+        },
+        "localAverageJitterBufferDelay": {
+          "description": "Local average jitter delay",
+          "type": "number"
+        },
+        "localMaxJitter": {
+          "description": "Local maximum jitter",
+          "type": "number"
+        },
+        "localMaxJitterBufferDelay": {
+          "description": "Local maximum jitter delay",
+          "type": "number"
+        },
+        "localRtpOctetsDiscarded": {
+          "description": "",
+          "type": "number"
+        },
+        "localRtpOctetsLost": {
+          "description": "Local RTP octets lost",
+          "type": "number"
+        },
+        "localRtpOctetsReceived": {
+          "description": "",
+          "type": "number"
+        },
+        "localRtpOctetsSent": {
+          "description": "",
+          "type": "number"
+        },
+        "localRtpPacketsDiscarded": {
+          "description": "",
+          "type": "number"
+        },
+        "localRtpPacketsLost": {
+          "description": "Local RTP packets lost",
+          "type": "number"
+        },
+        "localRtpPacketsReceived": {
+          "description": "",
+          "type": "number"
+        },
+        "localRtpPacketsSent": {
+          "description": "",
+          "type": "number"
+        },
+        "mosCqe": {
+          "description": "1-5 1dp",
+          "type": "number"
+        },
+        "oneWayDelay": {
+          "description": "one-way path delay in milliseconds",
+          "type": "number"
+        },
+        "packetLossPercent": {
+          "description": "Calculated percentage packet loss based on Endpoint RTP packets lost (as reported in RTCP) and Local RTP packets sent. Direction is based on Endpoint description (Caller, Callee). Decimal (2 dp)",
+          "type": "number"
+        },
+        "rFactor": {
+          "description": "0-100",
+          "type": "number"
+        },
+        "roundTripDelay": {
+          "description": "round-trip delay in milliseconds",
+          "type": "number"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "adjacencyName",
+        "endpointDescription"
+      ]
+    },
+    "event": {
+      "description": "the root level of the common event format",
+      "type": "object",
+      "properties": {
+        "commonEventHeader": {
+          "$ref": "#/definitions/commonEventHeader"
+        },
+        "faultFields": {
+          "$ref": "#/definitions/faultFields"
+        },
+        "heartbeatFields": {
+          "$ref": "#/definitions/heartbeatFields"
+        },
+        "measurementFields": {
+          "$ref": "#/definitions/measurementFields"
+        },
+        "mobileFlowFields": {
+          "$ref": "#/definitions/mobileFlowFields"
+        },
+        "notificationFields": {
+          "$ref": "#/definitions/notificationFields"
+        },
+        "otherFields": {
+          "$ref": "#/definitions/otherFields"
+        },
+        "perf3gppFields": {
+          "$ref": "#/definitions/perf3gppFields"
+        },
+        "pnfRegistrationFields": {
+          "$ref": "#/definitions/pnfRegistrationFields"
+        },
+        "sipSignalingFields": {
+          "$ref": "#/definitions/sipSignalingFields"
+        },
+        "stateChangeFields": {
+          "$ref": "#/definitions/stateChangeFields"
+        },
+        "stndDefinedFields": {
+          "$ref": "#/definitions/stndDefinedFields"
+        },
+        "syslogFields": {
+          "$ref": "#/definitions/syslogFields"
+        },
+        "thresholdCrossingAlertFields": {
+          "$ref": "#/definitions/thresholdCrossingAlertFields"
+        },
+        "voiceQualityFields": {
+          "$ref": "#/definitions/voiceQualityFields"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "commonEventHeader"
+      ]
+    },
+    "eventList": {
+      "description": "array of events",
+      "type": "array",
+      "items": {
+        "$ref": "#/definitions/event"
+      }
+    },
+    "faultFields": {
+      "description": "fields specific to fault events",
+      "type": "object",
+      "properties": {
+        "alarmAdditionalInformation": {
+          "$ref": "#/definitions/hashMap"
+        },
+        "alarmCondition": {
+          "description": "alarm condition reported by the device",
+          "type": "string"
+        },
+        "alarmInterfaceA": {
+          "description": "card, port, channel or interface name of the device generating the alarm",
+          "type": "string"
+        },
+        "eventCategory": {
+          "description": "Event category, for example: license, link, routing, security, signaling",
+          "type": "string"
+        },
+        "eventSeverity": {
+          "description": "event severity",
+          "type": "string",
+          "enum": [
+            "CRITICAL",
+            "MAJOR",
+            "MINOR",
+            "WARNING",
+            "NORMAL"
+          ]
+        },
+        "eventSourceType": {
+          "description": "type of event source; examples: card, host, other, port, portThreshold, router, slotThreshold, switch, virtualMachine, virtualNetworkFunction",
+          "type": "string"
+        },
+        "faultFieldsVersion": {
+          "description": "version of the faultFields block",
+          "type": "string",
+          "enum": [
+            "4.0"
+          ]
+        },
+        "specificProblem": {
+          "description": "short description of the alarm or problem",
+          "type": "string"
+        },
+        "vfStatus": {
+          "description": "virtual function status enumeration",
+          "type": "string",
+          "enum": [
+            "Active",
+            "Idle",
+            "Preparing to terminate",
+            "Ready to terminate",
+            "Requesting termination"
+          ]
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "alarmCondition",
+        "eventSeverity",
+        "eventSourceType",
+        "faultFieldsVersion",
+        "specificProblem",
+        "vfStatus"
+      ]
+    },
+    "filesystemUsage": {
+      "description": "disk usage of an identified virtual machine in gigabytes and/or gigabytes per second",
+      "type": "object",
+      "properties": {
+        "blockConfigured": {
+          "type": "number"
+        },
+        "blockIops": {
+          "type": "number"
+        },
+        "blockUsed": {
+          "type": "number"
+        },
+        "ephemeralConfigured": {
+          "type": "number"
+        },
+        "ephemeralIops": {
+          "type": "number"
+        },
+        "ephemeralUsed": {
+          "type": "number"
+        },
+        "filesystemName": {
+          "type": "string"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "blockConfigured",
+        "blockIops",
+        "blockUsed",
+        "ephemeralConfigured",
+        "ephemeralIops",
+        "ephemeralUsed",
+        "filesystemName"
+      ]
+    },
+    "gtpPerFlowMetrics": {
+      "description": "Mobility GTP Protocol per flow metrics",
+      "type": "object",
+      "properties": {
+        "avgBitErrorRate": {
+          "description": "average bit error rate",
+          "type": "number"
+        },
+        "avgPacketDelayVariation": {
+          "description": "Average packet delay variation or jitter in milliseconds for received packets: Average difference between the packet timestamp and time received for all pairs of consecutive packets",
+          "type": "number"
+        },
+        "avgPacketLatency": {
+          "description": "average delivery latency",
+          "type": "number"
+        },
+        "avgReceiveThroughput": {
+          "description": "average receive throughput",
+          "type": "number"
+        },
+        "avgTransmitThroughput": {
+          "description": "average transmit throughput",
+          "type": "number"
+        },
+        "durConnectionFailedStatus": {
+          "description": "duration of failed state in milliseconds, computed as the cumulative time between a failed echo request and the next following successful echo request, over this reporting interval",
+          "type": "number"
+        },
+        "durTunnelFailedStatus": {
+          "description": "Duration of errored state, computed as the cumulative time between a tunnel error indicator and the next following non-errored indicator, over this reporting interval",
+          "type": "number"
+        },
+        "flowActivatedBy": {
+          "description": "Endpoint activating the flow",
+          "type": "string"
+        },
+        "flowActivationEpoch": {
+          "description": "Time the connection is activated in the flow (connection) being reported on, or transmission time of the first packet if activation time is not available",
+          "type": "number"
+        },
+        "flowActivationMicrosec": {
+          "description": "Integer microseconds for the start of the flow connection",
+          "type": "number"
+        },
+        "flowActivationTime": {
+          "description": "time the connection is activated in the flow being reported on, or transmission time of the first packet if activation time is not available; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800",
+          "type": "string"
+        },
+        "flowDeactivatedBy": {
+          "description": "Endpoint deactivating the flow",
+          "type": "string"
+        },
+        "flowDeactivationEpoch": {
+          "description": "Time for the end of the flow connection, in integer UTC epoch time aka UNIX time",
+          "type": "number"
+        },
+        "flowDeactivationMicrosec": {
+          "description": "Integer microseconds for the end of the flow connection",
+          "type": "number"
+        },
+        "flowDeactivationTime": {
+          "description": "Transmission time of the first packet in the flow connection being reported on; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800",
+          "type": "string"
+        },
+        "flowStatus": {
+          "description": "connection status at reporting time as a working / inactive / failed indicator value",
+          "type": "string"
+        },
+        "gtpConnectionStatus": {
+          "description": "Current connection state at reporting time",
+          "type": "string"
+        },
+        "gtpTunnelStatus": {
+          "description": "Current tunnel state at reporting time",
+          "type": "string"
+        },
+        "ipTosCountList": {
+          "$ref": "#/definitions/hashMap"
+        },
+        "ipTosList": {
+          "description": "Array of unique IP Type-of-Service values observed in the flow where values range from '0' to '255'",
+          "type": "array",
+          "items": {
+            "type": "string"
+          }
+        },
+        "largePacketRtt": {
+          "description": "large packet round trip time",
+          "type": "number"
+        },
+        "largePacketThreshold": {
+          "description": "large packet threshold being applied",
+          "type": "number"
+        },
+        "maxPacketDelayVariation": {
+          "description": "Maximum packet delay variation or jitter in milliseconds for received packets: Maximum of the difference between the packet timestamp and time received for all pairs of consecutive packets",
+          "type": "number"
+        },
+        "maxReceiveBitRate": {
+          "description": "maximum receive bit rate",
+          "type": "number"
+        },
+        "maxTransmitBitRate": {
+          "description": "maximum transmit bit rate",
+          "type": "number"
+        },
+        "mobileQciCosCountList": {
+          "$ref": "#/definitions/hashMap"
+        },
+        "mobileQciCosList": {
+          "description": "Array of unique LTE QCI or UMTS class-of-service values observed in the flow",
+          "type": "array",
+          "items": {
+            "type": "string"
+          }
+        },
+        "numActivationFailures": {
+          "description": "Number of failed activation requests, as observed by the reporting node",
+          "type": "number"
+        },
+        "numBitErrors": {
+          "description": "number of errored bits",
+          "type": "number"
+        },
+        "numBytesReceived": {
+          "description": "number of bytes received, including retransmissions",
+          "type": "number"
+        },
+        "numBytesTransmitted": {
+          "description": "number of bytes transmitted, including retransmissions",
+          "type": "number"
+        },
+        "numDroppedPackets": {
+          "description": "number of received packets dropped due to errors per virtual interface",
+          "type": "number"
+        },
+        "numGtpEchoFailures": {
+          "description": "Number of Echo request path failures where failed paths are defined in 3GPP TS 29.281 sec 7.2.1 and 3GPP TS 29.060 sec. 11.2",
+          "type": "number"
+        },
+        "numGtpTunnelErrors": {
+          "description": "Number of tunnel error indications where errors are defined in 3GPP TS 29.281 sec 7.3.1 and 3GPP TS 29.060 sec. 11.1",
+          "type": "number"
+        },
+        "numHttpErrors": {
+          "description": "Http error count",
+          "type": "number"
+        },
+        "numL7BytesReceived": {
+          "description": "number of tunneled layer 7 bytes received, including retransmissions",
+          "type": "number"
+        },
+        "numL7BytesTransmitted": {
+          "description": "number of tunneled layer 7 bytes transmitted, excluding retransmissions",
+          "type": "number"
+        },
+        "numLostPackets": {
+          "description": "number of lost packets",
+          "type": "number"
+        },
+        "numOutOfOrderPackets": {
+          "description": "number of out-of-order packets",
+          "type": "number"
+        },
+        "numPacketErrors": {
+          "description": "number of errored packets",
+          "type": "number"
+        },
+        "numPacketsReceivedExclRetrans": {
+          "description": "number of packets received, excluding retransmission",
+          "type": "number"
+        },
+        "numPacketsReceivedInclRetrans": {
+          "description": "number of packets received, including retransmission",
+          "type": "number"
+        },
+        "numPacketsTransmittedInclRetrans": {
+          "description": "number of packets transmitted, including retransmissions",
+          "type": "number"
+        },
+        "numRetries": {
+          "description": "number of packet retries",
+          "type": "number"
+        },
+        "numTimeouts": {
+          "description": "number of packet timeouts",
+          "type": "number"
+        },
+        "numTunneledL7BytesReceived": {
+          "description": "number of tunneled layer 7 bytes received, excluding retransmissions",
+          "type": "number"
+        },
+        "roundTripTime": {
+          "description": "round trip time",
+          "type": "number"
+        },
+        "tcpFlagCountList": {
+          "$ref": "#/definitions/hashMap"
+        },
+        "tcpFlagList": {
+          "description": "Array of unique TCP Flags observed in the flow",
+          "type": "array",
+          "items": {
+            "type": "string"
+          }
+        },
+        "timeToFirstByte": {
+          "description": "Time in milliseconds between the connection activation and first byte received",
+          "type": "number"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "avgBitErrorRate",
+        "avgPacketDelayVariation",
+        "avgPacketLatency",
+        "avgReceiveThroughput",
+        "avgTransmitThroughput",
+        "flowActivationEpoch",
+        "flowActivationMicrosec",
+        "flowDeactivationEpoch",
+        "flowDeactivationMicrosec",
+        "flowDeactivationTime",
+        "flowStatus",
+        "maxPacketDelayVariation",
+        "numActivationFailures",
+        "numBitErrors",
+        "numBytesReceived",
+        "numBytesTransmitted",
+        "numDroppedPackets",
+        "numL7BytesReceived",
+        "numL7BytesTransmitted",
+        "numLostPackets",
+        "numOutOfOrderPackets",
+        "numPacketErrors",
+        "numPacketsReceivedExclRetrans",
+        "numPacketsReceivedInclRetrans",
+        "numPacketsTransmittedInclRetrans",
+        "numRetries",
+        "numTimeouts",
+        "numTunneledL7BytesReceived",
+        "roundTripTime",
+        "timeToFirstByte"
+      ]
+    },
+    "hashMap": {
+      "description": "an associative array which is an array of key:value pairs",
+      "type": "object",
+      "additionalProperties": {
+        "type": "string"
+      },
+      "default": {}
+    },
+    "heartbeatFields": {
+      "description": "optional field block for fields specific to heartbeat events",
+      "type": "object",
+      "properties": {
+        "additionalFields": {
+          "$ref": "#/definitions/hashMap"
+        },
+        "heartbeatFieldsVersion": {
+          "description": "version of the heartbeatFields block",
+          "type": "string",
+          "enum": [
+            "3.0"
+          ]
+        },
+        "heartbeatInterval": {
+          "description": "current heartbeat interval in seconds",
+          "type": "integer"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "heartbeatFieldsVersion",
+        "heartbeatInterval"
+      ]
+    },
+    "hugePages": {
+      "description": "metrics on system hugepages",
+      "type": "object",
+      "properties": {
+        "bytesFree": {
+          "description": "number of free hugepages in bytes",
+          "type": "number"
+        },
+        "bytesUsed": {
+          "description": "number of used hugepages in bytes",
+          "type": "number"
+        },
+        "hugePagesIdentifier": {
+          "description": "hugePages identifier",
+          "type": "string"
+        },
+        "percentFree": {
+          "description": "number of free hugepages in percent",
+          "type": "number"
+        },
+        "percentUsed": {
+          "description": "number of used hugepages in percent",
+          "type": "number"
+        },
+        "vmPageNumberFree": {
+          "description": "number of free vmPages in numbers",
+          "type": "number"
+        },
+        "vmPageNumberUsed": {
+          "description": "number of used vmPages in numbers",
+          "type": "number"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "hugePagesIdentifier"
+      ]
+    },
+    "internalHeaderFields": {
+      "description": "enrichment fields for internal VES Event Listener service use only, not supplied by event sources",
+      "type": "object"
+    },
+    "ipmi": {
+      "description": "intelligent platform management interface metrics",
+      "type": "object",
+      "properties": {
+        "exitAirTemperature": {
+          "description": "system fan exit air flow temperature in celsius",
+          "type": "number"
+        },
+        "frontPanelTemperature": {
+          "description": "front panel temperature in celsius",
+          "type": "number"
+        },
+        "ioModuleTemperature": {
+          "description": "io module temperature in celsius",
+          "type": "number"
+        },
+        "ipmiBaseboardTemperatureArray": {
+          "description": "array of ipmiBaseboardTemperature objects",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/ipmiBaseboardTemperature"
+          }
+        },
+        "ipmiBaseboardVoltageRegulatorArray": {
+          "description": "array of ipmiBaseboardVoltageRegulator objects",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/ipmiBaseboardVoltageRegulator"
+          }
+        },
+        "ipmiBatteryArray": {
+          "description": "array of ipmiBattery objects",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/ipmiBattery"
+          }
+        },
+        "ipmiFanArray": {
+          "description": "array of ipmiFan objects",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/ipmiFan"
+          }
+        },
+        "ipmiHsbpArray": {
+          "description": "array of ipmiHsbp objects",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/ipmiHsbp"
+          }
+        },
+        "ipmiGlobalAggregateTemperatureMarginArray": {
+          "description": "array of ipmiGlobalAggregateTemperatureMargin objects",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/ipmiGlobalAggregateTemperatureMargin"
+          }
+        },
+        "ipmiNicArray": {
+          "description": "array of ipmiNic objects",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/ipmiNic"
+          }
+        },
+        "ipmiPowerSupplyArray": {
+          "description": "array of ipmiPowerSupply objects",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/ipmiPowerSupply"
+          }
+        },
+        "ipmiProcessorArray": {
+          "description": "array of ipmiProcessor objects",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/ipmiProcessor"
+          }
+        },
+        "systemAirflow": {
+          "description": "airflow in cubic feet per minute (cfm)",
+          "type": "number"
+        }
+      },
+      "additionalProperties": false
+    },
+    "ipmiBaseboardTemperature": {
+      "description": "intelligent platform management interface (ipmi) baseboard temperature metrics",
+      "type": "object",
+      "properties": {
+        "baseboardTemperatureIdentifier": {
+          "description": "identifier for the location where the temperature is taken",
+          "type": "string"
+        },
+        "baseboardTemperature": {
+          "description": "baseboard temperature in celsius",
+          "type": "number"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "baseboardTemperatureIdentifier"
+      ]
+    },
+    "ipmiBaseboardVoltageRegulator": {
+      "description": "intelligent platform management interface (ipmi) baseboard voltage regulator metrics",
+      "type": "object",
+      "properties": {
+        "baseboardVoltageRegulatorIdentifier": {
+          "description": "identifier for the baseboard voltage regulator",
+          "type": "string"
+        },
+        "voltageRegulatorTemperature": {
+          "description": "voltage regulator temperature in celsius",
+          "type": "number"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "baseboardVoltageRegulatorIdentifier"
+      ]
+    },
+    "ipmiBattery": {
+      "description": "intelligent platform management interface (ipmi) battery metrics",
+      "type": "object",
+      "properties": {
+        "batteryIdentifier": {
+          "description": "identifier for the battery",
+          "type": "string"
+        },
+        "batteryType": {
+          "description": "type of battery",
+          "type": "string"
+        },
+        "batteryVoltageLevel": {
+          "description": "battery voltage level",
+          "type": "number"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "batteryIdentifier"
+      ]
+    },
+    "ipmiFan": {
+      "description": "intelligent platform management interface (ipmi) fan metrics",
+      "type": "object",
+      "properties": {
+        "fanIdentifier": {
+          "description": "identifier for the fan",
+          "type": "string"
+        },
+        "fanSpeed": {
+          "description": "fan speed in revolutions per minute (rpm)",
+          "type": "number"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "fanIdentifier"
+      ]
+    },
+    "ipmiGlobalAggregateTemperatureMargin": {
+      "description": "intelligent platform management interface (ipmi) global aggregate temperature margin",
+      "type": "object",
+      "properties": {
+        "globalAggregateTemperatureMarginIdentifier": {
+          "description": "identifier for the ipmi global aggregate temperature margin metrics",
+          "type": "string"
+        },
+        "globalAggregateTemperatureMargin": {
+          "description": "the difference between the current global aggregate temperature, in celsius, and the global aggregate throttling thermal trip point",
+          "type": "number"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "globalAggregateTemperatureMarginIdentifier",
+        "globalAggregateTemperatureMargin"
+      ]
+    },
+    "ipmiHsbp": {
+      "description": "intelligent platform management interface (ipmi) hot swap backplane power metrics",
+      "type": "object",
+      "properties": {
+        "hsbpIdentifier": {
+          "description": "identifier for the hot swap backplane power unit",
+          "type": "string"
+        },
+        "hsbpTemperature": {
+          "description": "hot swap backplane power temperature in celsius",
+          "type": "number"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "hsbpIdentifier"
+      ]
+    },
+    "ipmiNic": {
+      "description": "intelligent platform management interface (ipmi) network interface control card (nic) metrics",
+      "type": "object",
+      "properties": {
+        "nicIdentifier": {
+          "description": "identifier for the network interface control card",
+          "type": "string"
+        },
+        "nicTemperature": {
+          "description": "nic temperature in celsius",
+          "type": "number"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "nicIdentifier"
+      ]
+    },
+    "ipmiPowerSupply": {
+      "description": "intelligent platform management interface (ipmi) power supply metrics",
+      "type": "object",
+      "properties": {
+        "powerSupplyIdentifier": {
+          "description": "identifier for the power supply",
+          "type": "string"
+        },
+        "powerSupplyInputPower": {
+          "description": "input power in watts",
+          "type": "number"
+        },
+        "powerSupplyCurrentOutputPercent": {
+          "description": "current output voltage as a percentage of the design specified level",
+          "type": "number"
+        },
+        "powerSupplyTemperature": {
+          "description": "power supply temperature in celsius",
+          "type": "number"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "powerSupplyIdentifier"
+      ]
+    },
+    "ipmiProcessor": {
+      "description": "intelligent platform management interface processor metrics",
+      "type": "object",
+      "properties": {
+        "processorIdentifier": {
+          "description": "identifier for an ipmi processor",
+          "type": "string"
+        },
+        "processorThermalControlPercent": {
+          "description": "processor thermal control in percent",
+          "type": "number"
+        },
+        "processorDtsThermalMargin": {
+          "description": "processor digital thermal sensor (dts) thermal margin in celsius",
+          "type": "number"
+        },
+        "processorDimmAggregateThermalMarginArray": {
+          "description": "array of processorDimmAggregateThermalMargin objects",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/processorDimmAggregateThermalMargin"
+          }
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "processorIdentifier"
+      ]
+    },
+    "jsonObject": {
+      "description": "json object schema, name and other meta-information along with one or more object instances",
+      "type": "object",
+      "properties": {
+        "objectInstances": {
+          "description": "one or more instances of the jsonObject",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/jsonObjectInstance"
+          }
+        },
+        "objectName": {
+          "description": "name of the JSON Object",
+          "type": "string"
+        },
+        "objectSchema": {
+          "description": "json schema for the object",
+          "type": "string"
+        },
+        "objectSchemaUrl": {
+          "description": "Url to the json schema for the object",
+          "type": "string"
+        },
+        "nfSubscribedObjectName": {
+          "description": "name of the object associated with the nfSubscriptionId",
+          "type": "string"
+        },
+        "nfSubscriptionId": {
+          "description": "identifies an openConfig telemetry subscription on a network function, which configures the network function to send complex object data associated with the jsonObject",
+          "type": "string"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "objectInstances",
+        "objectName"
+      ]
+    },
+    "jsonObjectInstance": {
+      "description": "meta-information about an instance of a jsonObject along with the actual object instance",
+      "type": "object",
+      "properties": {
+        "jsonObject": {
+          "$ref": "#/definitions/jsonObject"
+        },
+        "objectInstance": {
+          "description": "an instance conforming to the jsonObject objectSchema",
+          "type": "object"
+        },
+        "objectInstanceEpochMicrosec": {
+          "description": "the unix time aka epoch time associated with this objectInstance--as microseconds elapsed since 1 Jan 1970 not including leap seconds",
+          "type": "number"
+        },
+        "objectKeys": {
+          "description": "an ordered set of keys that identifies this particular instance of jsonObject",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/key"
+          }
+        }
+      },
+      "additionalProperties": false
+    },
+    "key": {
+      "description": "tuple which provides the name of a key along with its value and relative order",
+      "type": "object",
+      "properties": {
+        "keyName": {
+          "description": "name of the key",
+          "type": "string"
+        },
+        "keyOrder": {
+          "description": "relative sequence or order of the key with respect to other keys",
+          "type": "integer"
+        },
+        "keyValue": {
+          "description": "value of the key",
+          "type": "string"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "keyName"
+      ]
+    },
+    "latencyBucketMeasure": {
+      "description": "number of counts falling within a defined latency bucket",
+      "type": "object",
+      "properties": {
+        "countsInTheBucket": {
+          "type": "number"
+        },
+        "highEndOfLatencyBucket": {
+          "type": "number"
+        },
+        "lowEndOfLatencyBucket": {
+          "type": "number"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "countsInTheBucket"
+      ]
+    },
+    "load": {
+      "description": "/proc/loadavg cpu utilization and io utilization metrics",
+      "type": "object",
+      "properties": {
+        "longTerm": {
+          "description": "number of jobs in the run queue (state R, cpu utilization) or waiting for disk I/O (state D, io utilization) averaged over 15 minutes using /proc/loadavg",
+          "type": "number"
+        },
+        "midTerm": {
+          "description": "number of jobs in the run queue (state R, cpu utilization) or waiting for disk I/O (state D, io utilization) averaged over 5 minutes using /proc/loadavg",
+          "type": "number"
+        },
+        "shortTerm": {
+          "description": "number of jobs in the run queue (state R, cpu utilization) or waiting for disk I/O (state D, io utilization) averaged over 1 minute using /proc/loadavg",
+          "type": "number"
+        }
+      },
+      "additionalProperties": false
+    },
+    "machineCheckException": {
+      "description": "metrics on vm machine check exceptions",
+      "type": "object",
+      "properties": {
+        "correctedMemoryErrors": {
+          "description": "total hardware errors that were corrected by the hardware (e.g. data corruption corrected via  ECC) over the measurementInterval",
+          "type": "number"
+        },
+        "correctedMemoryErrorsIn1Hr": {
+          "description": "total hardware errors that were corrected by the hardware over the last one hour",
+          "type": "number"
+        },
+        "uncorrectedMemoryErrors": {
+          "description": "total uncorrected hardware errors that were detected by the hardware (e.g., causing data corruption) over the measurementInterval",
+          "type": "number"
+        },
+        "uncorrectedMemoryErrorsIn1Hr": {
+          "description": "total uncorrected hardware errors that were detected by the hardware over the last one hour",
+          "type": "number"
+        },
+        "vmIdentifier": {
+          "description": "virtual machine identifier associated with the machine check exception",
+          "type": "string"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "vmIdentifier"
+      ]
+    },
+    "measDataCollection": {
+      "description": "3GPP measurement collection structure aligned with 3GPP PM format",
+      "type": "object",
+      "properties": {
+        "formatVersion": {
+          "description": "3gpp PM reporting file format version from pre-standard TS 28.550 v2.0.0",
+          "type": "string"
+        },
+        "granularityPeriod": {
+          "description": "granularity period for the PM report in seconds",
+          "type": "number"
+        },
+        "measInfoList": {
+          "description": "array of measurements",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/measInfo"
+          }
+        },
+        "measObjInstIdList": {
+          "description": "array of monitored object local distinguished name ids per 3GPP TS 32.300",
+          "type": "array",
+          "items": {
+            "type": "string"
+          }
+        },
+        "measuredEntityDn": {
+          "description": "distinguished name per 3GPP TS 28.550",
+          "type": "string"
+        },
+        "measuredEntitySoftwareVersion": {
+          "description": "software version for the NF providing the PM data as specified in 3GPP TS 28.550",
+          "type": "string"
+        },
+        "measuredEntityUserName": {
+          "description": "user definable name for the measured object per 3GPP TS 28.550",
+          "type": "string"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "granularityPeriod",
+        "measInfoList",
+        "measuredEntityDn"
+      ]
+    },
+    "measInfo": {
+      "description": "measurement information",
+      "type": "object",
+      "properties": {
+        "jobId": {
+          "description": "name of the measurement job",
+          "type": "string"
+        },
+        "measInfoId": {
+          "description": "measurement group identifier",
+          "oneOf": [
+            {
+              "$ref": "#/definitions/measInfoIdInteger"
+            },
+            {
+              "$ref": "#/definitions/measInfoIdString"
+            }
+          ]
+        },
+        "measTypes": {
+          "oneOf": [
+            {
+              "$ref": "#/definitions/measTypesInteger"
+            },
+            {
+              "$ref": "#/definitions/measTypesString"
+            }
+          ]
+        },
+        "measValuesList": {
+          "description": "an array of measurement values",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/measValues"
+          }
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "measTypes",
+        "measValuesList"
+      ]
+    },
+    "measInfoIdInteger": {
+      "description": "integer measurement group identifier",
+      "type": "object",
+      "properties": {
+        "iMeasInfoId": {
+          "type": "integer"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "iMeasInfoId"
+      ]
+    },
+    "measInfoIdString": {
+      "description": "string measurement group identifier",
+      "type": "object",
+      "properties": {
+        "sMeasInfoId": {
+          "type": "string"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "sMeasInfoId"
+      ]
+    },
+    "measResultInteger": {
+      "description": "integer 3GPP PM measurement result",
+      "type": "object",
+      "properties": {
+        "p": {
+          "description": "integer reference to the counter",
+          "type": "integer"
+        },
+        "iValue": {
+          "description": "integer counter value",
+          "type": "integer"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "p",
+        "iValue"
+      ]
+    },
+    "measResultNull": {
+      "description": "null 3GPP PM measurement result",
+      "type": "object",
+      "properties": {
+        "p": {
+          "description": "integer reference to the counter",
+          "type": "integer"
+        },
+        "isNull": {
+          "description": "true if the counter has no value",
+          "type": "string",
+          "enum": [
+            "true",
+            "false"
+          ]
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "p",
+        "isNull"
+      ]
+    },
+    "measResultNumber": {
+      "description": "number 3GPP PM measurement result",
+      "type": "object",
+      "properties": {
+        "p": {
+          "description": "integer reference to the counter",
+          "type": "integer"
+        },
+        "rValue": {
+          "description": "numeric counter value",
+          "type": "number"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "p",
+        "rValue"
+      ]
+    },
+    "measResultString": {
+      "description": "string 3GPP PM measurement result",
+      "type": "object",
+      "properties": {
+        "p": {
+          "description": "integer reference to the counter",
+          "type": "integer"
+        },
+        "sValue": {
+          "description": "string counter value",
+          "type": "string"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "p",
+        "sValue"
+      ]
+    },
+    "measTypesInteger": {
+      "description": "object containing an array of integer measurement identifiers associated with the measurement results",
+      "type": "object",
+      "properties": {
+        "iMeasTypesList": {
+          "type": "array",
+          "items": {
+            "type": "integer"
+          }
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "iMeasTypesList"
+      ]
+    },
+    "measTypesString": {
+      "description": "object containing an array of string measurement identifiers associated with the measurement results",
+      "type": "object",
+      "properties": {
+        "sMeasTypesList": {
+          "type": "array",
+          "items": {
+            "type": "string"
+          }
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "sMeasTypesList"
+      ]
+    },
+    "measurementFields": {
+      "description": "measurement fields",
+      "type": "object",
+      "properties": {
+        "additionalFields": {
+          "$ref": "#/definitions/hashMap"
+        },
+        "additionalMeasurements": {
+          "$ref": "#/definitions/arrayOfNamedHashMap"
+        },
+        "additionalObjects": {
+          "$ref": "#/definitions/arrayOfJsonObject"
+        },
+        "codecUsageArray": {
+          "description": "array of codecs in use",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/codecsInUse"
+          }
+        },
+        "concurrentSessions": {
+          "description": "peak concurrent sessions for the VM or xNF over the measurementInterval",
+          "type": "integer"
+        },
+        "configuredEntities": {
+          "description": "over the measurementInterval, peak total number of: users, subscribers, devices, adjacencies, etc., for the VM, or subscribers, devices, etc., for the xNF",
+          "type": "integer"
+        },
+        "cpuUsageArray": {
+          "description": "usage of an array of CPUs",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/cpuUsage"
+          }
+        },
+        "diskUsageArray": {
+          "description": "usage of an array of disks",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/diskUsage"
+          }
+        },
+        "featureUsageArray": {
+          "$ref": "#/definitions/hashMap"
+        },
+        "filesystemUsageArray": {
+          "description": "filesystem usage of the VM on which the xNFC reporting the event is running",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/filesystemUsage"
+          }
+        },
+        "hugePagesArray": {
+          "description": "array of metrics on hugePages",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/hugePages"
+          }
+        },
+        "ipmi": {
+          "$ref": "#/definitions/ipmi"
+        },
+        "latencyDistribution": {
+          "description": "array of integers representing counts of requests whose latency in milliseconds falls within per-xNF configured ranges",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/latencyBucketMeasure"
+          }
+        },
+        "loadArray": {
+          "description": "array of system load metrics",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/load"
+          }
+        },
+        "machineCheckExceptionArray": {
+          "description": "array of machine check exceptions",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/machineCheckException"
+          }
+        },
+        "meanRequestLatency": {
+          "description": "mean seconds required to respond to each request for the VM on which the xNFC reporting the event is running",
+          "type": "number"
+        },
+        "measurementInterval": {
+          "description": "interval over which measurements are being reported in seconds",
+          "type": "number"
+        },
+        "measurementFieldsVersion": {
+          "description": "version of the measurementFields block",
+          "type": "string",
+          "enum": [
+            "4.0"
+          ]
+        },
+        "memoryUsageArray": {
+          "description": "memory usage of an array of VMs",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/memoryUsage"
+          }
+        },
+        "numberOfMediaPortsInUse": {
+          "description": "number of media ports in use",
+          "type": "integer"
+        },
+        "requestRate": {
+          "description": "peak rate of service requests per second to the xNF over the measurementInterval",
+          "type": "number"
+        },
+        "nfcScalingMetric": {
+          "description": "represents busy-ness of the network function from 0 to 100 as reported by the xNFC",
+          "type": "integer"
+        },
+        "nicPerformanceArray": {
+          "description": "usage of an array of network interface cards",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/nicPerformance"
+          }
+        },
+        "processStatsArray": {
+          "description": "array of metrics on system processes",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/processStats"
+          }
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "measurementInterval",
+        "measurementFieldsVersion"
+      ]
+    },
+    "measValues": {
+      "description": "3GPP measurement values",
+      "type": "object",
+      "properties": {
+        "measObjAddlFlds": {
+          "$ref": "#/definitions/hashMap"
+        },
+        "measObjInstId": {
+          "description": "monitored object local distinguished name per 3GPP TS 32.300 and 3GPP TS 32.432",
+          "type": "string"
+        },
+        "measResults": {
+          "description": "array of results",
+          "type": "array",
+          "items": {
+            "oneOf": [
+              {
+                "$ref": "#/definitions/measResultInteger"
+              },
+              {
+                "$ref": "#/definitions/measResultNull"
+              },
+              {
+                "$ref": "#/definitions/measResultNumber"
+              },
+              {
+                "$ref": "#/definitions/measResultString"
+              }
+            ]
+          }
+        },
+        "suspectFlag": {
+          "description": "indicates if the values are suspect",
+          "type": "string",
+          "enum": [
+            "true",
+            "false"
+          ]
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "measObjInstId",
+        "measResults"
+      ]
+    },
+    "memoryUsage": {
+      "description": "memory usage of an identified virtual machine",
+      "type": "object",
+      "properties": {
+        "memoryBuffered": {
+          "description": "kibibytes of temporary storage for raw disk blocks",
+          "type": "number"
+        },
+        "memoryCached": {
+          "description": "kibibytes of memory used for cache",
+          "type": "number"
+        },
+        "memoryConfigured": {
+          "description": "kibibytes of memory configured in the virtual machine on which the xNFC reporting the event is running",
+          "type": "number"
+        },
+        "memoryDemand": {
+          "description": "host demand in kibibytes",
+          "type": "number"
+        },
+        "memoryFree": {
+          "description": "kibibytes of physical RAM left unused by the system",
+          "type": "number"
+        },
+        "memoryLatencyAvg": {
+          "description": "Percentage of time the VM is waiting to access swapped or compressed memory",
+          "type": "number"
+        },
+        "memorySharedAvg": {
+          "description": "shared memory in kilobytes",
+          "type": "number"
+        },
+        "memorySlabRecl": {
+          "description": "the part of the slab that can be reclaimed such as caches measured in kibibytes",
+          "type": "number"
+        },
+        "memorySlabUnrecl": {
+          "description": "the part of the slab that cannot be reclaimed even when lacking memory measured in kibibytes",
+          "type": "number"
+        },
+        "memorySwapInAvg": {
+          "description": "Amount of memory swapped-in from host cache in kibibytes",
+          "type": "number"
+        },
+        "memorySwapInRateAvg": {
+          "description": "rate at which memory is swapped from disk into active memory during the interval in kilobytes per second",
+          "type": "number"
+        },
+        "memorySwapOutAvg": {
+          "description": "Amount of memory swapped-out to host cache in kibibytes",
+          "type": "number"
+        },
+        "memorySwapOutRateAvg": {
+          "description": "rate at which memory is being swapped from active memory to disk during the current interval in kilobytes per second",
+          "type": "number"
+        },
+        "memorySwapUsedAvg": {
+          "description": "space used for caching swapped pages in the host cache in kibibytes",
+          "type": "number"
+        },
+        "memoryUsed": {
+          "description": "total memory minus the sum of free, buffered, cached and slab memory measured in kibibytes",
+          "type": "number"
+        },
+        "percentMemoryUsage": {
+          "description": "Percentage of memory usage; value = (memoryUsed / (memoryUsed + memoryFree)) x 100, if denominator is nonzero, or 0, if otherwise",
+          "type": "number"
+        },
+        "vmIdentifier": {
+          "description": "virtual machine identifier associated with the memory metrics",
+          "type": "string"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "memoryFree",
+        "memoryUsed",
+        "vmIdentifier"
+      ]
+    },
+    "mobileFlowFields": {
+      "description": "mobileFlow fields",
+      "type": "object",
+      "properties": {
+        "additionalFields": {
+          "$ref": "#/definitions/hashMap"
+        },
+        "applicationType": {
+          "description": "Application type inferred",
+          "type": "string"
+        },
+        "appProtocolType": {
+          "description": "application protocol",
+          "type": "string"
+        },
+        "appProtocolVersion": {
+          "description": "application protocol version",
+          "type": "string"
+        },
+        "cid": {
+          "description": "cell id",
+          "type": "string"
+        },
+        "connectionType": {
+          "description": "Abbreviation referencing a 3GPP reference point e.g., S1-U, S11, etc",
+          "type": "string"
+        },
+        "ecgi": {
+          "description": "Evolved Cell Global Id",
+          "type": "string"
+        },
+        "flowDirection": {
+          "description": "Flow direction, indicating if the reporting node is the source of the flow or destination for the flow",
+          "type": "string"
+        },
+        "gtpPerFlowMetrics": {
+          "$ref": "#/definitions/gtpPerFlowMetrics"
+        },
+        "gtpProtocolType": {
+          "description": "GTP protocol",
+          "type": "string"
+        },
+        "gtpVersion": {
+          "description": "GTP protocol version",
+          "type": "string"
+        },
+        "httpHeader": {
+          "description": "HTTP request header, if the flow connects to a node referenced by HTTP",
+          "type": "string"
+        },
+        "imei": {
+          "description": "IMEI for the subscriber UE used in this flow, if the flow connects to a mobile device",
+          "type": "string"
+        },
+        "imsi": {
+          "description": "IMSI for the subscriber UE used in this flow, if the flow connects to a mobile device",
+          "type": "string"
+        },
+        "ipProtocolType": {
+          "description": "IP protocol type e.g., TCP, UDP, RTP...",
+          "type": "string"
+        },
+        "ipVersion": {
+          "description": "IP protocol version e.g., IPv4, IPv6",
+          "type": "string"
+        },
+        "lac": {
+          "description": "location area code",
+          "type": "string"
+        },
+        "mcc": {
+          "description": "mobile country code",
+          "type": "string"
+        },
+        "mnc": {
+          "description": "mobile network code",
+          "type": "string"
+        },
+        "mobileFlowFieldsVersion": {
+          "description": "version of the mobileFlowFields block",
+          "type": "string",
+          "enum": [
+            "4.0"
+          ]
+        },
+        "msisdn": {
+          "description": "MSISDN for the subscriber UE used in this flow, as an integer, if the flow connects to a mobile device",
+          "type": "string"
+        },
+        "otherEndpointIpAddress": {
+          "description": "IP address for the other endpoint, as used for the flow being reported on",
+          "type": "string"
+        },
+        "otherEndpointPort": {
+          "description": "IP Port for the reporting entity, as used for the flow being reported on",
+          "type": "integer"
+        },
+        "otherFunctionalRole": {
+          "description": "Functional role of the other endpoint for the flow being reported on e.g., MME, S-GW, P-GW, PCRF...",
+          "type": "string"
+        },
+        "rac": {
+          "description": "routing area code",
+          "type": "string"
+        },
+        "radioAccessTechnology": {
+          "description": "Radio Access Technology e.g., 2G, 3G, LTE",
+          "type": "string"
+        },
+        "reportingEndpointIpAddr": {
+          "description": "IP address for the reporting entity, as used for the flow being reported on",
+          "type": "string"
+        },
+        "reportingEndpointPort": {
+          "description": "IP port for the reporting entity, as used for the flow being reported on",
+          "type": "integer"
+        },
+        "sac": {
+          "description": "service area code",
+          "type": "string"
+        },
+        "samplingAlgorithm": {
+          "description": "Integer identifier for the sampling algorithm or rule being applied in calculating the flow metrics if metrics are calculated based on a sample of packets, or 0 if no sampling is applied",
+          "type": "integer"
+        },
+        "tac": {
+          "description": "transport area code",
+          "type": "string"
+        },
+        "tunnelId": {
+          "description": "tunnel identifier",
+          "type": "string"
+        },
+        "vlanId": {
+          "description": "VLAN identifier used by this flow",
+          "type": "string"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "flowDirection",
+        "gtpPerFlowMetrics",
+        "ipProtocolType",
+        "ipVersion",
+        "mobileFlowFieldsVersion",
+        "otherEndpointIpAddress",
+        "otherEndpointPort",
+        "reportingEndpointIpAddr",
+        "reportingEndpointPort"
+      ]
+    },
+    "namedHashMap": {
+      "description": "a hashMap which is associated with and described by a name",
+      "type": "object",
+      "properties": {
+        "name": {
+          "type": "string"
+        },
+        "hashMap": {
+          "$ref": "#/definitions/hashMap"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "name",
+        "hashMap"
+      ]
+    },
+    "nicPerformance": {
+      "description": "describes the performance and errors of an identified network interface card",
+      "type": "object",
+      "properties": {
+        "administrativeState": {
+          "description": "administrative state",
+          "type": "string",
+          "enum": [
+            "inService",
+            "outOfService"
+          ]
+        },
+        "nicIdentifier": {
+          "description": "nic identification",
+          "type": "string"
+        },
+        "operationalState": {
+          "description": "operational state",
+          "type": "string",
+          "enum": [
+            "inService",
+            "outOfService"
+          ]
+        },
+        "receivedBroadcastPacketsAccumulated": {
+          "description": "Cumulative count of broadcast packets received as read at the end of the measurement interval",
+          "type": "number"
+        },
+        "receivedBroadcastPacketsDelta": {
+          "description": "Count of broadcast packets received within the measurement interval",
+          "type": "number"
+        },
+        "receivedDiscardedPacketsAccumulated": {
+          "description": "Cumulative count of discarded packets received as read at the end of the measurement interval",
+          "type": "number"
+        },
+        "receivedDiscardedPacketsDelta": {
+          "description": "Count of discarded packets received within the measurement interval",
+          "type": "number"
+        },
+        "receivedErrorPacketsAccumulated": {
+          "description": "Cumulative count of error packets received as read at the end of the measurement interval",
+          "type": "number"
+        },
+        "receivedErrorPacketsDelta": {
+          "description": "Count of error packets received within the measurement interval",
+          "type": "number"
+        },
+        "receivedMulticastPacketsAccumulated": {
+          "description": "Cumulative count of multicast packets received as read at the end of the measurement interval",
+          "type": "number"
+        },
+        "receivedMulticastPacketsDelta": {
+          "description": "Count of multicast packets received within the measurement interval",
+          "type": "number"
+        },
+        "receivedOctetsAccumulated": {
+          "description": "Cumulative count of octets received as read at the end of the measurement interval",
+          "type": "number"
+        },
+        "receivedOctetsDelta": {
+          "description": "Count of octets received within the measurement interval",
+          "type": "number"
+        },
+        "receivedTotalPacketsAccumulated": {
+          "description": "Cumulative count of all packets received as read at the end of the measurement interval",
+          "type": "number"
+        },
+        "receivedPercentDiscard": {
+          "description": "Percentage of discarded packets received; value = (receivedDiscardedPacketsDelta / receivedTotalPacketsDelta) x 100, if denominator is nonzero, or 0, if otherwise",
+          "type": "number"
+        },
+        "receivedPercentError": {
+          "description": "Percentage of error packets received; value = (receivedErrorPacketsDelta / receivedTotalPacketsDelta) x 100, if denominator is nonzero, or 0, if otherwise.",
+          "type": "number"
+        },
+        "receivedTotalPacketsDelta": {
+          "description": "Count of all packets received within the measurement interval",
+          "type": "number"
+        },
+        "receivedUnicastPacketsAccumulated": {
+          "description": "Cumulative count of unicast packets received as read at the end of the measurement interval",
+          "type": "number"
+        },
+        "receivedUnicastPacketsDelta": {
+          "description": "Count of unicast packets received within the measurement interval",
+          "type": "number"
+        },
+        "receivedUtilization": {
+          "description": "Percentage of utilization received; value = (receivedOctetsDelta / (speed x (lastEpochMicrosec - startEpochMicrosec))) x 100, if denominator is nonzero, or 0, if otherwise",
+          "type": "number"
+        },
+        "speed": {
+          "description": "Speed configured in mbps",
+          "type": "number"
+        },
+        "transmittedBroadcastPacketsAccumulated": {
+          "description": "Cumulative count of broadcast packets transmitted as read at the end of the measurement interval",
+          "type": "number"
+        },
+        "transmittedBroadcastPacketsDelta": {
+          "description": "Count of broadcast packets transmitted within the measurement interval",
+          "type": "number"
+        },
+        "transmittedDiscardedPacketsAccumulated": {
+          "description": "Cumulative count of discarded packets transmitted as read at the end of the measurement interval",
+          "type": "number"
+        },
+        "transmittedDiscardedPacketsDelta": {
+          "description": "Count of discarded packets transmitted within the measurement interval",
+          "type": "number"
+        },
+        "transmittedErrorPacketsAccumulated": {
+          "description": "Cumulative count of error packets transmitted as read at the end of the measurement interval",
+          "type": "number"
+        },
+        "transmittedErrorPacketsDelta": {
+          "description": "Count of error packets transmitted within the measurement interval",
+          "type": "number"
+        },
+        "transmittedMulticastPacketsAccumulated": {
+          "description": "Cumulative count of multicast packets transmitted as read at the end of the measurement interval",
+          "type": "number"
+        },
+        "transmittedMulticastPacketsDelta": {
+          "description": "Count of multicast packets transmitted within the measurement interval",
+          "type": "number"
+        },
+        "transmittedOctetsAccumulated": {
+          "description": "Cumulative count of octets transmitted as read at the end of the measurement interval",
+          "type": "number"
+        },
+        "transmittedOctetsDelta": {
+          "description": "Count of octets transmitted within the measurement interval",
+          "type": "number"
+        },
+        "transmittedTotalPacketsAccumulated": {
+          "description": "Cumulative count of all packets transmitted as read at the end of the measurement interval",
+          "type": "number"
+        },
+        "transmittedTotalPacketsDelta": {
+          "description": "Count of all packets transmitted within the measurement interval",
+          "type": "number"
+        },
+        "transmittedUnicastPacketsAccumulated": {
+          "description": "Cumulative count of unicast packets transmitted as read at the end of the measurement interval",
+          "type": "number"
+        },
+        "transmittedUnicastPacketsDelta": {
+          "description": "Count of unicast packets transmitted within the measurement interval",
+          "type": "number"
+        },
+        "transmittedPercentDiscard": {
+          "description": "Percentage of discarded packets transmitted; value = (transmittedDiscardedPacketsDelta / transmittedTotalPacketsDelta) x 100, if denominator is nonzero, or 0, if otherwise",
+          "type": "number"
+        },
+        "transmittedPercentError": {
+          "description": "Percentage of error packets transmitted; value = (transmittedErrorPacketsDelta / transmittedTotalPacketsDelta) x 100, if denominator is nonzero, or 0, if otherwise",
+          "type": "number"
+        },
+        "transmittedUtilization": {
+          "description": "Percentage of utilization transmitted; value = (transmittedOctetsDelta / (speed x (lastEpochMicrosec - startEpochMicrosec))) x 100, if denominator is nonzero, or 0, if otherwise.",
+          "type": "number"
+        },
+        "valuesAreSuspect": {
+          "description": "Indicates whether vNicPerformance values are likely inaccurate due to counter overflow or other conditions",
+          "type": "string",
+          "enum": [
+            "true",
+            "false"
+          ]
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "nicIdentifier",
+        "valuesAreSuspect"
+      ]
+    },
+    "notificationFields": {
+      "description": "notification fields",
+      "type": "object",
+      "properties": {
+        "additionalFields": {
+          "$ref": "#/definitions/hashMap"
+        },
+        "arrayOfNamedHashMap": {
+          "$ref": "#/definitions/arrayOfNamedHashMap"
+        },
+        "changeContact": {
+          "description": "identifier for a contact related to the change",
+          "type": "string"
+        },
+        "changeIdentifier": {
+          "description": "system or session identifier associated with the change",
+          "type": "string"
+        },
+        "changeType": {
+          "description": "describes what has changed for the entity",
+          "type": "string"
+        },
+        "newState": {
+          "description": "new state of the entity",
+          "type": "string"
+        },
+        "oldState": {
+          "description": "previous state of the entity",
+          "type": "string"
+        },
+        "notificationFieldsVersion": {
+          "description": "version of the notificationFields block",
+          "type": "string",
+          "enum": [
+            "2.0"
+          ]
+        },
+        "stateInterface": {
+          "description": "card or port name of the entity that changed state",
+          "type": "string"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "changeIdentifier",
+        "changeType",
+        "notificationFieldsVersion"
+      ]
+    },
+    "otherFields": {
+      "description": "fields for events belonging to the 'other' domain of the commonEventHeader domain enumeration",
+      "type": "object",
+      "properties": {
+        "arrayOfNamedHashMap": {
+          "$ref": "#/definitions/arrayOfNamedHashMap"
+        },
+        "hashMap": {
+          "$ref": "#/definitions/hashMap"
+        },
+        "jsonObjects": {
+          "$ref": "#/definitions/arrayOfJsonObject"
+        },
+        "otherFieldsVersion": {
+          "description": "version of the otherFields block",
+          "type": "string",
+          "enum": [
+            "3.0"
+          ]
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "otherFieldsVersion"
+      ]
+    },
+    "perf3gppFields": {
+      "description": "fields for 3GPP PM format events, based on 3GPP TS 28.550, belonging to the 'perf3gpp' domain of the commonEventHeader domain enumeration",
+      "type": "object",
+      "properties": {
+        "eventAddlFields": {
+          "$ref": "#/definitions/hashMap"
+        },
+        "measDataCollection": {
+          "$ref": "#/definitions/measDataCollection"
+        },
+        "perf3gppFieldsVersion": {
+          "description": "version of the perf3gppFields block",
+          "type": "string",
+          "enum": [
+            "1.0",
+            "1.0.1"
+          ]
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "measDataCollection",
+        "perf3gppFieldsVersion"
+      ]
+    },
+    "pnfRegistrationFields": {
+      "description": "hardware device registration fields",
+      "type": "object",
+      "properties": {
+        "additionalFields": {
+          "$ref": "#/definitions/hashMap"
+        },
+        "lastServiceDate": {
+          "description": "TS 32.692 dateOfLastService = date of last service; e.g. 15022017",
+          "type": "string"
+        },
+        "macAddress": {
+          "description": "MAC address of OAM interface of the unit",
+          "type": "string"
+        },
+        "manufactureDate": {
+          "description": "TS 32.692 dateOfManufacture = manufacture date of the unit; 24032016",
+          "type": "string"
+        },
+        "modelNumber": {
+          "description": "TS 32.692 versionNumber = version of the unit from vendor; e.g. AJ02.  Maps to AAI equip-model",
+          "type": "string"
+        },
+        "oamV4IpAddress": {
+          "description": "IPv4 m-plane IP address to be used by the manager to contact the PNF",
+          "type": "string",
+          "format":"ipv4"
+        },
+        "oamV6IpAddress": {
+          "description": "IPv6 m-plane IP address to be used by the manager to contact the PNF",
+          "type": "string",
+          "format":"ipv6"
+        },
+        "pnfRegistrationFieldsVersion": {
+          "description": "version of the pnfRegistrationFields block",
+          "type": "string",
+          "enum": [
+            "2.0",
+            "2.1"
+          ]
+        },
+        "serialNumber": {
+          "description": "TS 32.692 serialNumber = serial number of the unit; e.g. 6061ZW3",
+          "type": "string"
+        },
+        "softwareVersion": {
+          "description": "TS 32.692 swName = active SW running on the unit; e.g. 5gDUv18.05.201",
+          "type": "string"
+        },
+        "unitFamily": {
+          "description": "TS 32.692 vendorUnitFamilyType = general type of HW unit; e.g. BBU",
+          "type": "string"
+        },
+        "unitType": {
+          "description": "TS 32.692 vendorUnitTypeNumber = vendor name for the unit; e.g. Airscale",
+          "type": "string"
+        },
+        "vendorName": {
+          "description": "TS 32.692 vendorName = name of manufacturer; e.g. Nokia. Maps to AAI equip-vendor",
+          "type": "string"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "pnfRegistrationFieldsVersion"
+      ]
+    },
+    "processorDimmAggregateThermalMargin": {
+      "description": "intelligent platform management interface (ipmi) processor dual inline memory module aggregate thermal margin metrics",
+      "type": "object",
+      "properties": {
+        "processorDimmAggregateThermalMarginIdentifier": {
+          "description": "identifier for the aggregate thermal margin metrics from the processor dual inline memory module",
+          "type": "string"
+        },
+        "thermalMargin": {
+          "description": "the difference between the DIMM's current temperature, in celsius, and the DIMM's throttling thermal trip point",
+          "type": "number"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "processorDimmAggregateThermalMarginIdentifier",
+        "thermalMargin"
+      ]
+    },
+    "processStats": {
+      "description": "metrics on system processes",
+      "type": "object",
+      "properties": {
+        "forkRate": {
+          "description": "the number of threads created since the last reboot",
+          "type": "number"
+        },
+        "processIdentifier": {
+          "description": "processIdentifier",
+          "type": "string"
+        },
+        "psStateBlocked": {
+          "description": "the number of processes in a blocked state",
+          "type": "number"
+        },
+        "psStatePaging": {
+          "description": "the number of processes in a paging state",
+          "type": "number"
+        },
+        "psStateRunning": {
+          "description": "the number of processes in a running state",
+          "type": "number"
+        },
+        "psStateSleeping": {
+          "description": "the number of processes in a sleeping state",
+          "type": "number"
+        },
+        "psStateStopped": {
+          "description": "the number of processes in a stopped state",
+          "type": "number"
+        },
+        "psStateZombie": {
+          "description": "the number of processes in a zombie state",
+          "type": "number"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "processIdentifier"
+      ]
+    },
+    "requestError": {
+      "description": "standard request error data structure",
+      "type": "object",
+      "properties": {
+        "messageId": {
+          "description": "Unique message identifier of the format ABCnnnn where ABC is either SVC for Service Exceptions or POL for Policy Exception",
+          "type": "string"
+        },
+        "text": {
+          "description": "Message text, with replacement variables marked with %n, where n is an index into the list of <variables> elements, starting at 1",
+          "type": "string"
+        },
+        "url": {
+          "description": "Hyperlink to a detailed error resource e.g., an HTML page for browser user agents",
+          "type": "string"
+        },
+        "variables": {
+          "description": "List of zero or more strings that represent the contents of the variables used by the message text",
+          "type": "string"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "messageId",
+        "text"
+      ]
+    },
+    "sipSignalingFields": {
+      "description": "sip signaling fields",
+      "type": "object",
+      "properties": {
+        "additionalInformation": {
+          "$ref": "#/definitions/hashMap"
+        },
+        "compressedSip": {
+          "description": "the full SIP request/response including headers and bodies",
+          "type": "string"
+        },
+        "correlator": {
+          "description": "this is the same for all events on this call",
+          "type": "string"
+        },
+        "localIpAddress": {
+          "description": "IP address on xNF",
+          "type": "string"
+        },
+        "localPort": {
+          "description": "port on xNF",
+          "type": "string"
+        },
+        "remoteIpAddress": {
+          "description": "IP address of peer endpoint",
+          "type": "string"
+        },
+        "remotePort": {
+          "description": "port of peer endpoint",
+          "type": "string"
+        },
+        "sipSignalingFieldsVersion": {
+          "description": "version of the sipSignalingFields block",
+          "type": "string",
+          "enum": [
+            "3.0"
+          ]
+        },
+        "summarySip": {
+          "description": "the SIP Method or Response ('INVITE', '200 OK', 'BYE', etc)",
+          "type": "string"
+        },
+        "vendorNfNameFields": {
+          "$ref": "#/definitions/vendorNfNameFields"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "correlator",
+        "localIpAddress",
+        "localPort",
+        "remoteIpAddress",
+        "remotePort",
+        "sipSignalingFieldsVersion",
+        "vendorNfNameFields"
+      ]
+    },
+    "stateChangeFields": {
+      "description": "stateChange fields",
+      "type": "object",
+      "properties": {
+        "additionalFields": {
+          "$ref": "#/definitions/hashMap"
+        },
+        "newState": {
+          "description": "new state of the entity",
+          "type": "string",
+          "enum": [
+            "inService",
+            "maintenance",
+            "outOfService"
+          ]
+        },
+        "oldState": {
+          "description": "previous state of the entity",
+          "type": "string",
+          "enum": [
+            "inService",
+            "maintenance",
+            "outOfService"
+          ]
+        },
+        "stateChangeFieldsVersion": {
+          "description": "version of the stateChangeFields block",
+          "type": "string",
+          "enum": [
+            "4.0"
+          ]
+        },
+        "stateInterface": {
+          "description": "card or port name of the entity that changed state",
+          "type": "string"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "newState",
+        "oldState",
+        "stateChangeFieldsVersion",
+        "stateInterface"
+      ]
+    },
+    "stndDefinedFields": {
+      "description": "stndDefined fields",
+      "type": "object",
+      "properties": {
+        "schemaReference": {
+          "description": "a uri of a standards-defined JSON object schema; used to validate the stndDefinedFields.data property contents",
+          "type": "string",
+          "format": "uri"
+        },
+        "data": {
+          "description": "a native standards-defined JSON notification",
+          "type": "object"
+        },
+        "stndDefinedFieldsVersion": {
+          "description": "version of stndDefinedFields block",
+          "type": "string",
+          "enum": [
+            "1.0"
+          ]
+        }
+      },
+      "additionalProperties": false,
+        "required": [
+          "data",
+          "stndDefinedFieldsVersion"
+        ]
+    },
+    "syslogFields": {
+      "description": "sysLog fields",
+      "type": "object",
+      "properties": {
+        "additionalFields": {
+          "$ref": "#/definitions/hashMap"
+        },
+        "eventSourceHost": {
+          "description": "hostname of the device",
+          "type": "string"
+        },
+        "eventSourceType": {
+          "description": "type of event source; examples: other, router, switch, host, card, port, slotThreshold, portThreshold, virtualMachine, virtualNetworkFunction",
+          "type": "string"
+        },
+        "syslogFacility": {
+          "description": "numeric code from 0 to 23 for facility--see table in documentation",
+          "type": "integer"
+        },
+        "syslogFieldsVersion": {
+          "description": "version of the syslogFields block",
+          "type": "string",
+          "enum": [
+            "4.0"
+          ]
+        },
+        "syslogMsg": {
+          "description": "syslog message",
+          "type": "string"
+        },
+        "syslogMsgHost": {
+          "description": "hostname parsed from non-VES syslog message",
+          "type": "string"
+        },
+        "syslogPri": {
+          "description": "0-192 combined severity and facility",
+          "type": "integer"
+        },
+        "syslogProc": {
+          "description": "identifies the application that originated the message",
+          "type": "string"
+        },
+        "syslogProcId": {
+          "description": "a change in the value of this field indicates a discontinuity in syslog reporting",
+          "type": "number"
+        },
+        "syslogSData": {
+          "description": "syslog structured data consisting of a structured data Id followed by a set of key value pairs",
+          "type": "string"
+        },
+        "syslogSdId": {
+          "description": "0-32 char in format name@number for example ourSDID@32473",
+          "type": "string"
+        },
+        "syslogSev": {
+          "description": "numerical code for severity derived from syslogPri as remainder of syslogPri / 8",
+          "type": "string",
+          "enum": [
+            "Alert",
+            "Critical",
+            "Debug",
+            "Emergency",
+            "Error",
+            "Info",
+            "Notice",
+            "Warning"
+          ]
+        },
+        "syslogTag": {
+          "description": "msgId indicating the type of message such as TCPOUT or TCPIN; NILVALUE should be used when no other value can be provided",
+          "type": "string"
+        },
+        "syslogTs": {
+          "description": "timestamp parsed from non-VES syslog message",
+          "type": "string"
+        },
+        "syslogVer": {
+          "description": "IANA assigned version of the syslog protocol specification - typically 1",
+          "type": "number"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "eventSourceType",
+        "syslogFieldsVersion",
+        "syslogMsg",
+        "syslogTag"
+      ]
+    },
+    "thresholdCrossingAlertFields": {
+      "description": "fields specific to threshold crossing alert events",
+      "type": "object",
+      "properties": {
+        "additionalFields": {
+          "$ref": "#/definitions/hashMap"
+        },
+        "additionalParameters": {
+          "description": "performance counters",
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/counter"
+          }
+        },
+        "alertAction": {
+          "description": "Event action",
+          "type": "string",
+          "enum": [
+            "CLEAR",
+            "CONT",
+            "SET"
+          ]
+        },
+        "alertDescription": {
+          "description": "Unique short alert description such as IF-SHUB-ERRDROP",
+          "type": "string"
+        },
+        "alertType": {
+          "description": "Event type",
+          "type": "string",
+          "enum": [
+            "CARD-ANOMALY",
+            "ELEMENT-ANOMALY",
+            "INTERFACE-ANOMALY",
+            "SERVICE-ANOMALY"
+          ]
+        },
+        "alertValue": {
+          "description": "Calculated API value (if applicable)",
+          "type": "string"
+        },
+        "associatedAlertIdList": {
+          "description": "List of eventIds associated with the event being reported",
+          "type": "array",
+          "items": {
+            "type": "string"
+          }
+        },
+        "collectionTimestamp": {
+          "description": "Time when the performance collector picked up the data; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800",
+          "type": "string"
+        },
+        "dataCollector": {
+          "description": "Specific performance collector instance used",
+          "type": "string"
+        },
+        "elementType": {
+          "description": "type of network element - internal ATT field",
+          "type": "string"
+        },
+        "eventSeverity": {
+          "description": "event severity or priority",
+          "type": "string",
+          "enum": [
+            "CRITICAL",
+            "MAJOR",
+            "MINOR",
+            "WARNING",
+            "NORMAL"
+          ]
+        },
+        "eventStartTimestamp": {
+          "description": "Time closest to when the measurement was made; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800",
+          "type": "string"
+        },
+        "interfaceName": {
+          "description": "Physical or logical port or card (if applicable)",
+          "type": "string"
+        },
+        "networkService": {
+          "description": "network name - internal ATT field",
+          "type": "string"
+        },
+        "possibleRootCause": {
+          "description": "Reserved for future use",
+          "type": "string"
+        },
+        "thresholdCrossingFieldsVersion": {
+          "description": "version of the thresholdCrossingAlertFields block",
+          "type": "string",
+          "enum": [
+            "4.0"
+          ]
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "additionalParameters",
+        "alertAction",
+        "alertDescription",
+        "alertType",
+        "collectionTimestamp",
+        "eventSeverity",
+        "eventStartTimestamp",
+        "thresholdCrossingFieldsVersion"
+      ]
+    },
+    "vendorNfNameFields": {
+      "description": "provides vendor, nf and nfModule identifying information",
+      "type": "object",
+      "properties": {
+        "vendorName": {
+          "description": "network function vendor name",
+          "type": "string"
+        },
+        "nfModuleName": {
+          "description": "name of the nfModule generating the event",
+          "type": "string"
+        },
+        "nfName": {
+          "description": "name of the network function generating the event",
+          "type": "string"
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "vendorName"
+      ]
+    },
+    "voiceQualityFields": {
+      "description": "provides statistics related to customer facing voice products",
+      "type": "object",
+      "properties": {
+        "additionalInformation": {
+          "$ref": "#/definitions/hashMap"
+        },
+        "calleeSideCodec": {
+          "description": "callee codec for the call",
+          "type": "string"
+        },
+        "callerSideCodec": {
+          "description": "caller codec for the call",
+          "type": "string"
+        },
+        "correlator": {
+          "description": "this is the same for all events on this call",
+          "type": "string"
+        },
+        "endOfCallVqmSummaries": {
+          "$ref": "#/definitions/endOfCallVqmSummaries"
+        },
+        "phoneNumber": {
+          "description": "phone number associated with the correlator",
+          "type": "string"
+        },
+        "midCallRtcp": {
+          "description": "Base64 encoding of the binary RTCP data excluding Eth/IP/UDP headers",
+          "type": "string"
+        },
+        "vendorNfNameFields": {
+          "$ref": "#/definitions/vendorNfNameFields"
+        },
+        "voiceQualityFieldsVersion": {
+          "description": "version of the voiceQualityFields block",
+          "type": "string",
+          "enum": [
+            "4.0"
+          ]
+        }
+      },
+      "additionalProperties": false,
+      "required": [
+        "calleeSideCodec",
+        "callerSideCodec",
+        "correlator",
+        "midCallRtcp",
+        "vendorNfNameFields",
+        "voiceQualityFieldsVersion"
+      ]
+    }
+  }
+}
diff --git a/collector/evel-test-collector/docs/att_interface_definition/README.md b/collector/evel-test-collector/docs/att_interface_definition/README.md
new file mode 100644 (file)
index 0000000..0bb7c07
--- /dev/null
@@ -0,0 +1,7 @@
+NOTE: This folder contains updates for the VES 5.0 release. 
+* VNF Vendor Events ver 28.xlsx
+* AttServiceSpecAddendum-VesEventListener-EventRegistration-v1.4.docx
+* AttServiceSpecification-VesEventListener-v5.0.docx
+* CommonEventFormat_28.0.json
+
+The other files in this folder have not been updated. Compatibility with the current VES specification and code has not been verified.
\ No newline at end of file
diff --git a/collector/evel-test-collector/docs/test_collector_user_guide/README.md b/collector/evel-test-collector/docs/test_collector_user_guide/README.md
new file mode 100644 (file)
index 0000000..6ccd58d
--- /dev/null
@@ -0,0 +1 @@
+NOTE: This folder and subfolders have not been updated since the 2016-11-23 update release. Compatibility with the current VES specification and code has not been verified.
\ No newline at end of file
diff --git a/collector/evel-test-collector/docs/test_collector_user_guide/test_collector_user_guide.md b/collector/evel-test-collector/docs/test_collector_user_guide/test_collector_user_guide.md
new file mode 100644 (file)
index 0000000..5e12a90
--- /dev/null
@@ -0,0 +1,275 @@
+# AT&T Vendor Event Listener Service - Test Collector - User Guide
+
+Introduction
+============
+
+Background
+----------
+
+This document describes how to use the Test Collector application to simulate
+the service API described in "AT&T Service Specification, Service: 
+Vendor Event Listener Revision 2.11, 16-Sep-2016".
+
+Purpose
+-------
+
+This User Guide is intended to enable the reader to understand how
+    the Test Collector can be used to verify the operation of applications
+    supporting the Vendor Event Listener API.
+
+
+Realization
+===========
+
+The realization of the Test Collector is a Python script which acts as a
+server for the Vendor Event Listener API. It uses [jsonschema](https://pypi.python.org/pypi/jsonschema)
+in order to validate the received JSON events against AT&T's published
+schema for the API.
+
+The overall system architecture is shown in Figure 1 and comprises 
+    three key deliverables:
+
+*    The web-application itself.
+
+*    A Backend service.
+
+*    A validating test collector application.
+
+The Test Collector is described in more detail in the
+    following sections.  The other two components are described in separate
+    documents:
+
+*    Reference VNF User Guide
+
+*    Reference VNF Application Note
+
+Figure 1: Realization Architecture
+
+![Realization Architecture](images/architecture.png)
+
+Note that items shown in green in the diagram are existing AT&T
+    systems and do not form part of the delivery.
+
+Validating Collector
+--------------------
+
+The validating collector provides a basic test capability for
+    the Reference VNF. The application implements the Vendor Event
+    Listener API providing:
+
+-   Logging of new requests.
+
+-   Validating requests against the published schema.
+
+-   Validating the credentials provided in the request.
+
+-   Responding with a 202 Accepted for valid requests.
+
+-   Test Control endpoint allowing a test harness or user to set a pending
+    commandList, to be sent in response to the next event received.
+
+-   Responding with a 202 Accepted plus a pending commandList.
+
+-   Responding with a 401 Unauthorized error response-code and a JSON
+    exception response for failed authentication.
+
+It is intended to be used in environments where the "real" AT&T
+    Vendor Event Listener service is not available in order to test the
+    Reference VNF or, indeed, any other software which needs to send
+    events to a server.
+
+Using the Validating Collector
+==============================
+
+The test collector can be run manually, either on a Linux platform
+    or a Windows PC. It is invoked with a number of command-line
+    arguments:
+
+```
+  C:> python collector.py --config <file>
+                          --section <section>
+                          --verbose
+```
+
+Where:
+
+  -  **config** defines the path to the config file to be used.
+
+  -  **section** defines the section in the config file to be used.
+
+  -  **verbose** controls the level of logging to be generated.
+
+Wherever you chose to run the Test Collector, note that the
+    configuration of the backend service running on the VM generating
+    the events has to match so that the events generated by the backend
+    service are sent to the correct location and the Test Collector is
+    listening on the correct ports and URLs. The relevant section of the
+    Test Collector config file is:
+    
+```
+    #------------------------------------------------------------------------------
+    # Details of the Vendor Event Listener REST service.
+    #
+    # REST resources are defined with respect to a ServerRoot:
+    # ServerRoot = https://{Domain}:{Port}/{optionalRoutingPath}
+    #
+    # REST resources are of the form:
+    # * {ServerRoot}/eventListener/v{apiVersion}
+    # * {ServerRoot}/eventListener/v{apiVersion}/{topicName}
+    # * {ServerRoot}/eventListener/v{apiVersion}/eventBatch
+    # * {ServerRoot}/eventListener/v{apiVersion}/clientThrottlingState
+    #
+    # The "vel\_topic\_name" parameter is used as the "topicName" element in the path
+    # and may be empty.
+    #
+    # Note that the path, if present, should have no leading "/" but should have a
+    # trailing "/".
+    #------------------------------------------------------------------------------
+    vel_domain = 127.0.0.1
+    vel_port = 30000
+    vel_path = vendor_event_listener/
+    vel_username = Alice
+    vel_password = This isn't very secure!
+    vel_topic_name = example_vnf
+```
+The equivalent section of the backend service's configuration has to
+    match, or the equivalent parameters injected in the VM by the
+    OpenStack metadata service have to match.
+
+When events are sent from the web application, the results of the
+    validation will be displayed on stdout and be written to the log
+    file specified in the configuration file.
+
+For example: A Fault event failing to validate:
+
+```
+    <machine name>; - - [29/Feb/2016 10:58:28] "POST
+    /vendor_event_listener/eventListener/v1/example_vnf HTTP/1.1" 204  0
+    Event is not valid against schema! 'eventSeverity' is a required
+    property
+    Failed validating 'required' in
+    schema['properties']['event']['properties']['faultFields']:
+        {'description': 'fields specific to fault events',
+        'properties': {'alarmAdditionalInformation': {'description':'additional alarm information',
+                                                      'items': {'$ref': '#/definitions/field'},
+                                                      'type': 'array'},
+                       'alarmCondition': {'description': 'alarm condition reportedby the device',
+                                                      'type': 'string'},
+                       'alarmInterfaceA': {'description': 'card, port, channel or interface name of the device generating the alarm',
+                                                      'type': 'string'},
+                       'eventSeverity': {'description': 'event severity or priority',
+                                         'enum': ['CRITICAL',
+                                                   'MAJOR',
+                                                   'MINOR',
+                                                   'WARNING',
+                                                   'NORMAL'],
+                                         'type': 'string'},  
+                       'eventSourceType': {'description': 'type of event source',
+                                           'enum': ['other(0)',
+                                                     'router(1)',
+                                                     'switch(2)',
+                                                     'host(3)',
+                                                     'card(4)',
+                                                     'port(5)',
+                                                     'slotThreshold(6)',
+                                                     'portThreshold(7)',
+                                                     'virtualMachine(8)'],
+                                           'type': 'string'},
+                       'faultFieldsVersion': {'description': 'version of the faultFields block',
+                                              'type': 'number'},
+                       'specificProblem': {'description': 'short description of the alarm or problem',
+                                              'type': 'string'},
+                       'vfStatus': {'description': 'virtual function status enumeration',
+                                    'enum': ['Active',
+                                              'Idle',
+                                              'Preparing to terminate',
+                                              'Ready to terminate',
+                                              'Requesting termination'],
+                                    'type': 'string'}},
+            'required': ['alarmCondition',
+                          'eventSeverity',
+                          'eventSourceType',
+                          'specificProblem',
+                          'vfStatus'],
+            'type': 'object'}
+    On instance['event']['faultFields']:
+        {'alarmAdditionalInformation': [{'name': 'extra information',
+                                          'value': '2'},
+                                         {'name': 'more information',
+                                          'value': '1'}],
+         'alarmCondition': 'alarm condition 1',
+         'eventSourceType': 'virtualMachine(8)',
+         'faultFieldsVersion': 1,
+         'specificProblem': 'problem 1',
+         'vfStatus': 'Active'}
+    Bad JSON body decoded:
+    {
+        "event": {
+            "commonEventHeader": {
+                "domain": "fault",
+                "eventId": "6",
+                "eventType": "event type 1",
+                "functionalRole": "unknown",
+                "lastEpochMicrosec": 1456743510381000.0,
+                "priority": "Normal",
+                "reportingEntityId": "Not in OpenStack",
+                "reportingEntityName": "Not in OpenStack Environment",
+                "sequence": 0,
+                "sourceId": "Not in OpenStack",
+                "sourceName": "Not in OpenStack Environment",
+                "startEpochMicrosec": 1456743510381000.0,
+                "version": 1
+            },
+            "faultFields": {
+                "alarmAdditionalInformation": [
+                    {
+                        "name": "extra information",
+                        "value": "2"
+                    },
+                    {
+                        "name": "more information",
+                        "value": "1"
+                    }
+               ],
+               "alarmCondition": "alarm condition 1",
+               "eventSourceType": "virtualMachine(8)",
+               "faultFieldsVersion": 1,
+               "specificProblem": "problem 1",
+               "vfStatus": "Active"
+            }
+        }
+    }
+```
+
+Test Control Interface
+----------------------
+
+The test collector will accept any valid commandList on the Test Control interface,
+and will store it until the next event is received at the collector.
+At this point, it will send it to the event sender, and discard the pending commandList.
+
+For example, a POST of the following JSON will result in a measurement interval change
+command being sent to the sender of the next event.
+
+```
+{
+    "commandList": [
+        {
+            "command": {
+                "commandType": "measurementIntervalChange",
+                "measurementInterval": 60
+            }
+        }
+    ]
+}
+```
+
+A python script "test_control.py" provides an example of commandList injection,
+and contains various functions to generate example command lists.
+
+The test control script can be run manually, either on a Linux platform or a Windows PC.
+It is invoked with optional command-line arguments for the fqdn and port number of the 
+test collector to be controlled:
+```
+  C:> python test_control.py --fqdn 127.0.0.1 --port 30000
+```
diff --git a/collector/evel-test-collector/scripts/README.md b/collector/evel-test-collector/scripts/README.md
new file mode 100644 (file)
index 0000000..ab6bd99
--- /dev/null
@@ -0,0 +1 @@
+NOTE: This folder and subfolders have not been updated since the initial release. Compatibility with the current VES specification and code has not been verified.
\ No newline at end of file
diff --git a/collector/evel-test-collector/scripts/linux/go-collector.sh b/collector/evel-test-collector/scripts/linux/go-collector.sh
new file mode 100755 (executable)
index 0000000..173a9e4
--- /dev/null
@@ -0,0 +1,6 @@
+# Run the validating test collector.
+
+python ../../code/collector/collector.py \
+       --config ../../config/collector.conf \
+       --section default \
+       --verbose
diff --git a/collector/evel-test-collector/scripts/windows/go-collector.bat b/collector/evel-test-collector/scripts/windows/go-collector.bat
new file mode 100644 (file)
index 0000000..4dbf9d9
--- /dev/null
@@ -0,0 +1,8 @@
+@echo off
+REM Run the validating test collector using the [windows] section of the
+REM collector config file. Run from this script's directory (paths are relative).
+
+python ..\..\code\collector\collector.py ^
+       --config ..\..\config\collector.conf ^
+       --section windows ^
+       --verbose
diff --git a/collector/start.sh b/collector/start.sh
new file mode 100755 (executable)
index 0000000..250af34
--- /dev/null
@@ -0,0 +1,97 @@
+#!/bin/bash
+# Copyright 2017-2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#. What this is: Startup script for the OPNFV VES Collector running under docker.
+
+cd /opt/ves
+touch monitor.log
+
+sed -i -- \
+  "s~log_file = /var/log/att/collector.log~log_file = /opt/ves/collector.log~" \
+  evel-test-collector/config/collector.conf
+sed -i -- "s/vel_domain = 127.0.0.1/vel_domain = $ves_host/g" \
+  evel-test-collector/config/collector.conf
+sed -i -- "s/vel_port = 30000/vel_port = $ves_port/g" \
+  evel-test-collector/config/collector.conf
+sed -i -- "s/vel_username =/vel_username = $ves_user/g" \
+  evel-test-collector/config/collector.conf
+sed -i -- "s/vel_password =/vel_password = $ves_pass/g" \
+  evel-test-collector/config/collector.conf
+sed -i -- "s~vel_path = vendor_event_listener/~vel_path = $ves_path~g" \
+  evel-test-collector/config/collector.conf
+sed -i -- "s~vel_topic_name = example_vnf~vel_topic_name = $ves_topic~g" \
+  evel-test-collector/config/collector.conf
+sed -i -- "/vel_topic_name = /a influxdb = $ves_influxdb_host:$ves_influxdb_port" \
+  evel-test-collector/config/collector.conf
+
+echo; echo "evel-test-collector/config/collector.conf"
+cat evel-test-collector/config/collector.conf
+
+echo; echo "wait for InfluxDB API at $ves_influxdb_host:$ves_influxdb_port"
+while ! curl http://$ves_influxdb_host:$ves_influxdb_port/ping ; do
+  echo "InfluxDB API is not yet responding... waiting 10 seconds"
+  sleep 10
+done
+
+echo; echo "setup veseventsdb in InfluxDB"
+# TODO: check if pre-existing and skip
+curl -X POST http://$ves_influxdb_host:$ves_influxdb_port/query \
+  --data-urlencode "q=CREATE DATABASE veseventsdb"
+
+echo; echo "wait for Grafana API to be active"
+while ! curl http://$ves_grafana_host:$ves_grafana_port ; do
+  echo "Grafana API is not yet responding... waiting 10 seconds"
+  sleep 10
+done
+
+echo; echo "add VESEvents datasource to Grafana"
+# TODO: check if pre-existing and skip
+cat <<EOF >/opt/ves/datasource.json
+{ "name":"VESEvents",
+  "type":"influxdb",
+  "access":"direct",
+  "url":"http://$ves_influxdb_host:$ves_influxdb_port",
+  "password":"root",
+  "user":"root",
+  "database":"veseventsdb",
+  "basicAuth":false,
+  "basicAuthUser":"",
+  "basicAuthPassword":"",
+  "withCredentials":false,
+  "isDefault":false,
+  "jsonData":null
+}
+EOF
+
+curl -H "Accept: application/json" -H "Content-type: application/json" \
+  -X POST -d @/opt/ves/datasource.json \
+  http://$ves_grafana_auth@$ves_grafana_host:$ves_grafana_port/api/datasources
+
+echo; echo "add VES dashboard to Grafana"
+curl -H "Accept: application/json" -H "Content-type: application/json" \
+  -X POST -d @/opt/ves/Dashboard.json \
+  http://$ves_grafana_auth@$ves_grafana_host:$ves_grafana_port/api/dashboards/db       
+
+if [[ "$ves_loglevel" != "" ]]; then 
+  python /opt/ves/evel-test-collector/code/collector/monitor.py \
+    --config /opt/ves/evel-test-collector/config/collector.conf \
+    --influxdb $ves_influxdb_host:$ves_influxdb_port \
+    --section default > /opt/ves/monitor.log 2>&1
+else
+  python /opt/ves/evel-test-collector/code/collector/monitor.py \
+    --config /opt/ves/evel-test-collector/config/collector.conf \
+    --influxdb $ves_influxdb_host:$ves_influxdb_port \
+    --section default
+fi
diff --git a/collector/ves-start.sh b/collector/ves-start.sh
new file mode 100755 (executable)
index 0000000..ccc7fda
--- /dev/null
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+# Script to run the ves project and its dependent containers
+# Maintainer shrinivas.joshi@xoriant.com
+
+# List of containers for this project:
+#
+# collector -- reads the events received from ves-agent and writes them
+#              to influxdb
+# grafana   -- reads the events written by ves-collector to influxdb and
+#              shows the graphs on its UI
+# influxdb  -- stores the events sent by ves-agent
+
+# Stop all containers in case any are still running from a previous run.
+./ves-stop.sh
+
+# Port allotment on the host system for the micro services running in docker.
+influx_port=3330
+grafana_port=8880
+vel_ves_port=9999
+
+# Get the local ip address of the VM from the first "enp*" interface.
+local_ip=$(/sbin/ip -o -4 addr list | grep enp | head -n 1 | awk '{print $4}' | cut -d/ -f1)
+echo -e "Binding VES Services to local ip address $local_ip \n "
+echo ""
+echo -e "--------------------------------------------------------------------\n"
+
+# Spin up the influx DB container.  The host directory ./influxdb is
+# bind-mounted over the container's data directory so event data survives a
+# container restart (a bare "-v $PWD/influxdb" would only create an
+# anonymous volume and persist nothing on the host).
+echo -e "Starting influxdb container on Local Port Number $influx_port. Please wait..\n"
+if ! docker run -d -p "$influx_port:8086" \
+       -v "$PWD/influxdb:/var/lib/influxdb" influxdb
+then
+    exit 1
+fi
+sleep 5  # Give some time to spin the container and bring the service up.
+echo "Done."
+echo ""
+echo -e "--------------------------------------------------------------------\n"
+
+# Spin up the Grafana container.
+echo -e "Starting Grafana container on Local port number $grafana_port. Please wait..\n"
+if ! docker run -d -p "$grafana_port:3000" grafana/grafana
+then
+    exit 1
+fi
+sleep 5  # Give some time to spin the container and bring the service up.
+echo "Done."
+echo ""
+echo -e "--------------------------------------------------------------------\n"
+
+# Spin up the collector container, wiring it to the influxdb and Grafana
+# instances started above via environment variables.
+echo -e "Starting ves collector container on Local port number $vel_ves_port. Please wait\n"
+if ! docker run -d -e ves_influxdb_host="$local_ip" \
+       -e ves_influxdb_port="$influx_port" -e ves_grafana_host="$local_ip" \
+       -e ves_grafana_port="$grafana_port" -e ves_host="$local_ip" \
+       -e ves_port="$vel_ves_port" -e ves_grafana_auth='admin:admin' \
+       -e ves_user='user' -e ves_pass='password' -e ves_path='' \
+       -e ves_topic='events' -e ves_loglevel='DEBUG' \
+       -p "$vel_ves_port:$vel_ves_port" ves-collector
+then
+    exit 1
+fi
+sleep 6
+echo "Done."
+echo ""
+echo -e "ves stack summary\n"
+echo -e "===================================================================================================================\n"
+echo ""
+echo -e "ves collector listener port: $vel_ves_port \n"
+echo -e "Grafana port: $grafana_port \n"
+echo -e "To access the grafana dashboard paste the url http://$local_ip:$grafana_port in a web browser. "
+echo -e "Grafana username/password is admin/admin *** DO NOT CHANGE THE ADMIN PASSWORD, CLICK SKIP OPTION ***\n"
+echo ""
+echo -e "===================================================================================================================\n"
diff --git a/collector/ves-stop.sh b/collector/ves-stop.sh
new file mode 100755 (executable)
index 0000000..b6311ca
--- /dev/null
@@ -0,0 +1,4 @@
+#!/bin/bash
+# Stop every container; xargs -r skips docker stop when none are running,
+# so this is safe to call defensively (e.g. from ves-start.sh).
+echo "Stopping all containers"
+docker ps -aq | xargs -r docker stop