Enhanced Ves-Collector to support measurement events.
Issue-Id: SMO-16
Signed-off-by: santanude <santanu.de@xoriant.com>
Change-Id: Ic8e1472e65472a6d6b6aeb944734d931692e7d13
--- /dev/null
+default: all
+
+all:
+ cd agent; make
+ cd collector; make
+ cd kafka; make
+
+clean:
+ docker rm `docker ps -a -q -f status=exited`
RUN:
-There are two scripts in the collector folder. A ves-start.sh script
+There are two scripts in this folder. A ves-start.sh script
which starts the VES collector and other parts. A ves-stop.sh script
-can be used to stop the collector.
\ No newline at end of file
+can be used to stop the collector.
--- /dev/null
+# Copyright 2017 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: A Dockerfile for building an OPNFV VES Agent container image.
+#
+# Status: this is a work in progress, under test.
+#
+
+FROM ubuntu:bionic
+
+RUN mkdir /opt/ves
+
+RUN apt-get update && apt-get -y upgrade
+# Required for kafka: default-jre zookeeperd python-pip kafka-python
+# Required for building librdkafka: git build-essential
+# libpthread-stubs0-dev libssl-dev libsasl2-dev liblz4-dev
+# (or libz-dev?)
+# Required for building collectd: pkg-config
+RUN apt-get install -y default-jre zookeeperd \
+python-pip pkg-config git build-essential libpthread-stubs0-dev \
+libssl-dev libsasl2-dev liblz4-dev libz-dev
+RUN pip install kafka-python
+
+# Install VES Agent dependencies
+RUN pip install pyaml
+
+RUN mkdir /opt/ves/barometer
+ADD barometer /opt/ves/barometer
+
+COPY start.sh /opt/ves/start.sh
+ENTRYPOINT ["/bin/bash", "/opt/ves/start.sh"]
--- /dev/null
+default: all
+
+all:
+ docker build -t ves-agent .
--- /dev/null
+Original work Copyright 2016-2017 Intel Corporation
+Modified work Copyright 2021 Xoriant Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+[config]
+Domain = 127.0.0.1
+Path =
+Port = 9999
+Topic = events
+UseHttps = False
+Username = user
+Password = password
+SendEventInterval = 10
+ApiVersion = 7
+KafkaPort = 9092
+KafkaBroker = 127.0.0.1
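
ves_app.py (below) reads this [config] section with configparser and coerces each
option to the type of its built-in default. A minimal sketch of that coercion,
assuming only the option names above; everything else is illustrative:

    import configparser
    from distutils.util import strtobool

    DEFAULTS = {'Domain': '127.0.0.1', 'Port': 9999, 'UseHttps': False,
                'SendEventInterval': 10.0, 'ApiVersion': 7}

    def load_config(path, defaults=DEFAULTS):
        parser = configparser.ConfigParser()
        parser.optionxform = lambda option: option  # keep option-name case
        parser.read(path)
        config = dict(defaults)
        for key, value in parser.items('config'):
            if key in config:
                if isinstance(config[key], bool):   # check bool before int
                    config[key] = bool(strtobool(value))
                elif isinstance(config[key], (int, float)):
                    config[key] = type(config[key])(value)
                else:
                    config[key] = value
        return config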
--- /dev/null
+#
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Authors:
+# Volodymyr Mytnyk <volodymyrx.mytnyk@intel.com>
+#
+
+import yaml
+import logging
+import datetime
+import time
+from threading import RLock
+from threading import Timer
+from threading import Thread
+import re
+
+# import YAML loader
+try:
+ from yaml import CLoader as Loader
+except ImportError:
+ from yaml import Loader
+
+# import synchronized queue
+try:
+ import queue
+except ImportError:
+ import Queue as queue
+
+
+class Config(object):
+ """Configuration class used to pass config option into YAML file"""
+
+ def __init__(self, interval):
+ self.interval = interval
+
+
+class System(object):
+ """System class which provides information like host, time etc., into YAML
+ file"""
+
+ def __init__(self):
+ self.hostname = 'localhost'
+ self._id = 0
+
+ @property
+ def id(self):
+ self._id = self._id + 1
+ return self._id
+
+ @property
+ def time(self):
+ return time.time()
+
+ @property
+ def date(self):
+ return datetime.date.today().isoformat()
+
+
+class ItemIterator(object):
+ """Item iterator returned by Collector class"""
+
+ def __init__(self, collector, items):
+ """Item iterator init"""
+ logging.debug('{}:__init__()'.format(self.__class__.__name__))
+ self._items = items
+ self._collector = collector
+ self._index = 0
+
+ def __next__(self):
+ """Returns next item from the list"""
+ if self._index == len(self._items):
+ raise StopIteration
+ curr_index = self._index
+ self._index = curr_index + 1
+ return self._items[curr_index]
+
+ def __getitem__(self, key):
+ """get item by index"""
+ return self._items[key]
+
+ def __len__(self):
+ """Return length of elements"""
+ return len(self._items)
+
+ def __del__(self):
+ """Destroy iterator and unlock the collector"""
+ logging.debug('{}:__del__()'.format(self.__class__.__name__))
+ self._collector.unlock()
+
+
+class ItemObject(object):
+ """Item object returned by Collector class"""
+
+ def __init__(self, collector, hash_):
+ """Item object init"""
+ logging.debug('{}:__init__()'.format(self.__class__.__name__))
+ super(ItemObject, self).__setattr__('_collector', collector)
+ super(ItemObject, self).__setattr__('_hash', hash_)
+
+ def __setattr__(self, name, value):
+ t, item = self._collector._metrics[self._hash]
+ logging.debug('{}:__setattr__(name={}, value={})'.format(
+ self.__class__.__name__, name, value))
+ setattr(item, name, value)
+ self._collector._metrics[self._hash] = (time.time(), item)
+
+ def __del__(self):
+ """Destroy item object and unlock the collector"""
+ logging.debug('{}:__del__()'.format(self.__class__.__name__))
+ self._collector.unlock()
+
+
+class Collector(object):
+ """Thread-safe collector with aging feature"""
+
+ def __init__(self, age_timeout):
+ """Initialization"""
+ self._metrics = {}
+ self._lock = RLock()
+ self._age_timeout = age_timeout
+ self._start_age_timer()
+
+ def _start_age_timer(self):
+ """Start age timer"""
+ self._age_timer = Timer(self._age_timeout, self._on_timer)
+ self._age_timer.start()
+
+ def _stop_age_timer(self):
+ """Stop age timer"""
+ self._age_timer.cancel()
+
+ def _on_timer(self):
+ """Age timer"""
+ self._start_age_timer()
+ self._check_aging()
+
+ def _check_aging(self):
+ """Check aging time for all items"""
+ self.lock()
+ for data_hash, data in list(self._metrics.items()):
+ age, item = data
+ if ((time.time() - age) >= self._age_timeout):
+ # aging time has expired, remove the item from the collector
+ logging.debug('{}:_check_aging():value={}'.format(
+ self.__class__.__name__, item))
+ self._metrics.pop(data_hash)
+ del item
+ self.unlock()
+
+ def lock(self):
+ """Lock the collector"""
+ logging.debug('{}:lock()'.format(self.__class__.__name__))
+ self._lock.acquire()
+
+ def unlock(self):
+ """Unlock the collector"""
+ logging.debug('{}:unlock()'.format(self.__class__.__name__))
+ self._lock.release()
+
+ def get(self, hash_):
+ """Return the item as a locked ItemObject, or None if not found"""
+ self.lock()
+ if hash_ in self._metrics:
+ return ItemObject(self, hash_)
+ self.unlock()
+ return None
+
+ def add(self, item):
+ """Add an item into the collector"""
+ self.lock()
+ logging.debug('{}:add(item={})'.format(self.__class__.__name__, item))
+ self._metrics[hash(item)] = (time.time(), item)
+ self.unlock()
+
+ def items(self, select_list=[]):
+ """Returns locked (safe) item iterator"""
+ metrics = []
+ self.lock()
+ for k, item in list(self._metrics.items()):
+ _, value = item
+ for select in select_list:
+ if value.match(**select):
+ metrics.append(value)
+ return ItemIterator(self, metrics)
+
+ def destroy(self):
+ """Destroy the collector"""
+ self._stop_age_timer()
+
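+# Usage sketch for Collector (illustrative only; values are made up):
+#
+#   collector = Collector(age_timeout=20)
+#   collector.add(CollectdValue(host='node1', plugin='cpu', type_='percent',
+#                               type_instance='idle', value=99.9))
+#   it = collector.items([{'plugin': 'cpu'}])  # locked iterator
+#   for value in it:
+#       logging.debug(value)
+#   del it  # releases the collector lock
+#   collector.destroy()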
+
+class CollectdData(object):
+ """Base class for Collectd data"""
+
+ def __init__(self, host=None, plugin=None, plugin_instance=None,
+ type_=None, type_instance=None, time_=None):
+ """Class initialization"""
+ self.host = host
+ self.plugin = plugin
+ self.plugin_instance = plugin_instance
+ self.type_instance = type_instance
+ self.type = type_
+ self.time = time_
+
+ @classmethod
+ def is_regular_expression(cls, expr):
+ return len(expr) > 1 and expr[0] == '/' and expr[-1] == '/'
+
+ def match(self, **kargs):
+ # compare the metric
+ for key, value in list(kargs.items()):
+ if self.is_regular_expression(value):
+ if re.match(value[1:-1], getattr(self, key)) is None:
+ return False
+ elif value != getattr(self, key):
+ return False
+ # return match even if kargs is empty
+ return True
+
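+# match() sketch: plain values compare by equality, while values wrapped in
+# slashes ('/.../') are treated as regular expressions (illustrative only):
+#
+#   data = CollectdData(host='node1', plugin='cpu')
+#   data.match(plugin='cpu')     # True, exact match
+#   data.match(host='/^node/')   # True, regex match
+#   data.match(plugin='memory')  # False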
+
+class CollectdNotification(CollectdData):
+ """Collectd notification"""
+
+ def __init__(self, host=None, plugin=None, plugin_instance=None,
+ type_=None, type_instance=None, severity=None, message=None):
+ super(CollectdNotification, self).__init__(
+ host, plugin, plugin_instance, type_, type_instance)
+ self.severity = severity
+ self.message = message
+
+ def __repr__(self):
+ return '{}(host={}, plugin={}, plugin_instance={}, type={}, ' \
+ 'type_instance={}, severity={}, message={}, time={})'.format(
+ self.__class__.__name__, self.host, self.plugin,
+ self.plugin_instance, self.type, self.type_instance,
+ self.severity, self.message, self.time)
+
+
+class CollectdValue(CollectdData):
+ """Collectd value"""
+
+ def __init__(self, host=None, plugin=None, plugin_instance=None,
+ type_=None, type_instance=None, ds_name='value', value=None,
+ interval=None):
+ super(CollectdValue, self).__init__(
+ host, plugin, plugin_instance, type_, type_instance)
+ self.value = value
+ self.ds_name = ds_name
+ self.interval = interval
+
+ @classmethod
+ def hash_gen(cls, host, plugin, plugin_instance, type_,
+ type_instance, ds_name):
+ return hash((host, plugin, plugin_instance, type_,
+ type_instance, ds_name))
+
+ def __eq__(self, other):
+ return hash(self) == hash(other) and self.value == other.value
+
+ def __hash__(self):
+ return self.hash_gen(self.host, self.plugin, self.plugin_instance,
+ self.type, self.type_instance, self.ds_name)
+
+ def __repr__(self):
+ return '{}(host={}, plugin={}, plugin_instance={}, type={}, ' \
+ 'type_instance={}, ds_name={}, value={}, time={})'.format(
+ self.__class__.__name__, self.host, self.plugin,
+ self.plugin_instance, self.type, self.type_instance,
+ self.ds_name, self.value, self.time)
+
+
+class Item(yaml.YAMLObject):
+ """Base class to process tags like ArrayItem/ValueItem"""
+
+ @classmethod
+ def format_node(cls, mapping, metric):
+ if mapping.tag in [
+ 'tag:yaml.org,2002:str', Bytes2Kibibytes.yaml_tag,
+ Number.yaml_tag, StripExtraDash.yaml_tag]:
+ return yaml.ScalarNode(mapping.tag, mapping.value.format(**metric))
+ elif mapping.tag == 'tag:yaml.org,2002:map':
+ values = []
+ for key, value in mapping.value:
+ values.append((yaml.ScalarNode(key.tag, key.value),
+ cls.format_node(value, metric)))
+ return yaml.MappingNode(mapping.tag, values)
+ elif mapping.tag in [ArrayItem.yaml_tag, ValueItem.yaml_tag]:
+ values = []
+ for seq in mapping.value:
+ map_values = list()
+ for key, value in seq.value:
+ if key.value == 'SELECT':
+ map_values.append((yaml.ScalarNode(key.tag, key.value),
+ cls.format_node(value, metric)))
+ else:
+ map_values.append((yaml.ScalarNode(key.tag, key.value),
+ value))
+ values.append(yaml.MappingNode(seq.tag, map_values))
+ return yaml.SequenceNode(mapping.tag, values)
+ elif mapping.tag in [MapValue.yaml_tag]:
+ values = []
+ for key, value in mapping.value:
+ if key.value == 'VALUE':
+ values.append((yaml.ScalarNode(key.tag, key.value),
+ cls.format_node(value, metric)))
+ else:
+ values.append((yaml.ScalarNode(key.tag, key.value), value))
+ return yaml.MappingNode(mapping.tag, values)
+ return mapping
+
+
+class ValueItem(Item):
+ """Class to process VlaueItem tag"""
+ yaml_tag = '!ValueItem'
+
+ @classmethod
+ def from_yaml(cls, loader, node):
+ logging.debug('{}:from_yaml(loader={})'.format(cls.__name__, loader))
+ default, select, value_desc = None, list(), None
+ # find value description
+ for elem in node.value:
+ for key, value in elem.value:
+ if key.value == 'VALUE':
+ assert value_desc is None, "VALUE key already set"
+ value_desc = value
+ if key.value == 'SELECT':
+ select.append(loader.construct_mapping(value))
+ if key.value == 'DEFAULT':
+ assert default is None, "DEFAULT key already set"
+ default = loader.construct_object(value)
+ # if VALUE key isn't given, use default VALUE key
+ # format: `VALUE: !Number '{vl.value}'`
+ if value_desc is None:
+ value_desc = yaml.ScalarNode(tag='!Number', value='{vl.value}')
+ # select collectd metric based on SELECT condition
+ metrics = loader.collector.items(select)
+ assert len(metrics) < 2, \
+ 'Wrong SELECT condition {}, selected {} metrics'.format(
+ select, len(metrics))
+ if len(metrics) > 0:
+ item = cls.format_node(value_desc, {'vl': metrics[0],
+ 'system': loader.system})
+ return loader.construct_object(item)
+ # nothing has been found by SELECT condition, set to DEFAULT value.
+ assert default is not None, "No metrics selected by SELECT condition" \
+ " {} and DEFAULT key isn't set".format(select)
+ return default
+
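+# Example !ValueItem usage from a YAML schema (mirrors the host measurements
+# definition shipped with this change):
+#
+#   memorySlabRecl: !ValueItem
+#     - SELECT:
+#         plugin: memory
+#         type_instance: slab_recl
+#     - VALUE: !Bytes2Kibibytes '{vl.value}'
+#     - DEFAULT: 0
+#
+# SELECT must match at most one metric; if nothing matches, the DEFAULT
+# value is required and used instead.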
+
+class ArrayItem(Item):
+ """Class to process ArrayItem tag"""
+ yaml_tag = '!ArrayItem'
+
+ @classmethod
+ def from_yaml(cls, loader, node):
+ logging.debug('{}:from_yaml(loader={}, node={})'.format(cls.__name__,
+ loader, node))
+ # e.g.:
+ # SequenceNode(tag=u'!ArrayItem', value=[
+ # MappingNode(tag=u'tag:yaml.org,2002:map', value=[
+ # (ScalarNode(tag=u'tag:yaml.org,2002:str', value=u'SELECT'),
+ # MappingNode(tag=u'tag:yaml.org,2002:map', value=[
+ # (ScalarNode(tag=u'tag:yaml.org,2002:str', value=u'plugin'),
+ # , ...)
+ # ]), ...
+ # ), (key, value), ... ])
+ # , ... ])
+ assert isinstance(node, yaml.SequenceNode), \
+ "{} tag isn't YAML array".format(cls.__name__)
+ select, index_keys, items, item_desc = list(), list(), list(), None
+ for elem in node.value:
+ for key, value in elem.value:
+ if key.value == 'ITEM-DESC':
+ assert item_desc is None, "ITEM-DESC key already set"
+ item_desc = value
+ if key.value == 'INDEX-KEY':
+ assert len(index_keys) == 0, "INDEX-KEY key already set"
+ index_keys = loader.construct_sequence(value)
+ if key.value == 'SELECT':
+ select.append(loader.construct_mapping(value))
+ # validate item description
+ assert item_desc is not None, "Mandatory ITEM-DESC key isn't set"
+ assert len(select) > 0 or len(index_keys) > 0, \
+ "Mandatory key (INDEX-KEY or SELECT) isn't set"
+ metrics = loader.collector.items(select)
+ # select metrics based on INDEX-KEY provided
+ if len(index_keys) > 0:
+ metric_set = set()
+ for metric in metrics:
+ value = CollectdValue()
+ for key in index_keys:
+ setattr(value, key, getattr(metric, key))
+ metric_set.add(value)
+ metrics = list(metric_set)
+ # build items based on SELECT and/or INDEX-KEY criteria
+ for metric in metrics:
+ item = cls.format_node(item_desc,
+ {'vl': metric, 'system': loader.system,
+ 'config': loader.config})
+ items.append(loader.construct_mapping(item))
+ return items
+
+
+class Measurements(ArrayItem):
+ """Class to process Measurements tag"""
+ yaml_tag = '!Measurements'
+
+
+class Events(Item):
+ """Class to process Events tag"""
+ yaml_tag = '!Events'
+
+ @classmethod
+ def from_yaml(cls, loader, node):
+ condition, item_desc = dict(), None
+ for elem in node.value:
+ for key, value in elem.value:
+ if key.value == 'ITEM-DESC':
+ item_desc = value
+ if key.value == 'CONDITION':
+ condition = loader.construct_mapping(value)
+ assert item_desc is not None, "Mandatory ITEM-DESC key isn't set"
+ if loader.notification.match(**condition):
+ item = cls.format_node(item_desc, {
+ 'n': loader.notification, 'system': loader.system})
+ return loader.construct_mapping(item)
+ return None
+
+
+class Bytes2Kibibytes(yaml.YAMLObject):
+ """Class to process Bytes2Kibibytes tag"""
+ yaml_tag = '!Bytes2Kibibytes'
+
+ @classmethod
+ def from_yaml(cls, loader, node):
+ return round(float(node.value) / 1024.0, 3)
+
+
+class Number(yaml.YAMLObject):
+ """Class to process Number tag"""
+ yaml_tag = '!Number'
+
+ @classmethod
+ def from_yaml(cls, loader, node):
+ try:
+ return int(node.value)
+ except ValueError:
+ return float(node.value)
+
+
+class StripExtraDash(yaml.YAMLObject):
+ """Class to process StripExtraDash tag"""
+ yaml_tag = '!StripExtraDash'
+
+ @classmethod
+ def from_yaml(cls, loader, node):
+ return '-'.join([x for x in node.value.split('-') if len(x) > 0])
+
+
+class MapValue(yaml.YAMLObject):
+ """Class to process MapValue tag"""
+ yaml_tag = '!MapValue'
+
+ @classmethod
+ def from_yaml(cls, loader, node):
+ mapping, val = None, None
+ for key, value in node.value:
+ if key.value == 'TO':
+ mapping = loader.construct_mapping(value)
+ if key.value == 'VALUE':
+ val = loader.construct_object(value)
+ assert mapping is not None, "Mandatory TO key isn't set"
+ assert val is not None, "Mandatory VALUE key isn't set"
+ assert val in mapping, \
+ 'Value "{}" cannot be mapped to any of {} values'.format(
+ val, list(mapping.keys()))
+ return mapping[val]
+
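+# Example !MapValue usage (illustrative; key/value pairs are made up): maps a
+# selected value onto a fixed dictionary, e.g. a collectd notification
+# severity onto a VES priority:
+#
+#   priority: !MapValue
+#     VALUE: '{n.severity}'
+#     TO:
+#       OKAY: Normal
+#       WARNING: Medium
+#       FAILURE: High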
+
+class Normalizer(object):
+ """Normalization class which handles events and measurements"""
+
+ def __init__(self):
+ """Init"""
+ self.interval = None
+ self.collector = None
+ self.system = None
+ self.queue = None
+ self.timer = None
+
+ @classmethod
+ def read_configuration(cls, config_file):
+ """read YAML configuration file"""
+ # load YAML events/measurements definition
+ with open(config_file, 'r') as f:
+ doc_yaml = yaml.compose(f)
+ # split events & measurements definitions
+ measurements, events = list(), list()
+ for key, value in doc_yaml.value:
+ if value.tag == Measurements.yaml_tag:
+ measurements.append((key, value))
+ if value.tag == Events.yaml_tag:
+ events.append((key, value))
+ measurements_yaml = yaml.MappingNode('tag:yaml.org,2002:map',
+ measurements)
+ measurements_stream = yaml.serialize(measurements_yaml)
+ events_yaml = yaml.MappingNode('tag:yaml.org,2002:map', events)
+ events_stream = yaml.serialize(events_yaml)
+ # return event & measurements definition
+ return events_stream, measurements_stream
+
+ def initialize(self, config_file, interval):
+ """Initialize the class"""
+ e, m = self.read_configuration(config_file)
+ self.measurements_stream = m
+ self.events_stream = e
+ self.system = System()
+ self.config = Config(interval)
+ self.interval = interval
+ # start collector with aging time = double interval
+ self.collector = Collector(interval * 2)
+ # initialize event thread
+ self.queue = queue.Queue()
+ self.event_thread = Thread(target=self.event_worker)
+ self.event_thread.daemon = True
+ self.event_thread.start()
+ # initialize measurements timer
+ self.start_timer()
+
+ def destroy(self):
+ """Destroy the class"""
+ self.collector.destroy()
+ self.post_event(None) # send stop event
+ self.event_thread.join()
+ self.stop_timer()
+
+ def start_timer(self):
+ """Start measurements timer"""
+ self.timer = Timer(self.interval, self.on_timer)
+ self.timer.start()
+
+ def stop_timer(self):
+ """Stop measurements timer"""
+ self.timer.cancel()
+
+ def on_timer(self):
+ """Measurements timer"""
+ self.start_timer()
+ self.process_measurements()
+
+ def event_worker(self):
+ """Event worker"""
+ while True:
+ event = self.queue.get()
+ if isinstance(event, CollectdNotification):
+ self.process_notify(event)
+ continue
+ # stop event (None) received, exit the worker
+ break
+
+ def get_collector(self):
+ """Get metric collector reference"""
+ return self.collector
+
+ def process_measurements(self):
+ """Process measurements"""
+ loader = yaml.Loader(self.measurements_stream)
+ setattr(loader, 'collector', self.collector)
+ setattr(loader, 'system', self.system)
+ setattr(loader, 'config', self.config)
+ measurements = loader.get_data()
+ for measurement_name in measurements:
+ logging.debug('Process "{}" measurements: {}'.format(
+ measurement_name, measurements[measurement_name]))
+ for measurement in measurements[measurement_name]:
+ self.send_data(measurement)
+
+ def process_notify(self, notification):
+ """Process events"""
+ loader = Loader(self.events_stream)
+ setattr(loader, 'notification', notification)
+ setattr(loader, 'system', self.system)
+ notifications = loader.get_data()
+ for notify_name in notifications:
+ logging.debug('Process "{}" notification'.format(notify_name))
+ if notifications[notify_name] is not None:
+ self.send_data(notifications[notify_name])
+
+ def send_data(self, data):
+ """Send data"""
+ assert False, 'send_data() is an abstract method and MUST be overridden'
+
+ def post_event(self, notification):
+ """Post notification into the queue to process"""
+ self.queue.put(notification)
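
normalizer.py is consumed by subclassing: Normalizer drives the timers, the event
queue and the YAML rendering, and send_data() is the one abstract hook a subclass
must override (ves_app.py below does exactly this to POST events to a VES
listener). A minimal sketch of such a subclass; every name other than Normalizer
is illustrative:

    import json
    import logging

    from normalizer import Normalizer


    class LoggingApp(Normalizer):
        """Hypothetical Normalizer subclass that just logs events."""

        def send_data(self, data):
            # Called once per normalized measurement/event dictionary.
            logging.info('event: %s', json.dumps(data))


    app = LoggingApp()
    app.initialize('host.yaml', interval=10)  # schema file name illustrative
    # run until interrupted, then:
    app.destroy()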
--- /dev/null
+#!/usr/bin/env python
+#
+# Copyright(c) 2017-2019 Intel Corporation and OPNFV. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import json
+import sys
+import base64
+import logging
+import argparse
+
+try:
+ import configparser
+except ImportError:
+ import ConfigParser as configparser
+
+from distutils.util import strtobool
+from kafka import KafkaConsumer
+
+from normalizer import Normalizer
+from normalizer import CollectdValue
+
+try:
+ # For Python 3.0 and later
+ import urllib.request as url
+except ImportError:
+ # Fall back to Python 2's urllib2
+ import urllib2 as url
+
+
+class VESApp(Normalizer):
+ """VES Application"""
+
+ def __init__(self):
+ """Application initialization"""
+ self._app_config = {
+ 'Domain': '127.0.0.1',
+ 'Port': 9999,
+ 'Path': '',
+ 'Username': 'user',
+ 'Password': 'password',
+ 'Topic': 'events',
+ 'UseHttps': False,
+ 'SendEventInterval': 10.0,
+ 'ApiVersion': 5,
+ 'KafkaPort': 9092,
+ 'KafkaBroker': 'mykafka'
+ }
+
+ def send_data(self, event):
+ """Send event to VES"""
+ server_url = "http{}://{}:{}{}/eventListener/v{}{}".format(
+ 's' if self._app_config['UseHttps'] else '',
+ self._app_config['Domain'], int(self._app_config['Port']),
+ '{}'.format('/{}'.format(self._app_config['Path']) if len(
+ self._app_config['Path']) > 0 else ''),
+ int(self._app_config['ApiVersion']), '{}'.format(
+ '/{}'.format(self._app_config['Topic']) if len(
+ self._app_config['Topic']) > 0 else ''))
+ logging.info('Vendor Event Listener is at: {}'.format(server_url))
+ credentials = base64.b64encode('{}:{}'.format(
+ self._app_config['Username'],
+ self._app_config['Password']).encode()).decode()
+ logging.info('Authentication credentials are: {}'.format(credentials))
+ try:
+ request = url.Request(server_url)
+ request.add_header('Authorization', 'Basic {}'.format(credentials))
+ request.add_header('Content-Type', 'application/json')
+ event_str = json.dumps(event).encode()
+ logging.debug("Sending {} to {}".format(event_str, server_url))
+ url.urlopen(request, event_str, timeout=1)
+ logging.debug("Sent data to {} successfully".format(server_url))
+ except url.HTTPError as e:
+ logging.error('Vendor Event Listener exception: {}'.format(e))
+ except url.URLError as e:
+ logging.error(
+ 'Vendor Event Listener is not reachable: {}'.format(e))
+ except Exception as e:
+ logging.error('Vendor Event Listener error: {}'.format(e))
+
+ def config(self, config):
+ """VES option configuration"""
+ for key, value in config.items('config'):
+ if key in self._app_config:
+ try:
+ if type(self._app_config[key]) == int:
+ value = int(value)
+ elif type(self._app_config[key]) == float:
+ value = float(value)
+ elif type(self._app_config[key]) == bool:
+ value = bool(strtobool(value))
+
+ if isinstance(value, type(self._app_config[key])):
+ self._app_config[key] = value
+ else:
+ logging.error("Type mismatch with %s" % key)
+ sys.exit()
+ except ValueError:
+ logging.error("Incorrect value type for %s" % key)
+ sys.exit()
+ else:
+ logging.error("Incorrect key configuration %s" % key)
+ sys.exit()
+
+ def init(self, configfile, schema_file):
+ if configfile is not None:
+ # read VES configuration file if provided
+ config = configparser.ConfigParser()
+ config.optionxform = lambda option: option
+ config.read(configfile)
+ self.config(config)
+ # initialize normalizer
+ self.initialize(schema_file, self._app_config['SendEventInterval'])
+
+ def run(self):
+ """Consumer JSON data from kafka broker"""
+ kafka_server = '{}:{}'.format(
+ self._app_config.get('KafkaBroker'),
+ self._app_config.get('KafkaPort'))
+ consumer = KafkaConsumer(
+ 'collectd', bootstrap_servers=kafka_server,
+ auto_offset_reset='latest', enable_auto_commit=False,
+ value_deserializer=lambda m: json.loads(m.decode('ascii')))
+
+ for message in consumer:
+ for kafka_data in message.value:
+ # {
+ # u'dstypes': [u'derive'],
+ # u'plugin': u'cpu',
+ # u'dsnames': [u'value'],
+ # u'interval': 10.0,
+ # u'host': u'localhost',
+ # u'values': [99.9978996416267],
+ # u'time': 1502114956.244,
+ # u'plugin_instance': u'44',
+ # u'type_instance': u'idle',
+ # u'type': u'cpu'
+ # }
+ logging.debug('{}:run():data={}'.format(
+ self.__class__.__name__, kafka_data))
+ for index, ds_name in enumerate(kafka_data['dsnames']):
+ val_hash = CollectdValue.hash_gen(
+ kafka_data['host'], kafka_data['plugin'],
+ kafka_data['plugin_instance'], kafka_data['type'],
+ kafka_data['type_instance'], ds_name)
+ collector = self.get_collector()
+ val = collector.get(val_hash)
+ if val:
+ # update the value
+ val.value = kafka_data['values'][index]
+ val.time = kafka_data['time']
+ del val
+ else:
+ # add new value into the collector
+ val = CollectdValue()
+ val.host = kafka_data['host']
+ val.plugin = kafka_data['plugin']
+ val.plugin_instance = kafka_data['plugin_instance']
+ val.type = kafka_data['type']
+ val.type_instance = kafka_data['type_instance']
+ val.value = kafka_data['values'][index]
+ val.interval = kafka_data['interval']
+ val.time = kafka_data['time']
+ val.ds_name = ds_name
+ collector.add(val)
+
+
+def main():
+ # Parsing cmdline options
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--events-schema", dest="schema", required=True,
+ help="YAML events schema definition", metavar="FILE")
+ parser.add_argument("--config", dest="configfile", default=None,
+ help="Specify config file", metavar="FILE")
+ parser.add_argument("--loglevel", dest="level", default='INFO',
+ choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
+ help="Specify log level (default: %(default)s)",
+ metavar="LEVEL")
+ parser.add_argument("--logfile", dest="logfile", default='ves_app.log',
+ help="Specify log file (default: %(default)s)",
+ metavar="FILE")
+ args = parser.parse_args()
+
+ # Create log file
+ logging.basicConfig(filename=args.logfile,
+ format='%(asctime)s %(message)s',
+ level=args.level)
+ if args.configfile is None:
+ logging.warning("No configfile specified, using default options")
+
+ # Create Application Instance
+ application_instance = VESApp()
+ application_instance.init(args.configfile, args.schema)
+
+ try:
+ # Run the plugin
+ application_instance.run()
+ except KeyboardInterrupt:
+ logging.info(" - Ctrl-C handled, exiting gracefully")
+ except Exception as e:
+ logging.error('{}, {}'.format(type(e), e))
+ finally:
+ application_instance.destroy()
+ sys.exit()
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+---
+# Common event header definition (required fields and defaults)
+commonEventHeader: &commonEventHeader
+ domain: "measurement"
+ eventId: "{system.id}"
+ eventName: ""
+ eventType: Info
+ lastEpochMicrosec: 0
+ priority: Normal
+ reportingEntityId: &reportingEntityId "{system.hostname}"
+ reportingEntityName: *reportingEntityId
+ sequence: 0
+ sourceName: N/A
+ startEpochMicrosec: 0
+ version: "4.0"
+ vesEventListenerVersion: "7.2.1"
+
+# Host measurements definition
+Host Measurements: !Measurements
+ - ITEM-DESC:
+ event:
+ commonEventHeader: &hostCommonEventHeader
+ <<: *commonEventHeader
+ eventType: platform
+ domain: measurement
+ sourceId: &sourceId "{vl.host}"
+ sourceName: *sourceId
+ startEpochMicrosec: !Number "{vl.time}"
+ measurementFields: &hostMeasurementFields
+ measurementFieldsVersion: "4.0"
+ measurementInterval: !Number "{vl.interval}"
+ loadArray: !ArrayItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: load
+ type: load
+ ds_name: midterm
+ - ITEM-DESC:
+ midTerm : !Number "{vl.value}"
+ shortTerm : !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: load
+ type: load
+ ds_name: shortterm
+ longTerm : !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: load
+ type: load
+ ds_name: longterm
+ memoryUsageArray: !ArrayItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: memory
+ type: memory
+ type_instance: free
+ - ITEM-DESC:
+ vmIdentifier: "{vl.host}"
+ memoryFree: !Number "{vl.value}"
+ memoryUsed: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: memory
+ type: memory
+ type_instance: used
+ - VALUE: !Bytes2Kibibytes "{vl.value}"
+ memoryBuffered: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: memory
+ type: memory
+ type_instance: buffered
+ - VALUE: !Bytes2Kibibytes "{vl.value}"
+ memoryCached: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: memory
+ type: memory
+ type_instance: cached
+ - VALUE: !Bytes2Kibibytes "{vl.value}"
+ memorySlabRecl: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: memory
+ type: memory
+ type_instance: slab_recl
+ - VALUE: !Bytes2Kibibytes "{vl.value}"
+ - DEFAULT: 0
+ memorySlabUnrecl: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: memory
+ type: memory
+ type_instance: slab_unrecl
+ - VALUE: !Bytes2Kibibytes "{vl.value}"
+ - DEFAULT: 0
+ cpuUsageArray: !ArrayItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: cpu
+ type: percent
+ type_instance: idle
+ - ITEM-DESC:
+ cpuIdentifier: "{vl.plugin_instance}"
+ cpuIdle: !Number "{vl.value}"
+ percentUsage: 0.0
+ cpuUsageUser: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: cpu
+ plugin_instance: "{vl.plugin_instance}"
+ type: percent
+ type_instance: user
+ cpuWait: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: cpu
+ plugin_instance: "{vl.plugin_instance}"
+ type: percent
+ type_instance: wait
+ cpuUsageInterrupt: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: cpu
+ plugin_instance: "{vl.plugin_instance}"
+ type: percent
+ type_instance: interrupt
+ cpuUsageNice: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: cpu
+ plugin_instance: "{vl.plugin_instance}"
+ type: percent
+ type_instance: nice
+ cpuUsageSoftIrq: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: cpu
+ plugin_instance: "{vl.plugin_instance}"
+ type: percent
+ type_instance: softirq
+ cpuUsageSteal: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: cpu
+ plugin_instance: "{vl.plugin_instance}"
+ type: percent
+ type_instance: steal
+ cpuUsageSystem: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: cpu
+ plugin_instance: "{vl.plugin_instance}"
+ type: percent
+ type_instance: system
+ nicPerformanceArray: !ArrayItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: interface
+ type: if_packets
+ ds_name: rx
+ - ITEM-DESC:
+ valuesAreSuspect: "true"
+ nicIdentifier: "{vl.plugin_instance}"
+ receivedTotalPacketsAccumulated: !Number "{vl.value}"
+ transmittedTotalPacketsAccumulated: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: interface
+ plugin_instance: "{vl.plugin_instance}"
+ type: if_packets
+ ds_name: tx
+ receivedOctetsAccumulated: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: interface
+ plugin_instance: "{vl.plugin_instance}"
+ type: if_octets
+ ds_name: rx
+ transmittedOctetsAccumulated: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: interface
+ plugin_instance: "{vl.plugin_instance}"
+ type: if_octets
+ ds_name: tx
+ receivedErrorPacketsAccumulated: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: interface
+ plugin_instance: "{vl.plugin_instance}"
+ type: if_errors
+ ds_name: rx
+ transmittedErrorPacketsAccumulated: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: interface
+ plugin_instance: "{vl.plugin_instance}"
+ type: if_errors
+ ds_name: tx
+ receivedDiscardedPacketsAccumulated: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: interface
+ plugin_instance: "{vl.plugin_instance}"
+ type: if_dropped
+ ds_name: rx
+ transmittedDiscardedPacketsAccumulated: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: interface
+ plugin_instance: "{vl.plugin_instance}"
+ type: if_dropped
+ ds_name: tx
+ diskUsageArray: !ArrayItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: disk
+ type: disk_octets
+ ds_name: read
+ - ITEM-DESC:
+ diskIdentifier: "{vl.plugin_instance}"
+ diskOctetsReadLast: !Number "{vl.value}"
+ diskOctetsWriteLast: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: disk
+ plugin_instance: "{vl.plugin_instance}"
+ type: disk_octets
+ ds_name: write
+ diskOpsReadLast: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: disk
+ plugin_instance: "{vl.plugin_instance}"
+ type: disk_ops
+ ds_name: read
+ diskOpsWriteLast: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: disk
+ plugin_instance: "{vl.plugin_instance}"
+ type: disk_ops
+ ds_name: write
+ diskIoTimeLast: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: disk
+ plugin_instance: "{vl.plugin_instance}"
+ type: disk_io_time
+ ds_name: io_time
+ - DEFAULT: 0
+ diskMergedReadLast: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: disk
+ plugin_instance: "{vl.plugin_instance}"
+ type: disk_merged
+ ds_name: read
+ - DEFAULT: 0
+ diskMergedWriteLast: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: disk
+ plugin_instance: "{vl.plugin_instance}"
+ type: disk_merged
+ ds_name: write
+ - DEFAULT: 0
+ diskTimeReadLast: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: disk
+ plugin_instance: "{vl.plugin_instance}"
+ type: disk_time
+ ds_name: read
+ - DEFAULT: 0
+ diskTimeWriteLast: !ValueItem
+ - SELECT:
+ host: "{vl.host}"
+ plugin: disk
+ plugin_instance: "{vl.plugin_instance}"
+ type: disk_time
+ ds_name: write
+ - DEFAULT: 0
+ - SELECT:
+ plugin: memory
+ type_instance: free
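
Each rendering pass over this definition emits one VES measurement event per
host, shaped roughly as below (a sketch with illustrative values, not captured
output):

    event = {
        "event": {
            "commonEventHeader": {
                "domain": "measurement",
                "eventType": "platform",
                "sourceId": "node1",
                "sourceName": "node1",
                "version": "4.0",
                "vesEventListenerVersion": "7.2.1",
            },
            "measurementFields": {
                "measurementFieldsVersion": "4.0",
                "measurementInterval": 10,
                "loadArray": [
                    {"shortTerm": 0.05, "midTerm": 0.10, "longTerm": 0.15},
                ],
            },
        },
    }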
--- /dev/null
+Original work Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
+Modified work Copyright 2021 Xoriant Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+#!/bin/bash
+# Copyright 2017 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#. What this is: Startup script for the OPNFV VES Agent running under docker.
+
+echo "$ves_kafka_host $ves_kafka_hostname" >>/etc/hosts
+echo "ves_kafka_hostname=$ves_kafka_hostname"
+echo "*** /etc/hosts ***"
+cat /etc/hosts
+
+cd /opt/ves/barometer/3rd_party/collectd-ves-app/ves_app
+cat <<EOF >ves_app_config.conf
+[config]
+Domain = $ves_host
+Port = $ves_port
+Path = $ves_path
+Topic = $ves_topic
+UseHttps = $ves_https
+Username = $ves_user
+Password = $ves_pass
+SendEventInterval = $ves_interval
+ApiVersion = $ves_version
+KafkaPort = $ves_kafka_port
+KafkaBroker = $ves_kafka_host
+EOF
+
+cat ves_app_config.conf
+echo "ves_mode=$ves_mode"
+
+if [[ "$ves_loglevel" == "" ]]; then
+ ves_loglevel=ERROR
+fi
+
+python ves_app.py --events-schema=$ves_mode.yaml --loglevel $ves_loglevel \
+ --config=ves_app_config.conf
+
+# Dump ves_app.log if the command above exits (fails)
+echo "*** ves_app.log ***"
+cat ves_app.log
"id": null,
"links": [],
"refresh": "10s",
- "rows": [
+ "panels": [
{
- "collapse": false,
- "height": 401,
- "panels": [
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "VESEvents",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {
+ "leftLogBase": 1,
+ "leftMax": null,
+ "leftMin": null,
+ "rightLogBase": 1,
+ "rightMax": null,
+ "rightMin": null
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 0,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "id": 3,
+ "interval": "30s",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
{
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "VESEvents",
- "fill": 1,
- "grid": {
- "leftLogBase": 1,
- "leftMax": null,
- "leftMin": null,
- "rightLogBase": 1,
- "rightMax": null,
- "rightMin": null
- },
- "id": 3,
- "interval": "30s",
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": true,
- "max": true,
- "min": true,
- "rightSide": false,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
- "targets": [
+ "alias": "",
+ "dsType": "influxdb",
+ "groupBy": [
{
- "alias": "",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "system"
- ],
- "type": "tag"
- }
- ],
- "measurement": "load",
- "orderByTime": "ASC",
- "policy": "default",
- "query": "SELECT mean(\"cpusystem\") FROM \"cpu\" WHERE $timeFilter GROUP BY time(1m) fill(null)",
- "refId": "B",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "load-shortterm"
- ],
- "type": "field"
- },
- {
- "params": [
- "5"
- ],
- "type": "moving_average"
- }
- ]
+ "params": [
+ "system"
],
- "tags": [
- {
- "key": "system",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
+ "type": "tag"
}
],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "host load",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "x-axis": true,
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "y-axis": true,
- "y_formats": [
- "short",
- "short"
+ "measurement": "measurementload",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT moving_average(\"longTerm\", 5) AS \"alias\", moving_average(\"midTerm\", 5), moving_average(\"shortTerm\", 5) FROM \"measurementload\" WHERE (\"system\" =~ /^$host$/) AND $timeFilter GROUP BY \"system\"",
+ "rawQuery": false,
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "longTerm"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [
+ "5"
+ ],
+ "type": "moving_average"
+ },
+ {
+ "params": [
+ "Long Term"
+ ],
+ "type": "alias"
+ }
+ ],
+ [
+ {
+ "params": [
+ "midTerm"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [
+ "5"
+ ],
+ "type": "moving_average"
+ },
+ {
+ "params": [
+ "Mid Term"
+ ],
+ "type": "alias"
+ }
+ ],
+ [
+ {
+ "params": [
+ "shortTerm"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [
+ "5"
+ ],
+ "type": "moving_average"
+ },
+ {
+ "params": [
+ "Short Term"
+ ],
+ "type": "alias"
+ }
+ ]
],
- "yaxes": [
- {
- "format": "short",
- "label": "Percent",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
+ "tags": [
{
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ "key": "system",
+ "operator": "=~",
+ "value": "/^$host$/"
}
]
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "host load",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "x-axis": true,
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "y-axis": true,
+ "y_formats": [
+ "short",
+ "short"
+ ],
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "Percent",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "VESEvents",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
},
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {
+ "leftLogBase": 1,
+ "leftMax": null,
+ "leftMin": null,
+ "rightLogBase": 1,
+ "rightMax": null,
+ "rightMin": null
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 12,
+ "x": 12,
+ "y": 0
+ },
+ "hiddenSeries": false,
+ "id": 6,
+ "interval": "30s",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
{
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "VESEvents",
- "fill": 1,
- "grid": {
- "leftLogBase": 1,
- "leftMax": null,
- "leftMin": null,
- "rightLogBase": 1,
- "rightMax": null,
- "rightMin": null
- },
- "id": 6,
- "interval": "30s",
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": true,
- "max": true,
- "min": true,
- "rightSide": false,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
- "targets": [
+ "alias": "",
+ "dsType": "influxdb",
+ "groupBy": [
{
- "alias": "",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "system"
- ],
- "type": "tag"
- },
- {
- "params": [
- "cpu"
- ],
- "type": "tag"
- }
+ "params": [
+ "system"
],
- "measurement": "cpuUsage",
- "orderByTime": "ASC",
- "policy": "default",
- "query": "SELECT mean(\"cpusystem\") FROM \"cpu\" WHERE $timeFilter GROUP BY time(1m) fill(null)",
- "refId": "B",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "cpuUsageUser"
- ],
- "type": "field"
- },
- {
- "params": [
- "5"
- ],
- "type": "moving_average"
- }
- ]
+ "type": "tag"
+ },
+ {
+ "params": [
+ "cpuIdentifier"
],
- "tags": [
- {
- "key": "system",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
+ "type": "tag"
}
],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "host CPU Usage User",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "x-axis": true,
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "y-axis": true,
- "y_formats": [
- "short",
- "short"
+ "measurement": "measurementcpuusage",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT mean(\"cpusystem\") FROM \"cpu\" WHERE $timeFilter GROUP BY time(1m) fill(null)",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "cpuUsageUser"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [
+ "5"
+ ],
+ "type": "moving_average"
+ }
+ ]
],
- "yaxes": [
+ "tags": [
{
- "format": "short",
- "label": "Percent",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ "key": "system",
+ "operator": "=~",
+ "value": "/^$host$/"
}
]
}
],
- "repeat": null,
- "repeatIteration": null,
- "repeatRowId": null,
- "showTitle": false,
- "title": "Dashboard Row",
- "titleSize": "h6"
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "host CPU Usage User",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "x-axis": true,
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "y-axis": true,
+ "y_formats": [
+ "short",
+ "short"
+ ],
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "Percent",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
},
{
- "collapse": false,
- "height": 442,
- "panels": [
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "VESEvents",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {
+ "leftLogBase": 1,
+ "leftMax": null,
+ "leftMin": null,
+ "rightLogBase": 1,
+ "rightMax": null,
+ "rightMin": null
+ },
+ "gridPos": {
+ "h": 12,
+ "w": 12,
+ "x": 0,
+ "y": 11
+ },
+ "hiddenSeries": false,
+ "id": 2,
+ "interval": "30s",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
{
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "VESEvents",
- "fill": 1,
- "grid": {
- "leftLogBase": 1,
- "leftMax": null,
- "leftMin": null,
- "rightLogBase": 1,
- "rightMax": null,
- "rightMin": null
- },
- "id": 2,
- "interval": "30s",
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": true,
- "max": true,
- "min": true,
- "rightSide": false,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 2,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
- "targets": [
+ "alias": "",
+ "dsType": "influxdb",
+ "groupBy": [
{
- "alias": "",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "system"
- ],
- "type": "tag"
- },
- {
- "params": [
- "vnic"
- ],
- "type": "tag"
- }
+ "params": [
+ "system"
],
- "measurement": "vNicPerformance",
- "orderByTime": "ASC",
- "policy": "default",
- "query": "SELECT derivative(mean(\"rxoctetsacc\"), 10s) FROM \"vnic\" WHERE \"system\" = 'computehost' AND $timeFilter GROUP BY time(1m) fill(null)",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "receivedTotalPacketsAccumulated"
- ],
- "type": "field"
- },
- {
- "params": [
- "5"
- ],
- "type": "moving_average"
- }
- ]
+ "type": "tag"
+ },
+ {
+ "params": [
+ "nicIdentifier"
],
- "tags": [
- {
- "key": "system",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
+ "type": "tag"
}
],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Received Octets",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "x-axis": true,
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "y-axis": true,
- "y_formats": [
- "short",
- "short"
+ "measurement": "measurementnicperformance",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "query": "SELECT moving_average(\"receivedTotalPacketsAccumulated\", 5) FROM \"measurementnicperformance\" WHERE (\"system\" =~ /^$host$/) AND $timeFilter GROUP BY \"system\", \"nicIdentifier\"",
+ "rawQuery": false,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "receivedTotalPacketsAccumulated"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [
+ "5"
+ ],
+ "type": "moving_average"
+ }
+ ]
],
- "yaxes": [
- {
- "format": "short",
- "label": "Octets/Packets",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
+ "tags": [
{
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ "key": "system",
+ "operator": "=~",
+ "value": "/^$host$/"
}
]
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Received Octets",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "x-axis": true,
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "y-axis": true,
+ "y_formats": [
+ "short",
+ "short"
+ ],
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "Octets/Packets",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "VESEvents",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
},
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "grid": {
+ "leftLogBase": 1,
+ "leftMax": null,
+ "leftMin": null,
+ "rightLogBase": 1,
+ "rightMax": null,
+ "rightMin": null
+ },
+ "gridPos": {
+ "h": 12,
+ "w": 12,
+ "x": 12,
+ "y": 11
+ },
+ "hiddenSeries": false,
+ "id": 4,
+ "interval": "30s",
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "rightSide": false,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 2,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
{
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "VESEvents",
- "fill": 1,
- "grid": {
- "leftLogBase": 1,
- "leftMax": null,
- "leftMin": null,
- "rightLogBase": 1,
- "rightMax": null,
- "rightMin": null
- },
- "id": 4,
- "interval": "30s",
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": true,
- "max": true,
- "min": true,
- "rightSide": false,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 2,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
- "targets": [
+ "alias": "",
+ "dsType": "influxdb",
+ "groupBy": [
{
- "alias": "",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "system"
- ],
- "type": "tag"
- },
- {
- "params": [
- "vnic"
- ],
- "type": "tag"
- }
+ "params": [
+ "system"
],
- "measurement": "vNicPerformance",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "B",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "receivedOctetsAccumulated"
- ],
- "type": "field"
- },
- {
- "params": [
- "5"
- ],
- "type": "moving_average"
- }
- ]
+ "type": "tag"
+ },
+ {
+ "params": [
+ "nicIdentifier"
],
- "tags": [
- {
- "key": "system",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
+ "type": "tag"
}
],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Transmitted Octets",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "x-axis": true,
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "y-axis": true,
- "y_formats": [
- "short",
- "short"
+ "measurement": "measurementnicperformance",
+ "orderByTime": "ASC",
+ "policy": "default",
+ "refId": "B",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "receivedOctetsAccumulated"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [
+ "5"
+ ],
+ "type": "moving_average"
+ }
+ ]
],
- "yaxes": [
- {
- "format": "short",
- "label": "Octets/Packets",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
+ "tags": [
{
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ "key": "system",
+ "operator": "=~",
+ "value": "/^$host$/"
}
]
}
],
- "repeat": null,
- "repeatIteration": null,
- "repeatRowId": null,
- "showTitle": false,
- "title": "Dashboard Row",
- "titleSize": "h6"
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Transmitted Octets",
+ "tooltip": {
+ "shared": true,
+ "sort": 2,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "x-axis": true,
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "y-axis": true,
+ "y_formats": [
+ "short",
+ "short"
+ ],
+ "yaxes": [
+ {
+ "format": "short",
+ "label": "Octets/Packets",
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
},
{
- "collapse": false,
- "height": 362,
- "panels": [
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "VESEvents",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 10,
+ "w": 12,
+ "x": 0,
+ "y": 23
+ },
+ "hiddenSeries": false,
+ "id": 7,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
{
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "VESEvents",
- "fill": 1,
- "id": 7,
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": true,
- "max": true,
- "min": true,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
- "targets": [
+ "alias": "",
+ "dsType": "influxdb",
+ "groupBy": [
{
- "alias": "",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "system"
- ],
- "type": "tag"
- },
- {
- "params": [
- "disk"
- ],
- "type": "tag"
- }
+ "params": [
+ "system"
],
- "measurement": "diskUsage",
- "orderByTime": "ASC",
- "policy": "autogen",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "diskOpsWriteLast"
- ],
- "type": "field"
- },
- {
- "params": [
- "5"
- ],
- "type": "moving_average"
- }
- ]
+ "type": "tag"
+ },
+ {
+ "params": [
+ "diskIdentifier"
],
- "tags": [
- {
- "key": "system",
- "operator": "=~",
- "value": "/^$host$/"
- },
- {
- "condition": "AND",
- "key": "disk",
- "operator": "=",
- "value": "sda"
- }
- ]
+ "type": "tag"
}
],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Disk Usage SDA",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
+ "measurement": "measurementdiskusage",
+ "orderByTime": "ASC",
+ "policy": "autogen",
+ "query": "SELECT moving_average(\"diskOpsWriteLast\", 5) FROM \"autogen\".\"diskUsage\" WHERE (\"system\" =~ /^$host$/ AND \"disk\" = 'sda') AND $timeFilter GROUP BY \"system\", \"disk\"",
+ "rawQuery": false,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "diskOpsWriteLast"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [
+ "5"
+ ],
+ "type": "moving_average"
+ }
+ ]
+ ],
+ "tags": [
{
- "format": "short",
- "label": null,
- "logBase": 10,
- "max": null,
- "min": null,
- "show": true
+ "key": "system",
+ "operator": "=~",
+ "value": "/^$host$/"
},
{
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ "condition": "AND",
+ "key": "diskIdentifier",
+ "operator": "=",
+ "value": "sda"
}
]
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Disk Usage SDA",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 10,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "VESEvents",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
},
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 10,
+ "w": 12,
+ "x": 12,
+ "y": 23
+ },
+ "hiddenSeries": false,
+ "id": 8,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
{
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "VESEvents",
- "fill": 1,
- "id": 8,
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": true,
- "max": true,
- "min": true,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "span": 6,
- "stack": false,
- "steppedLine": false,
- "targets": [
+ "alias": "",
+ "dsType": "influxdb",
+ "groupBy": [
{
- "alias": "",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "system"
- ],
- "type": "tag"
- },
- {
- "params": [
- "disk"
- ],
- "type": "tag"
- }
+ "params": [
+ "system"
],
- "measurement": "diskUsage",
- "orderByTime": "ASC",
- "policy": "autogen",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "diskOpsWriteLast"
- ],
- "type": "field"
- },
- {
- "params": [
- "5"
- ],
- "type": "moving_average"
- }
- ]
+ "type": "tag"
+ },
+ {
+ "params": [
+ "diskIdentifier"
],
- "tags": [
- {
- "key": "system",
- "operator": "=~",
- "value": "/^$host$/"
- },
- {
- "condition": "AND",
- "key": "disk",
- "operator": "=",
- "value": "sdb"
- }
- ]
+ "type": "tag"
}
],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Disk Usage SDB",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
+ "measurement": "measurementdiskusage",
+ "orderByTime": "ASC",
+ "policy": "autogen",
+ "query": "SELECT moving_average(\"diskOpsWriteLast\", 5) FROM \"autogen\".\"measurementdiskusage\" WHERE (\"system\" =~ /^$host$/ AND \"diskIdentifier\" = 'sdb') AND $timeFilter GROUP BY \"system\", \"diskIdentifier\"",
+ "rawQuery": false,
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "diskOpsWriteLast"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [
+ "5"
+ ],
+ "type": "moving_average"
+ }
+ ]
+ ],
+ "tags": [
{
- "format": "short",
- "label": null,
- "logBase": 10,
- "max": null,
- "min": null,
- "show": true
+ "key": "system",
+ "operator": "=~",
+ "value": "/^$host$/"
},
{
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ "condition": "AND",
+ "key": "diskIdentifier",
+ "operator": "=",
+ "value": "sdb"
}
]
}
],
- "repeat": null,
- "repeatIteration": null,
- "repeatRowId": null,
- "showTitle": false,
- "title": "Dashboard Row",
- "titleSize": "h6"
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Disk Usage SDB",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 10,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
},
{
- "collapse": false,
- "height": 250,
- "panels": [
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "VESEvents",
+ "fieldConfig": {
+ "defaults": {
+ "custom": {}
+ },
+ "overrides": []
+ },
+ "fill": 1,
+ "fillGradient": 0,
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 33
+ },
+ "hiddenSeries": false,
+ "id": 5,
+ "legend": {
+ "alignAsTable": true,
+ "avg": true,
+ "current": true,
+ "max": true,
+ "min": true,
+ "show": true,
+ "sort": "current",
+ "sortDesc": true,
+ "total": false,
+ "values": true
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "options": {
+ "alertThreshold": true
+ },
+ "percentage": false,
+ "pluginVersion": "7.3.2",
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
{
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "VESEvents",
- "fill": 1,
- "id": 5,
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": true,
- "max": true,
- "min": true,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "percentage": false,
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "span": 12,
- "stack": false,
- "steppedLine": false,
- "targets": [
+ "alias": "",
+ "dsType": "influxdb",
+ "groupBy": [
{
- "alias": "",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "system"
- ],
- "type": "tag"
- }
+ "params": [
+ "system"
],
- "measurement": "memoryUsage",
- "orderByTime": "ASC",
- "policy": "autogen",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "memoryUsed"
- ],
- "type": "field"
- }
- ]
- ],
- "tags": [
- {
- "key": "system",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
+ "type": "tag"
}
],
- "thresholds": [],
- "timeFrom": null,
- "timeShift": null,
- "title": "Memory",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 10,
- "max": null,
- "min": null,
- "show": true
- },
+ "measurement": "measurementmemoryusage",
+ "orderByTime": "ASC",
+ "policy": "autogen",
+ "refId": "A",
+ "resultFormat": "time_series",
+ "select": [
+ [
+ {
+ "params": [
+ "memoryCached"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [
+ "5"
+ ],
+ "type": "moving_average"
+ },
+ {
+ "params": [
+ "Memory Cached"
+ ],
+ "type": "alias"
+ }
+ ],
+ [
+ {
+ "params": [
+ "memoryUsed"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [
+ "5"
+ ],
+ "type": "moving_average"
+ },
+ {
+ "params": [
+ "Memory Used"
+ ],
+ "type": "alias"
+ }
+ ],
+ [
+ {
+ "params": [
+ "memoryFree"
+ ],
+ "type": "field"
+ },
+ {
+ "params": [
+ "5"
+ ],
+ "type": "moving_average"
+ },
+ {
+ "params": [
+ "Memory Free"
+ ],
+ "type": "alias"
+ }
+ ]
+ ],
+ "tags": [
{
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
+ "key": "system",
+ "operator": "=~",
+ "value": "/^$host$/"
}
]
}
],
- "repeat": null,
- "repeatIteration": null,
- "repeatRowId": null,
- "showTitle": false,
- "title": "Dashboard Row",
- "titleSize": "h6"
+ "thresholds": [],
+ "timeFrom": null,
+ "timeRegions": [],
+ "timeShift": null,
+ "title": "Memory",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 10,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ],
+ "yaxis": {
+ "align": false,
+ "alignLevel": null
+ }
}
],
- "schemaVersion": 14,
+ "refresh": "10s",
+ "schemaVersion": 26,
"style": "dark",
"tags": [],
"templating": {
"list": [
{
"allValue": null,
- "current": {},
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
"datasource": "VESEvents",
+ "definition": "",
+ "error": null,
"hide": 0,
"includeAll": true,
"label": "host",
"query": "SHOW TAG VALUES WITH KEY=system",
"refresh": 1,
"regex": "",
+ "skipUrlSync": false,
"sort": 0,
"tagValuesQuery": "",
"tags": [],
]
},
"time": {
- "from": "now-30m",
+ "from": "now-15m",
"to": "now"
},
"timepicker": {
"timezone": "browser",
"title": "VES Demo",
"version": 4
+ }
}
-}
+
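
The dashboard panels above now read from the measurement-event names the collector writes (measurementnicperformance, measurementdiskusage, measurementmemoryusage), with the disk/vnic tags renamed to diskIdentifier/nicIdentifier. A minimal sketch for sanity-checking those names against a running InfluxDB instance, assuming the veseventsdb database used by the collector and InfluxDB's standard HTTP port 8086 (the port is not defined in this change):

# Sketch only: verify the renamed measurements and tags exist.
# Assumes InfluxDB at 127.0.0.1:8086 and the veseventsdb database.
import requests

def influx_query(q, db='veseventsdb', host='127.0.0.1:8086'):
    r = requests.get('http://{}/query'.format(host),
                     params={'db': db, 'q': q})
    r.raise_for_status()
    return r.json()

print(influx_query('SHOW MEASUREMENTS'))
print(influx_query('SHOW TAG VALUES FROM "measurementdiskusage" '
                   'WITH KEY = "diskIdentifier"'))
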
# Copyright 2017-2018 AT&T Intellectual Property, Inc
-#
+#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# Status: this is a work in progress, under test.
#
-FROM ubuntu:xenial
+FROM ubuntu:focal
+
-RUN apt-get update && apt-get install -y apt-utils
-RUN apt-get -y upgrade
-RUN apt-get update && apt-get install -y git python-pip python-jsonschema curl
-RUN pip install requests pytz tzlocal
+RUN apt-get update && apt-get -y upgrade
+RUN apt-get install -y git curl python3 python3-pip
+RUN pip3 install requests jsonschema
RUN mkdir /opt/ves
-# copy VES Collector over to the Docker
+# Copy VES Collector into the image
RUN mkdir /opt/ves/evel-test-collector
ADD evel-test-collector /opt/ves/evel-test-collector
BSD License
-Copyright (c) 2016, AT&T Intellectual Property. All other rights reserved.
+Original work Copyright (c) 2016, AT&T Intellectual Property. All other rights reserved.
+Modified work Copyright 2021 Xoriant Corporation
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
+++ /dev/null
-NOTE: This folder and subfolders have not been updated since the 2016-11-23 update release. Compatibility with the current VES specification and code has not been verified.
\ No newline at end of file
+++ /dev/null
-#!/usr/bin/env python
-'''
-Program which acts as the collector for the Vendor Event Listener REST API.
-
-Only intended for test purposes.
-
-License
--------
-
-Copyright(c) <2016>, AT&T Intellectual Property. All other rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-3. All advertising materials mentioning features or use of this software
- must display the following acknowledgement: This product includes
- software developed by the AT&T.
-4. Neither the name of AT&T nor the names of its contributors may be used to
- endorse or promote products derived from this software without specific
- prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY AT&T INTELLECTUAL PROPERTY ''AS IS'' AND ANY
-EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL AT&T INTELLECTUAL PROPERTY BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-'''
-
-from rest_dispatcher import PathDispatcher, set_404_content
-from wsgiref.simple_server import make_server
-import sys
-import os
-import platform
-import traceback
-import time
-from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
-import ConfigParser
-import logging.handlers
-from base64 import b64decode
-import string
-import json
-import jsonschema
-from functools import partial
-
-_hello_resp = '''\
-<html>
- <head>
- <title>Hello {name}</title>
- </head>
- <body>
- <h1>Hello {name}!</h1>
- </body>
-</html>'''
-
-_localtime_resp = '''\
-<?xml version="1.0"?>
-<time>
- <year>{t.tm_year}</year>
- <month>{t.tm_mon}</month>
- <day>{t.tm_mday}</day>
- <hour>{t.tm_hour}</hour>
- <minute>{t.tm_min}</minute>
- <second>{t.tm_sec}</second>
-</time>'''
-
-__all__ = []
-__version__ = 0.1
-__date__ = '2015-12-04'
-__updated__ = '2015-12-04'
-
-TESTRUN = False
-DEBUG = False
-PROFILE = False
-
-#------------------------------------------------------------------------------
-# Credentials we expect clients to authenticate themselves with.
-#------------------------------------------------------------------------------
-vel_username = ''
-vel_password = ''
-
-#------------------------------------------------------------------------------
-# The JSON schema which we will use to validate events.
-#------------------------------------------------------------------------------
-vel_schema = None
-
-#------------------------------------------------------------------------------
-# The JSON schema which we will use to validate client throttle state.
-#------------------------------------------------------------------------------
-throttle_schema = None
-
-#------------------------------------------------------------------------------
-# The JSON schema which we will use to provoke throttling commands for testing.
-#------------------------------------------------------------------------------
-test_control_schema = None
-
-#------------------------------------------------------------------------------
-# Pending command list from the testControl API
-# This is sent as a response commandList to the next received event.
-#------------------------------------------------------------------------------
-pending_command_list = None
-
-#------------------------------------------------------------------------------
-# Logger for this module.
-#------------------------------------------------------------------------------
-logger = None
-
-def listener(environ, start_response, schema):
- '''
- Handler for the Vendor Event Listener REST API.
-
- Extract headers and the body and check that:
-
- 1) The client authenticated themselves correctly.
- 2) The body validates against the provided schema for the API.
-
- '''
- logger.info('Got a Vendor Event request')
- print('==== ' + time.asctime() + ' ' + '=' * 49)
-
- #--------------------------------------------------------------------------
- # Extract the content from the request.
- #--------------------------------------------------------------------------
- length = int(environ.get('CONTENT_LENGTH', '0'))
- logger.debug('Content Length: {0}'.format(length))
- body = environ['wsgi.input'].read(length)
- logger.debug('Content Body: {0}'.format(body))
-
- mode, b64_credentials = string.split(environ.get('HTTP_AUTHORIZATION',
- 'None None'))
- # logger.debug('Auth. Mode: {0} Credentials: {1}'.format(mode,
- # b64_credentials))
- logger.debug('Auth. Mode: {0} Credentials: ****'.format(mode))
- if (b64_credentials != 'None'):
- credentials = b64decode(b64_credentials)
- else:
- credentials = None
-
- # logger.debug('Credentials: {0}'.format(credentials))
- logger.debug('Credentials: ****')
-
- #--------------------------------------------------------------------------
- # If we have a schema file then check that the event matches that expected.
- #--------------------------------------------------------------------------
- if (schema is not None):
- logger.debug('Attempting to validate data: {0}\n'
- 'Against schema: {1}'.format(body, schema))
- try:
- decoded_body = json.loads(body)
- jsonschema.validate(decoded_body, schema)
- logger.info('Event is valid!')
- print('Valid body decoded & checked against schema OK:\n'
- '{0}'.format(json.dumps(decoded_body,
- sort_keys=True,
- indent=4,
- separators=(',', ': '))))
-
- except jsonschema.SchemaError as e:
- logger.error('Schema is not valid! {0}'.format(e))
- print('Schema is not valid! {0}'.format(e))
-
- except jsonschema.ValidationError as e:
- logger.warn('Event is not valid against schema! {0}'.format(e))
- print('Event is not valid against schema! {0}'.format(e))
- print('Bad JSON body decoded:\n'
- '{0}'.format(json.dumps(decoded_body,
- sort_keys=True,
- indent=4,
- separators=(',', ': '))))
-
- except Exception as e:
- logger.error('Event invalid for unexpected reason! {0}'.format(e))
- print('Schema is not valid for unexpected reason! {0}'.format(e))
- else:
- logger.debug('No schema so just decode JSON: {0}'.format(body))
- try:
- decoded_body = json.loads(body)
- print('Valid JSON body (no schema checking) decoded:\n'
- '{0}'.format(json.dumps(decoded_body,
- sort_keys=True,
- indent=4,
- separators=(',', ': '))))
- logger.info('Event is valid JSON but not checked against schema!')
-
- except Exception as e:
- logger.error('Event invalid for unexpected reason! {0}'.format(e))
- print('JSON body not valid for unexpected reason! {0}'.format(e))
-
- #--------------------------------------------------------------------------
- # See whether the user authenticated themselves correctly.
- #--------------------------------------------------------------------------
- if (credentials == (vel_username + ':' + vel_password)):
- logger.debug('Authenticated OK')
- print('Authenticated OK')
-
- #----------------------------------------------------------------------
- # Respond to the caller. If we have a pending commandList from the
- # testControl API, send it in response.
- #----------------------------------------------------------------------
- global pending_command_list
- if pending_command_list is not None:
- start_response('202 Accepted',
- [('Content-type', 'application/json')])
- response = pending_command_list
- pending_command_list = None
-
- print('\n'+ '='*80)
- print('Sending pending commandList in the response:\n'
- '{0}'.format(json.dumps(response,
- sort_keys=True,
- indent=4,
- separators=(',', ': '))))
- print('='*80 + '\n')
- yield json.dumps(response)
- else:
- start_response('202 Accepted', [])
- yield ''
- else:
- logger.warn('Failed to authenticate OK')
- print('Failed to authenticate OK')
-
- #----------------------------------------------------------------------
- # Respond to the caller.
- #----------------------------------------------------------------------
- start_response('401 Unauthorized', [ ('Content-type',
- 'application/json')])
- req_error = { 'requestError': {
- 'policyException': {
- 'messageId': 'POL0001',
- 'text': 'Failed to authenticate'
- }
- }
- }
- yield json.dumps(req_error)
-
-def test_listener(environ, start_response, schema):
- '''
- Handler for the Test Collector Test Control API.
-
- There is no authentication on this interface.
-
- This simply stores a commandList which will be sent in response to the next
- incoming event on the EVEL interface.
- '''
- global pending_command_list
- logger.info('Got a Test Control input')
- print('============================')
- print('==== TEST CONTROL INPUT ====')
-
- #--------------------------------------------------------------------------
- # GET allows us to get the current pending request.
- #--------------------------------------------------------------------------
- if environ.get('REQUEST_METHOD') == 'GET':
- start_response('200 OK', [('Content-type', 'application/json')])
- yield json.dumps(pending_command_list)
- return
-
- #--------------------------------------------------------------------------
- # Extract the content from the request.
- #--------------------------------------------------------------------------
- length = int(environ.get('CONTENT_LENGTH', '0'))
- logger.debug('TestControl Content Length: {0}'.format(length))
- body = environ['wsgi.input'].read(length)
- logger.debug('TestControl Content Body: {0}'.format(body))
-
- #--------------------------------------------------------------------------
- # If we have a schema file then check that the event matches that expected.
- #--------------------------------------------------------------------------
- if (schema is not None):
- logger.debug('Attempting to validate data: {0}\n'
- 'Against schema: {1}'.format(body, schema))
- try:
- decoded_body = json.loads(body)
- jsonschema.validate(decoded_body, schema)
- logger.info('TestControl is valid!')
- print('TestControl:\n'
- '{0}'.format(json.dumps(decoded_body,
- sort_keys=True,
- indent=4,
- separators=(',', ': '))))
-
- except jsonschema.SchemaError as e:
- logger.error('TestControl Schema is not valid: {0}'.format(e))
- print('TestControl Schema is not valid: {0}'.format(e))
-
- except jsonschema.ValidationError as e:
- logger.warn('TestControl input not valid: {0}'.format(e))
- print('TestControl input not valid: {0}'.format(e))
- print('Bad JSON body decoded:\n'
- '{0}'.format(json.dumps(decoded_body,
- sort_keys=True,
- indent=4,
- separators=(',', ': '))))
-
- except Exception as e:
- logger.error('TestControl input not valid: {0}'.format(e))
- print('TestControl Schema not valid: {0}'.format(e))
- else:
- logger.debug('Missing schema just decode JSON: {0}'.format(body))
- try:
- decoded_body = json.loads(body)
- print('Valid JSON body (no schema checking) decoded:\n'
- '{0}'.format(json.dumps(decoded_body,
- sort_keys=True,
- indent=4,
- separators=(',', ': '))))
- logger.info('TestControl input not checked against schema!')
-
- except Exception as e:
- logger.error('TestControl input not valid: {0}'.format(e))
- print('TestControl input not valid: {0}'.format(e))
-
- #--------------------------------------------------------------------------
- # Respond to the caller. If we received otherField 'ThrottleRequest',
- # generate the appropriate canned response.
- #--------------------------------------------------------------------------
- pending_command_list = decoded_body
- print('===== TEST CONTROL END =====')
- print('============================')
- start_response('202 Accepted', [])
- yield ''
-
-def main(argv=None):
- '''
- Main function for the collector start-up.
-
- Called with command-line arguments:
- * --config *<file>*
- * --section *<section>*
- * --verbose
-
- Where:
-
- *<file>* specifies the path to the configuration file.
-
- *<section>* specifies the section within that config file.
-
- *verbose* generates more information in the log files.
-
- The process listens for REST API invocations and checks them. Errors are
- displayed to stdout and logged.
- '''
-
- if argv is None:
- argv = sys.argv
- else:
- sys.argv.extend(argv)
-
- program_name = os.path.basename(sys.argv[0])
- program_version = 'v{0}'.format(__version__)
- program_build_date = str(__updated__)
- program_version_message = '%%(prog)s {0} ({1})'.format(program_version,
- program_build_date)
- if (__import__('__main__').__doc__ is not None):
- program_shortdesc = __import__('__main__').__doc__.split('\n')[1]
- else:
- program_shortdesc = 'Running in test harness'
- program_license = '''{0}
-
- Created on {1}.
- Copyright 2015 Metaswitch Networks Ltd. All rights reserved.
-
- Distributed on an "AS IS" basis without warranties
- or conditions of any kind, either express or implied.
-
-USAGE
-'''.format(program_shortdesc, str(__date__))
-
- try:
- #----------------------------------------------------------------------
- # Setup argument parser so we can parse the command-line.
- #----------------------------------------------------------------------
- parser = ArgumentParser(description=program_license,
- formatter_class=ArgumentDefaultsHelpFormatter)
- parser.add_argument('-v', '--verbose',
- dest='verbose',
- action='count',
- help='set verbosity level')
- parser.add_argument('-V', '--version',
- action='version',
- version=program_version_message,
- help='Display version information')
- parser.add_argument('-a', '--api-version',
- dest='api_version',
- default='3',
- help='set API version')
- parser.add_argument('-c', '--config',
- dest='config',
- default='/etc/opt/att/collector.conf',
- help='Use this config file.',
- metavar='<file>')
- parser.add_argument('-s', '--section',
- dest='section',
- default='default',
- metavar='<section>',
- help='section to use in the config file')
-
- #----------------------------------------------------------------------
- # Process arguments received.
- #----------------------------------------------------------------------
- args = parser.parse_args()
- verbose = args.verbose
- api_version = args.api_version
- config_file = args.config
- config_section = args.section
-
- #----------------------------------------------------------------------
- # Now read the config file, using command-line supplied values as
- # overrides.
- #----------------------------------------------------------------------
- defaults = {'log_file': 'collector.log',
- 'vel_port': '12233',
- 'vel_path': '',
- 'vel_topic_name': ''
- }
- overrides = {}
- config = ConfigParser.SafeConfigParser(defaults)
- config.read(config_file)
-
- #----------------------------------------------------------------------
- # extract the values we want.
- #----------------------------------------------------------------------
- log_file = config.get(config_section, 'log_file', vars=overrides)
- vel_port = config.get(config_section, 'vel_port', vars=overrides)
- vel_path = config.get(config_section, 'vel_path', vars=overrides)
- vel_topic_name = config.get(config_section,
- 'vel_topic_name',
- vars=overrides)
- global vel_username
- global vel_password
- vel_username = config.get(config_section,
- 'vel_username',
- vars=overrides)
- vel_password = config.get(config_section,
- 'vel_password',
- vars=overrides)
- vel_schema_file = config.get(config_section,
- 'schema_file',
- vars=overrides)
- base_schema_file = config.get(config_section,
- 'base_schema_file',
- vars=overrides)
- throttle_schema_file = config.get(config_section,
- 'throttle_schema_file',
- vars=overrides)
- test_control_schema_file = config.get(config_section,
- 'test_control_schema_file',
- vars=overrides)
-
- #----------------------------------------------------------------------
- # Finally we have enough info to start a proper flow trace.
- #----------------------------------------------------------------------
- global logger
- print('Logfile: {0}'.format(log_file))
- logger = logging.getLogger('collector')
- if verbose > 0:
- print('Verbose mode on')
- logger.setLevel(logging.DEBUG)
- else:
- logger.setLevel(logging.INFO)
- handler = logging.handlers.RotatingFileHandler(log_file,
- maxBytes=1000000,
- backupCount=10)
- if (platform.system() == 'Windows'):
- date_format = '%Y-%m-%d %H:%M:%S'
- else:
- date_format = '%Y-%m-%d %H:%M:%S.%f %z'
- formatter = logging.Formatter('%(asctime)s %(name)s - '
- '%(levelname)s - %(message)s',
- date_format)
- handler.setFormatter(formatter)
- logger.addHandler(handler)
- logger.info('Started')
-
- #----------------------------------------------------------------------
- # Log the details of the configuration.
- #----------------------------------------------------------------------
- logger.debug('Log file = {0}'.format(log_file))
- logger.debug('Event Listener Port = {0}'.format(vel_port))
- logger.debug('Event Listener Path = {0}'.format(vel_path))
- logger.debug('Event Listener Topic = {0}'.format(vel_topic_name))
- logger.debug('Event Listener Username = {0}'.format(vel_username))
- # logger.debug('Event Listener Password = {0}'.format(vel_password))
- logger.debug('Event Listener JSON Schema File = {0}'.format(
- vel_schema_file))
- logger.debug('Base JSON Schema File = {0}'.format(base_schema_file))
- logger.debug('Throttle JSON Schema File = {0}'.format(
- throttle_schema_file))
- logger.debug('Test Control JSON Schema File = {0}'.format(
- test_control_schema_file))
-
- #----------------------------------------------------------------------
- # Perform some basic error checking on the config.
- #----------------------------------------------------------------------
- if (int(vel_port) < 1024 or int(vel_port) > 65535):
- logger.error('Invalid Vendor Event Listener port ({0}) '
- 'specified'.format(vel_port))
- raise RuntimeError('Invalid Vendor Event Listener port ({0}) '
- 'specified'.format(vel_port))
-
- if (len(vel_path) > 0 and vel_path[-1] != '/'):
- logger.warning('Event Listener Path ({0}) should have terminating '
- '"/"! Adding one on to configured string.'.format(
- vel_path))
- vel_path += '/'
-
- #----------------------------------------------------------------------
- # Load up the vel_schema, if it exists.
- #----------------------------------------------------------------------
- if not os.path.exists(vel_schema_file):
- logger.warning('Event Listener Schema File ({0}) not found. '
- 'No validation will be undertaken.'.format(
- vel_schema_file))
- else:
- global vel_schema
- global throttle_schema
- global test_control_schema
- vel_schema = json.load(open(vel_schema_file, 'r'))
- logger.debug('Loaded the JSON schema file')
-
- #------------------------------------------------------------------
- # Load up the throttle_schema, if it exists.
- #------------------------------------------------------------------
- if (os.path.exists(throttle_schema_file)):
- logger.debug('Loading throttle schema')
- throttle_fragment = json.load(open(throttle_schema_file, 'r'))
- throttle_schema = {}
- throttle_schema.update(vel_schema)
- throttle_schema.update(throttle_fragment)
- logger.debug('Loaded the throttle schema')
-
- #------------------------------------------------------------------
- # Load up the test control _schema, if it exists.
- #------------------------------------------------------------------
- if (os.path.exists(test_control_schema_file)):
- logger.debug('Loading test control schema')
- test_control_fragment = json.load(
- open(test_control_schema_file, 'r'))
- test_control_schema = {}
- test_control_schema.update(vel_schema)
- test_control_schema.update(test_control_fragment)
- logger.debug('Loaded the test control schema')
-
- #------------------------------------------------------------------
- # Load up the base_schema, if it exists.
- #------------------------------------------------------------------
- if (os.path.exists(base_schema_file)):
- logger.debug('Updating the schema with base definition')
- base_schema = json.load(open(base_schema_file, 'r'))
- vel_schema.update(base_schema)
- logger.debug('Updated the JSON schema file')
-
- #----------------------------------------------------------------------
- # We are now ready to get started with processing. Start-up the various
- # components of the system in order:
- #
- # 1) Create the dispatcher.
- # 2) Register the functions for the URLs of interest.
- # 3) Run the webserver.
- #----------------------------------------------------------------------
- root_url = '/{0}eventListener/v{1}{2}'.\
- format(vel_path,
- api_version,
- '/' + vel_topic_name
- if len(vel_topic_name) > 0
- else '')
- throttle_url = '/{0}eventListener/v{1}/clientThrottlingState'.\
- format(vel_path, api_version)
- set_404_content(root_url)
- dispatcher = PathDispatcher()
- vendor_event_listener = partial(listener, schema = vel_schema)
- dispatcher.register('GET', root_url, vendor_event_listener)
- dispatcher.register('POST', root_url, vendor_event_listener)
- vendor_throttle_listener = partial(listener, schema = throttle_schema)
- dispatcher.register('GET', throttle_url, vendor_throttle_listener)
- dispatcher.register('POST', throttle_url, vendor_throttle_listener)
-
- #----------------------------------------------------------------------
- # We also add a POST-only mechanism for test control, so that we can
- # send commands to a single attached client.
- #----------------------------------------------------------------------
- test_control_url = '/testControl/v{0}/commandList'.format(api_version)
- test_control_listener = partial(test_listener,
- schema = test_control_schema)
- dispatcher.register('POST', test_control_url, test_control_listener)
- dispatcher.register('GET', test_control_url, test_control_listener)
-
- httpd = make_server('', int(vel_port), dispatcher)
- print('Serving on port {0}...'.format(vel_port))
- httpd.serve_forever()
-
- logger.error('Main loop exited unexpectedly!')
- return 0
-
- except KeyboardInterrupt:
- #----------------------------------------------------------------------
- # handle keyboard interrupt
- #----------------------------------------------------------------------
- logger.info('Exiting on keyboard interrupt!')
- return 0
-
- except Exception as e:
- #----------------------------------------------------------------------
- # Handle unexpected exceptions.
- #----------------------------------------------------------------------
- if DEBUG or TESTRUN:
- raise(e)
- indent = len(program_name) * ' '
- sys.stderr.write(program_name + ': ' + repr(e) + '\n')
- sys.stderr.write(indent + ' for help use --help\n')
- sys.stderr.write(traceback.format_exc())
- logger.critical('Exiting because of exception: {0}'.format(e))
- logger.critical(traceback.format_exc())
- return 2
-
-#------------------------------------------------------------------------------
-# MAIN SCRIPT ENTRY POINT.
-#------------------------------------------------------------------------------
-if __name__ == '__main__':
- if TESTRUN:
- #----------------------------------------------------------------------
- # Running tests - note that doctest comments haven't been included so
- # this is a hook for future improvements.
- #----------------------------------------------------------------------
- import doctest
- doctest.testmod()
-
- if PROFILE:
- #----------------------------------------------------------------------
- # Profiling performance. Performance isn't expected to be a major
- # issue, but this should all work as expected.
- #----------------------------------------------------------------------
- import cProfile
- import pstats
- profile_filename = 'collector_profile.txt'
- cProfile.run('main()', profile_filename)
- statsfile = open('collector_profile_stats.txt', 'wb')
- p = pstats.Stats(profile_filename, stream=statsfile)
- stats = p.strip_dirs().sort_stats('cumulative')
- stats.print_stats()
- statsfile.close()
- sys.exit(0)
-
- #--------------------------------------------------------------------------
- # Normal operation - call through to the main function.
- #--------------------------------------------------------------------------
- sys.exit(main())
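
The replacement collector below is ported to Python 3 (configparser, str.split, explicit bytes handling). The subtlest change is the Basic-auth check: b64decode() returns bytes in Python 3, so the expected user:password string must be encoded before comparison. A self-contained sketch of that check, with placeholder values (the real ones come from collector.conf):

# Sketch of the Python 3 credential comparison used in listener() below.
# 'user'/'password' are placeholders; the collector reads them from its
# config file.
from base64 import b64decode

vel_username, vel_password = 'user', 'password'
b64_credentials = 'dXNlcjpwYXNzd29yZA=='   # value from the Authorization header

credentials = b64decode(b64_credentials)   # bytes: b'user:password'
expected = bytes(vel_username + ':' + vel_password, 'utf-8')
print(credentials == expected)             # True
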
#!/usr/bin/env python
#
-#Original work Copyright 2016-2017 AT&T Intellectual Property, Inc
-#Modified work Copyright 2021 Xoriant Corporation
+# Original work Copyright 2016-2017 AT&T Intellectual Property, Inc
+# Modified work Copyright 2021 Xoriant Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# See the License for the specific language governing permissions and
# limitations under the License.
#
+# What this is: Monitor and closed-loop policy agent for the OPNFV VES
+# ves_onap_demo.
+#
+# Status: this is a work in progress, under test.
from rest_dispatcher import PathDispatcher, set_404_content
from wsgiref.simple_server import make_server
import os
import platform
import traceback
-import time
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
-import ConfigParser
+import configparser
import logging.handlers
from base64 import b64decode
-import string
import json
import jsonschema
from functools import partial
import requests
-from datetime import datetime, date, time
-import calendar
-import datetime
import time
-import tzlocal
-import pytz
monitor_mode = "f"
-vdu_id = ['','','','','','']
-summary_e = ['***** Summary of key stats *****','','','']
+vdu_id = ['', '', '', '', '', '']
+summary_e = ['***** Summary of key stats *****', '', '', '']
summary_c = ['Collectd agents:']
-status = ['','Started','Started','Started']
+status = ['', 'Started', 'Started', 'Started']
base_url = ''
template_404 = b'''POST {0}'''
columns = 0
rows = 0
+
class JSONObject:
- def __init__(self, d):
- self.__dict__ = d
+ def __init__(self, d):
+ self.__dict__ = d
+
__all__ = []
__version__ = 0.1
DEBUG = False
PROFILE = False
-#------------------------------------------------------------------------------
+# ------------------------------------------------------------------------------
# Address of influxdb server.
-#------------------------------------------------------------------------------
+# ------------------------------------------------------------------------------
influxdb = '127.0.0.1'
-#------------------------------------------------------------------------------
+# ------------------------------------------------------------------------------
# Credentials we expect clients to authenticate themselves with.
-#------------------------------------------------------------------------------
+# ------------------------------------------------------------------------------
vel_username = ''
vel_password = ''
-#------------------------------------------------------------------------------
+# ------------------------------------------------------------------------------
# The JSON schema which we will use to validate events.
-#------------------------------------------------------------------------------
+# ------------------------------------------------------------------------------
vel_schema = None
-#------------------------------------------------------------------------------
+# ------------------------------------------------------------------------------
# The JSON schema which we will use to validate client throttle state.
-#------------------------------------------------------------------------------
+# ------------------------------------------------------------------------------
throttle_schema = None
-#------------------------------------------------------------------------------
+# ------------------------------------------------------------------------------
# The JSON schema which we will use to provoke throttling commands for testing.
-#------------------------------------------------------------------------------
+# ------------------------------------------------------------------------------
test_control_schema = None
-#------------------------------------------------------------------------------
+# ------------------------------------------------------------------------------
# Pending command list from the testControl API
# This is sent as a response commandList to the next received event.
-#------------------------------------------------------------------------------
+# ------------------------------------------------------------------------------
pending_command_list = None
-#------------------------------------------------------------------------------
+# ------------------------------------------------------------------------------
# Logger for this module.
-#------------------------------------------------------------------------------
+# ------------------------------------------------------------------------------
logger = None
+
def listener(environ, start_response, schema):
'''
Handler for the Vendor Event Listener REST API.
logger.info('Got a Vendor Event request')
logger.info('==== ' + time.asctime() + ' ' + '=' * 49)
- #--------------------------------------------------------------------------
+ # --------------------------------------------------------------------------
+ # GET request.
+ # --------------------------------------------------------------------------
+ if environ.get('REQUEST_METHOD') == 'GET':
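+ # Respond to GETs with a plain-text usage hint; 'get_info' is
+ # expected to hold the collector's event-listener URL (set
+ # elsewhere in this module).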
+ start_response('200 OK', [('Content-type', 'text/plain')])
+ yield ('POST ' + get_info).encode()
+ return
+
+ # --------------------------------------------------------------------------
# Extract the content from the request.
- #--------------------------------------------------------------------------
+ # --------------------------------------------------------------------------
length = int(environ.get('CONTENT_LENGTH', '0'))
logger.debug('Content Length: {0}'.format(length))
body = environ['wsgi.input'].read(length)
logger.debug('Content Body: {0}'.format(body))
- mode, b64_credentials = string.split(environ.get('HTTP_AUTHORIZATION',
- 'None None'))
- # logger.debug('Auth. Mode: {0} Credentials: {1}'.format(mode,
- # b64_credentials))
+ mode, b64_credentials = str.split(environ.get('HTTP_AUTHORIZATION',
+ 'None None'))
logger.debug('Auth. Mode: {0} Credentials: ****'.format(mode))
if (b64_credentials != 'None'):
credentials = b64decode(b64_credentials)
else:
credentials = None
- # logger.debug('Credentials: {0}'.format(credentials))
logger.debug('Credentials: ****')
- #--------------------------------------------------------------------------
+ # --------------------------------------------------------------------------
# If we have a schema file then check that the event matches that expected.
- #--------------------------------------------------------------------------
+ # --------------------------------------------------------------------------
if (schema is not None):
logger.debug('Attempting to validate data: {0}\n'
'Against schema: {1}'.format(body, schema))
decoded_body = json.loads(body)
jsonschema.validate(decoded_body, schema)
logger.info('Event is valid!')
- logger.debug('Valid body decoded & checked against schema OK:\n'
- '{0}'.format(json.dumps(decoded_body,
- sort_keys=True,
- indent=4,
- separators=(',', ': '))))
- #--------------------------------------------------------------------------
+ logger.info('Valid body decoded & checked against schema OK:\n'
+ '{0}'.format(json.dumps(decoded_body,
+ sort_keys=True,
+ indent=4,
+ separators=(',', ': '))))
+ # --------------------------------------------------------------------------
# See whether the user authenticated themselves correctly.
- #--------------------------------------------------------------------------
- if (credentials == (vel_username + ':' + vel_password)):
- logger.debug('Authenticated OK')
+ # --------------------------------------------------------------------------
+ if (credentials == bytes((vel_username
+ + ':' + vel_password), 'utf-8')):
+ logger.info('Authenticated OK')
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Respond to the caller. If we have a pending commandList from the
# testControl API, send it in response.
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
global pending_command_list
if pending_command_list is not None:
start_response('202 Accepted',
- [('Content-type', 'application/json')])
+ [('Content-type', 'application/json')])
response = pending_command_list
pending_command_list = None
- logger.debug('\n'+ '='*80)
+ logger.debug('\n' + '='*80)
logger.debug('Sending pending commandList in the response:\n'
- '{0}'.format(json.dumps(response,
- sort_keys=True,
- indent=4,
- separators=(',', ': '))))
+ '{0}'.format(json.dumps(response,
+ sort_keys=True,
+ indent=4,
+ separators=(',', ': '))))
logger.debug('='*80 + '\n')
- yield json.dumps(response)
+ yield json.dumps(response).encode()
else:
start_response('202 Accepted', [])
- yield ''
+ yield ''.encode()
else:
- logger.warn('Failed to authenticate OK; creds: ' + credentials)
- logger.warn('Failed to authenticate agent credentials: ', credentials,
- 'against expected ', vel_username, ':', vel_password)
-
- #----------------------------------------------------------------------
+ logger.warn('Failed to authenticate OK; creds: {0}'.format(credentials))
+ logger.warn('Failed to authenticate agent credentials: {0} '
+ 'against expected {1}:{2}'.format(credentials,
+ vel_username,
+ vel_password))
+
+ # ----------------------------------------------------------------------
# Respond to the caller.
- #----------------------------------------------------------------------
- start_response('401 Unauthorized', [ ('Content-type',
- 'application/json')])
- req_error = { 'requestError': {
+ # ----------------------------------------------------------------------
+ start_response('401 Unauthorized', [('Content-type',
+ 'application/json')])
+ req_error = {'requestError': {
'policyException': {
'messageId': 'POL0001',
- 'text': 'Failed to authenticate'
- }
+ 'text': 'Failed to authenticate'
+ }
}
}
yield json.dumps(req_error)
except jsonschema.ValidationError as e:
logger.warn('Event is not valid against schema! {0}'.format(e))
logger.warn('Bad JSON body decoded:\n'
- '{0}'.format(json.dumps(decoded_body,
- sort_keys=True,
- indent=4,
- separators=(',', ': '))))
+ '{0}'.format(json.dumps(decoded_body,
+ sort_keys=True,
+ indent=4,
+ separators=(',', ': '))))
except Exception as e:
logger.error('Event invalid for unexpected reason! {0}'.format(e))
try:
decoded_body = json.loads(body)
logger.warn('Valid JSON body (no schema checking) decoded:\n'
- '{0}'.format(json.dumps(decoded_body,
- sort_keys=True,
- indent=4,
- separators=(',', ': '))))
+ '{0}'.format(json.dumps(decoded_body,
+ sort_keys=True,
+ indent=4,
+ separators=(',', ': '))))
logger.warn('Event is valid JSON but not checked against schema!')
except Exception as e:
logger.error('Event invalid for unexpected reason! {0}'.format(e))
-#--------------------------------------------------------------------------
+
+# --------------------------------------------------------------------------
# Send event to influxdb
-#--------------------------------------------------------------------------
-def send_to_influxdb(event,pdata):
- url = 'http://{}/write?db=veseventsdb'.format(influxdb)
- logger.debug('Send {} to influxdb at {}: {}'.format(event,influxdb,pdata))
- r = requests.post(url, data=pdata, headers={'Content-Type': 'text/plain'})
- logger.debug('influxdb return code {}'.format(r.status_code))
- if r.status_code != 204:
- logger.debug('*** Influxdb save failed, return code {} ***'.format(r.status_code))
-
-#--------------------------------------------------------------------------
-# Convert timestamp to integer
-#--------------------------------------------------------------------------
-def convertTimestampToInt(timestamp, timeFormat="%Y-%m-%dT%H:%M:%S.%fz"):
- date_time_obj = datetime.datetime.strptime(timestamp, timeFormat)
- local_timezone = tzlocal.get_localzone();
- local_timestamp = date_time_obj.replace(tzinfo=pytz.utc).astimezone(local_timezone).strftime(timeFormat)
- date_time_obj_new = datetime.datetime.strptime(local_timestamp, timeFormat)
- unixtime = time.mktime(date_time_obj_new.timetuple())
- return int(float(unixtime) * float(1000000000))
-
-#--------------------------------------------------------------------------
-# Save event data
-#--------------------------------------------------------------------------
-def save_event_in_db(body):
- jobj = json.loads(body)
- e = json.loads(body, object_hook=JSONObject)
-
- domain = jobj['event']['commonEventHeader']['domain']
- timestamp = jobj['event']['commonEventHeader']['lastEpochMicrosec']
- agent = jobj['event']['commonEventHeader']['reportingEntityName'].upper( )
- if "LOCALHOST" in agent:
- agent = "computehost"
- source = jobj['event']['commonEventHeader']['sourceId'].upper( )
-
-###################################################
- ## processing common header part
- pdata = domain
- nonstringpdata = " "
- commonHeaderObj = jobj['event']['commonEventHeader'].items()
- for key,val in commonHeaderObj:
- if val != "" :
- if isinstance(val, unicode):
- pdata = pdata + ',{}={}'.format(key,val.replace(' ','-'))
- else:
- nonstringpdata = nonstringpdata + '{}={}'.format(key,val) + ','
-
-
- ## processing pnfRegistration events
- if 'pnfRegistrationFields' in jobj['event']:
- logger.debug('Found pnfRegistrationFields')
-
- d = jobj['event']['pnfRegistrationFields'].items()
- for key,val in d:
- if key != 'additionalFields' and val != "" :
- if isinstance(val, unicode):
- pdata = pdata + ',{}={}'.format(key,val.replace(' ','-'))
- else:
- nonstringpdata = nonstringpdata + '{}={}'.format(key,val) + ','
- elif key == 'additionalFields':
- for key2,val2 in val.items():
- if val2 != "" and isinstance(val2, unicode):
- pdata = pdata + ',{}={}'.format(key2,val2.replace(' ','-'))
- elif val2 != "" :
- nonstringpdata = nonstringpdata + '{}={}'.format(key2,val2) + ','
-
- send_to_influxdb(domain, pdata + nonstringpdata[:-1])
-
-
- ## processing thresholdCrossingAlert events
- if 'thresholdCrossingAlertFields' in jobj['event']:
- logger.debug('Found thresholdCrossingAlertFields')
-
- d = jobj['event']['thresholdCrossingAlertFields'].items()
- for key,val in d:
- if (key != 'additionalFields' and key != 'additionalParameters' and key != 'associatedAlertIdList') and val != "" :
- if isinstance(val, unicode):
- if key == "collectionTimestamp" or key == "eventStartTimestamp" :
- nonstringpdata = nonstringpdata + '{}={}'.format(key,convertTimestampToInt(val[:-6], "%a, %d %b %Y %H:%M:%S"))+ ','
- else:
- pdata = pdata + ',{}={}'.format(key,val.replace(' ','-'))
- else:
- nonstringpdata = nonstringpdata + '{}={}'.format(key,val) + ','
- elif key == 'additionalFields':
- for key2,val2 in val.items():
- if key2 == 'eventTime' :
- eventTime = convertTimestampToInt(val2)
- else:
- if val2 != "" and isinstance(val2, unicode):
- pdata = pdata + ',{}={}'.format(key2,val2.replace(' ','-'))
- elif val2 != "" :
- nonstringpdata = nonstringpdata + '{}={}'.format(key2,val2) + ','
- elif key == 'additionalParameters':
- for addParameter in val:
- for key2,val2 in addParameter.items():
- if key2 != "hashMap" :
- if val2 != "" and isinstance(val2, unicode):
- pdata = pdata + ',{}={}'.format(key2,val2.replace(' ','-'))
- elif val2 != "" :
- nonstringpdata = nonstringpdata + '{}={}'.format(key2,val2) + ','
- elif key2 == "hashMap" :
- for key3,val3 in val2.items():
- if val3 != "" and isinstance(val3, unicode):
- pdata = pdata + ',{}={}'.format(key3,val3.replace(' ','-'))
- elif val3 != "" :
- nonstringpdata = nonstringpdata + '{}={}'.format(key3,val3) + ','
- elif key == 'associatedAlertIdList':
- associatedAlertIdList = ""
- for associatedAlertId in val:
- associatedAlertIdList = associatedAlertIdList + associatedAlertId + "|"
- if(associatedAlertIdList != ""):
- pdata = pdata + ',{}={}'.format("associatedAlertIdList",associatedAlertIdList.replace(' ','-')[:-1])
-
- send_to_influxdb(domain, pdata + nonstringpdata[:-1] + ' ' + format(eventTime))
-
-
- ## processing fault events
- if 'faultFields' in jobj['event']:
- logger.debug('Found faultFields')
-
- d = jobj['event']['faultFields'].items()
- for key,val in d:
- if key != 'alarmAdditionalInformation' and val != "" :
- if isinstance(val, unicode):
- pdata = pdata + ',{}={}'.format(key,val.replace(' ','-'))
- else:
- nonstringpdata = nonstringpdata + '{}={}'.format(key,val) + ','
- elif key == 'alarmAdditionalInformation':
- for key2,val2 in val.items():
- if key2 == 'eventTime' :
- eventTime = convertTimestampToInt(val2)
+# --------------------------------------------------------------------------
+def send_to_influxdb(event, pdata):
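+ # 'pdata' must already be a complete InfluxDB line-protocol record
+ # (measurement,tag=value,... field=value,... [timestamp]); this helper
+ # only POSTs it to the veseventsdb database on the influxdb host.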
+ url = 'http://{}/write?db=veseventsdb'.format(influxdb)
+ logger.info('Send {} to influxdb at {}: {}'.format(event, influxdb, pdata))
+ r = requests.post(url, data=pdata, headers={'Content-Type': 'text/plain'})
+ logger.info('influxdb return code {}'.format(r.status_code))
+ if r.status_code != 204:
+ logger.error('*** Influxdb save failed, return code {} ***'.format(r.status_code))
+
+
+def process_additional_measurements(val, domain, eventId, startEpochMicrosec, lastEpochMicrosec):
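+ # Writes one record per additionalMeasurements entry: string values
+ # become InfluxDB tags, everything else becomes numeric fields.
+ # Relies on the module-level 'source' and event-timestamp globals.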
+ for additionalMeasurements in val:
+ pdata = domain + ",eventId={},system={}".format(eventId, source)
+ nonstringpdata = " startEpochMicrosec={},lastEpochMicrosec={},".format(startEpochMicrosec, lastEpochMicrosec)
+ for key, val in additionalMeasurements.items():
+ if isinstance(val, str):
+ pdata = pdata + ',{}={}'.format(key, process_special_char(val))
+ elif isinstance(val, dict):
+ for key2, val2 in val.items():
+ if isinstance(val2, str):
+ pdata = pdata + ',{}={}'.format(key2, process_special_char(val2))
+ else:
+ nonstringpdata = nonstringpdata + '{}={},'.format(key2, val2)
else:
- if val2 != "" and isinstance(val2, unicode):
- pdata = pdata + ',{}={}'.format(key2,val2.replace(' ','-'))
- elif val2 != "" :
- nonstringpdata = nonstringpdata + '{}={}'.format(key2,val2) + ','
+ nonstringpdata = nonstringpdata + '{}={},'.format(key, val)
+ send_to_influxdb(domain, pdata + nonstringpdata[:-1] + ' ' + process_time(eventTimestamp))
- send_to_influxdb(domain, pdata + nonstringpdata[:-1] + ' ' + format(eventTime))
+def process_nonadditional_measurements(val, domain, eventId, startEpochMicrosec, lastEpochMicrosec):
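+ # Same flattening for fixed-schema arrays (one record per disk, NIC,
+ # etc.): string values become tags, the rest become fields.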
+ for disk in val:
+ pdata = domain + ",eventId={},system={}".format(eventId, source)
+ nonstringpdata = " startEpochMicrosec={},lastEpochMicrosec={},".format(startEpochMicrosec, lastEpochMicrosec)
+ for key, val in disk.items():
+ if isinstance(val, str):
+ pdata = pdata + ',{}={}'.format(key, process_special_char(val))
+ else:
+ nonstringpdata = nonstringpdata + '{}={},'.format(key, val)
- ###process heartbeat events
- if 'heartbeatFields' in jobj['event']:
- logger.debug('Found Heartbeat')
+ send_to_influxdb(domain, pdata + nonstringpdata[:-1] + ' ' + process_time(eventTimestamp))
- d = jobj['event']['heartbeatFields'].items()
- for key,val in d:
- if key != 'additionalFields' and val != "" :
- if isinstance(val, unicode):
- pdata = pdata + ',{}={}'.format(key,val.replace(' ','-'))
- else:
- nonstringpdata = nonstringpdata + '{}={}'.format(key,val) + ','
- elif key == 'additionalFields':
- for key2,val2 in val.items():
- if key2 == 'eventTime' :
- eventTime = convertTimestampToInt(val2)
+
+def process_pnfRegistration_event(domain, jobj, pdata, nonstringpdata):
+ pdata = pdata + ",system={}".format(source)
+ for key, val in jobj.items():
+ if key != 'additionalFields' and val != "":
+ if isinstance(val, str):
+ pdata = pdata + ',{}={}'.format(key, process_special_char(val))
else:
- if val2 != "" and isinstance(val2, unicode):
- pdata = pdata + ',{}={}'.format(key2,val2.replace(' ','-'))
- elif val2 != "" :
- nonstringpdata = nonstringpdata + '{}={}'.format(key2,val2) + ','
-
- send_to_influxdb(domain, pdata + nonstringpdata[:-1] + ' ' + format(eventTime))
-
-
- ## processing measurement events
- if 'measurementFields' in jobj['event']:
- logger.debug('Found measurementFields')
- d = jobj['event']['measurementFields'].items()
- nonstringKey = ["concurrentSessions","configuredEntities","meanRequestLatency","measurementFieldsVersion","measurementInterval",
- "nfcScalingMetric","numberOfMediaPortsInUse","requestRate"]
-
- pdata = pdata + ' '
- for key,val in d:
- for nonstrKey in nonstringKey:
- if key == nonstrKey:
- pdata = pdata + '{}={}'.format(key,val) + ','
-
- send_to_influxdb("fault", pdata[:-1])
-
-
- if 'measurementsForVfScalingFields' in jobj['event']:
- logger.debug('Found measurementsForVfScalingFields')
-
-# "measurementsForVfScalingFields": {
-# "additionalMeasurements": [
-# {
-# "arrayOfFields": [
-# {
-# "name": "load-longterm",
-# "value": "0.34"
-# },
-# {
-# "name": "load-shortterm",
-# "value": "0.32"
-# },
-# {
-# "name": "load-midterm",
-# "value": "0.34"
-# }
-# ],
-# "name": "load"
-# }
-# ],
-
- if 'additionalMeasurements' in jobj['event']['measurementsForVfScalingFields']:
- for meas in jobj['event']['measurementsForVfScalingFields']['additionalMeasurements']:
- name = meas['name']
- eventTime = int(float(meas['eventTime']) * float(1000000000))
-
- if name =="kernel4-filterAccounting":
- data = '{},system={}'.format(name,source)
- for field in meas['arrayOfFields']:
- if field['name'] =="ipt-packets-value":
- val=field['value']
- else:
- data = data + ",{}={}".format(field['name'],field['value'])
-
- data = data + ' ' + "ipt-packets-value=" + val + ' ' + format(eventTime)
- send_to_influxdb("iptables", data)
- else:
- pdata = '{},system={}'.format(name,source)
-
- for field in meas['arrayOfFields']:
- pdata = pdata + ",{}={}".format(field['name'],field['value'])
- #pdata = pdata + ",{}={}".format("eventTime",meas['eventTime'])
- i=pdata.find(',', pdata.find('system'))
- pdata = pdata[:i] + ' ' + pdata[i+1:] + ' ' + format(eventTime)
- send_to_influxdb("systemLoad", pdata)
-
-# "cpuUsageArray": [
-# {
-# "cpuIdentifier": "15",
-# "cpuIdle": 99.8998998999,
-# "cpuUsageInterrupt": 0,
-# "cpuUsageNice": 0,
-# "cpuUsageSoftIrq": 0,
-# "cpuUsageSteal": 0,
-# "cpuUsageSystem": 0,
-# "cpuUsageUser": 0.1001001001,
-# "cpuWait": 0,
-# "percentUsage": 0.0
-# },
-
-
-
- if 'cpuUsageArray' in jobj['event']['measurementsForVfScalingFields']:
- logger.debug('Found cpuUsageArray')
- for disk in jobj['event']['measurementsForVfScalingFields']['cpuUsageArray']:
- id=disk['cpuIdentifier']
- pdata = 'cpuUsage,system={},cpu={}'.format(source,id)
- d = disk.items()
- for key,val in d:
- if key == 'eventTime':
- eventTime = int(float(val) * float(1000000000))
- elif key != 'cpuIdentifier':
- pdata = pdata + ',{}={}'.format(key,val)
-
- i=pdata.find(',', pdata.find('cpu='))
- pdata = pdata[:i] + ' ' + pdata[i+1:] + ' ' + format(eventTime)
- send_to_influxdb("cpuUsage", pdata)
-
-# "diskUsageArray": [
-# {
-# "diskIdentifier": "sda",
-# "diskIoTimeLast": 0.3996139893,
-# "diskMergedReadLast": 0,
-# "diskMergedWriteLast": 26.1747155344,
-# "diskOctetsReadLast": 0,
-# "diskOctetsWriteLast": 309767.93302,
-# "diskOpsReadLast": 0,
-# "diskOpsWriteLast": 10.9893839563,
-# "diskTimeReadLast": 0,
-# "diskTimeWriteLast": 0.699324445683
-# },
-
- if 'diskUsageArray' in jobj['event']['measurementsForVfScalingFields']:
- logger.debug('Found diskUsageArray')
- for disk in jobj['event']['measurementsForVfScalingFields']['diskUsageArray']:
- id=disk['diskIdentifier']
- pdata = 'diskUsage,system={},disk={}'.format(source,id)
- d = disk.items()
- for key,val in d:
- if key == 'eventTime':
- eventTime = int(float(val) * float(1000000000))
- elif key != 'diskIdentifier':
- pdata = pdata + ',{}={}'.format(key,val)
-
- i=pdata.find(',', pdata.find('disk='))
- pdata = pdata[:i] + ' ' + pdata[i+1:] + ' ' + format(eventTime)
- send_to_influxdb("diskUsage", pdata)
-
-# "memoryUsageArray": [
-# {
-# "memoryBuffered": 269056.0,
-# "memoryCached": 17636956.0,
-# "memoryFree": 244731658240,
-# "memorySlabRecl": 753160.0,
-# "memorySlabUnrecl": 210800.0,
-# "memoryUsed": 6240064.0,
-# "vmIdentifier": "opnfv01"
-# }
-# ],
-
- if 'memoryUsageArray' in jobj['event']['measurementsForVfScalingFields']:
- logger.debug('Found memoryUsageArray')
- pdata = 'memoryUsage,system={}'.format(source)
- vmid=e.event.measurementsForVfScalingFields.memoryUsageArray[0].vmIdentifier
- d = jobj['event']['measurementsForVfScalingFields']['memoryUsageArray'][0].items()
- for key,val in d:
- if key == 'eventTime':
- eventTime = int(float(val) * float(1000000000))
- elif key != 'vmIdentifier':
- pdata = pdata + ',{}={}'.format(key,val)
-
- i=pdata.find(',', pdata.find('system'))
- pdata = pdata[:i] + ' ' + pdata[i+1:] + ' ' + format(eventTime)
- send_to_influxdb("memoryUsage", pdata)
-
-# "vNicPerformanceArray": [
-# {
-# "receivedDiscardedPacketsAccumulated": 0,
-# "receivedErrorPacketsAccumulated": 0,
-# "receivedOctetsAccumulated": 476.801524578,
-# "receivedTotalPacketsAccumulated": 2.90000899705,
-# "transmittedDiscardedPacketsAccumulated": 0,
-# "transmittedErrorPacketsAccumulated": 0,
-# "transmittedOctetsAccumulated": 230.100735749,
-# "transmittedTotalPacketsAccumulated": 1.20000372292,
-# "vNicIdentifier": "eno4",
-# "valuesAreSuspect": "true"
-# },
-
- if 'vNicPerformanceArray' in jobj['event']['measurementsForVfScalingFields']:
- logger.debug('Found vNicPerformanceArray')
- for vnic in jobj['event']['measurementsForVfScalingFields']['vNicPerformanceArray']:
- vnid=vnic['vNicIdentifier']
- pdata = 'vNicPerformance,system={},vnic={}'.format(vmid,vnid)
- d = vnic.items()
- for key,val in d:
- if key == 'eventTime':
- eventTime = int(float(val) * float(1000000000))
- elif key != 'vNicIdentifier':
- pdata = pdata + ',{}={}'.format(key,val)
-
- i=pdata.find(',', pdata.find('vnic'))
- pdata = pdata[:i] + ' ' + pdata[i+1:] + ' ' + format(eventTime)
- send_to_influxdb("vNicPerformance", pdata)
+ nonstringpdata = nonstringpdata + '{}={},'.format(key, val)
+ elif key == 'additionalFields':
+ for key2, val2 in val.items():
+ if val2 != "" and isinstance(val2, str):
+ pdata = pdata + ',{}={}'.format(key2, process_special_char(val2))
+ elif val2 != "":
+ nonstringpdata = nonstringpdata + '{}={},'.format(key2, val2)
+
+ send_to_influxdb(domain, pdata + nonstringpdata[:-1] + ' ' + process_time(eventTimestamp))
+
+
+def process_thresholdCrossingAlert_event(domain, jobj, pdata, nonstringpdata):
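+    # Flat fields are handled directly; additionalFields (a map),
+    # additionalParameters (a list of maps, each with an optional hashMap)
+    # and associatedAlertIdList (joined with '|') are flattened below.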
+ pdata = pdata + ",system={}".format(source)
+ for key, val in jobj.items():
+ if (key != 'additionalFields' and key != 'additionalParameters' and key != 'associatedAlertIdList') and val != "":
+ if isinstance(val, str):
+ pdata = pdata + ',{}={}'.format(key, process_special_char(val))
+ else:
+ nonstringpdata = nonstringpdata + '{}={},'.format(key, val)
+ elif key == 'additionalFields':
+ for key2, val2 in val.items():
+ if val2 != "" and isinstance(val2, str):
+ pdata = pdata + ',{}={}'.format(key2, process_special_char(val2))
+ elif val2 != "":
+ nonstringpdata = nonstringpdata + '{}={},'.format(key2, val2)
+ elif key == 'additionalParameters':
+ for addParameter in val:
+ for key2, val2 in addParameter.items():
+ if key2 != "hashMap":
+ if val2 != "" and isinstance(val2, str):
+ pdata = pdata + ',{}={}'.format(key2, process_special_char(val2))
+ elif val2 != "":
+ nonstringpdata = nonstringpdata + '{}={},'.format(key2, val2)
+ elif key2 == "hashMap":
+ for key3, val3 in val2.items():
+ if val3 != "" and isinstance(val3, str):
+ pdata = pdata + ',{}={}'.format(key3, process_special_char(val3))
+ elif val3 != "":
+ nonstringpdata = nonstringpdata + '{}={},'.format(key3, val3)
+ elif key == 'associatedAlertIdList':
+ associatedAlertIdList = ""
+ for associatedAlertId in val:
+ associatedAlertIdList = associatedAlertIdList + associatedAlertId + "|"
+            if associatedAlertIdList != "":
+                pdata = pdata + ',{}={}'.format("associatedAlertIdList", process_special_char(associatedAlertIdList)[:-1])
+
+ send_to_influxdb(domain, pdata + nonstringpdata[:-1] + ' ' + process_time(eventTimestamp))
+
+
+def process_fault_event(domain, jobj, pdata, nonstringpdata):
+ pdata = pdata + ",system={}".format(source)
+ for key, val in jobj.items():
+ if key != 'alarmAdditionalInformation' and val != "":
+ if isinstance(val, str):
+ pdata = pdata + ',{}={}'.format(key, process_special_char(val))
+ else:
+ nonstringpdata = nonstringpdata + '{}={},'.format(key, val)
+ elif key == 'alarmAdditionalInformation':
+ for key2, val2 in val.items():
+ if val2 != "" and isinstance(val2, str):
+ pdata = pdata + ',{}={}'.format(key2, process_special_char(val2))
+ elif val2 != "":
+ nonstringpdata = nonstringpdata + '{}={},'.format(key2, val2)
+
+ send_to_influxdb(domain, pdata + nonstringpdata[:-1] + ' ' + process_time(eventTimestamp))
+
+
+def process_heartbeat_events(domain, jobj, pdata, nonstringpdata):
+ pdata = pdata + ",system={}".format(source)
+ for key, val in jobj.items():
+ if key != 'additionalFields' and val != "":
+ if isinstance(val, str):
+ pdata = pdata + ',{}={}'.format(key, process_special_char(val))
+ else:
+ nonstringpdata = nonstringpdata + '{}={},'.format(key, val)
+ elif key == 'additionalFields':
+ for key2, val2 in val.items():
+ if val2 != "" and isinstance(val2, str):
+ pdata = pdata + ',{}={}'.format(key2, process_special_char(val2))
+ elif val2 != "":
+ nonstringpdata = nonstringpdata + '{}={},'.format(key2, val2)
+
+ send_to_influxdb(domain, pdata + nonstringpdata[:-1] + ' ' + process_time(eventTimestamp))
+
+
+def process_measurement_events(domain, jobj, pdata, nonstringpdata, eventId, startEpochMicrosec, lastEpochMicrosec):
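+    # Array-valued blocks are split out into their own measurements
+    # (e.g. "<domain>cpuusage"); all other fields stay on the main point.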
+ pdata = pdata + ",system={}".format(source)
+ for key, val in jobj.items():
+ if val != "":
+ if isinstance(val, str):
+ pdata = pdata + ',{}={}'.format(key, process_special_char(val))
+ elif isinstance(val, list):
+ if key == 'additionalMeasurements':
+ process_additional_measurements(val,
+ domain + "additionalmeasurements",
+ eventId,
+ startEpochMicrosec,
+ lastEpochMicrosec)
+ elif key == 'cpuUsageArray':
+ process_nonadditional_measurements(val,
+ domain + "cpuusage",
+ eventId,
+ startEpochMicrosec,
+ lastEpochMicrosec)
+ elif key == 'diskUsageArray':
+ process_nonadditional_measurements(val,
+ domain + "diskusage",
+ eventId,
+ startEpochMicrosec,
+ lastEpochMicrosec)
+ elif key == 'memoryUsageArray':
+ process_nonadditional_measurements(val,
+ domain + "memoryusage",
+ eventId,
+ startEpochMicrosec,
+ lastEpochMicrosec)
+ elif key == 'nicPerformanceArray':
+ process_nonadditional_measurements(val,
+ domain + "nicperformance",
+ eventId,
+ startEpochMicrosec,
+ lastEpochMicrosec)
+ elif key == 'loadArray':
+ process_nonadditional_measurements(val,
+ domain + "load",
+ eventId,
+ startEpochMicrosec,
+ lastEpochMicrosec)
+ elif isinstance(val, dict):
+ for key2, val2 in val.items():
+ if isinstance(val2, str):
+ pdata = pdata + ',{}={}'.format(key2, process_special_char(val2))
+ else:
+ nonstringpdata = nonstringpdata + '{}={},'.format(key2, val2)
+ else:
+ nonstringpdata = nonstringpdata + '{}={},'.format(key, val)
+
+ send_to_influxdb(domain, pdata + nonstringpdata[:-1] + ' ' + process_time(eventTimestamp))
+
+
+def process_special_char(value):
+    # Escape spaces and commas, which are significant in the InfluxDB
+    # line protocol.
+    for search_char, replace_char in {" ": "\\ ", ",": "\\,"}.items():
+        value = value.replace(search_char, replace_char)
+    return value
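+# e.g. process_special_char("load average,5m") returns "load\ average\,5m".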
+
+
+def process_time(eventTimestamp):
+    # Normalise the epoch timestamp to a 19-digit (nanosecond) string, as
+    # expected by the InfluxDB line protocol.
+    eventTimestamp = str(eventTimestamp).replace(".", "")
+    while len(eventTimestamp) < 19:
+        eventTimestamp = eventTimestamp + "0"
+    return str(int(eventTimestamp))
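+# e.g. process_time(1639516274.913) returns '1639516274913000000'.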
+
+# --------------------------------------------------------------------------
+# Save event data
+# --------------------------------------------------------------------------
+
+
+def save_event_in_db(body):
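+    # Decode the VES event, build a line-protocol prefix from the common
+    # event header, then dispatch to the matching per-domain processor.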
+ jobj = json.loads(body)
+ global source
+ global eventTimestamp
+ source = "unknown"
+
+ domain = jobj['event']['commonEventHeader']['domain']
+ eventTimestamp = jobj['event']['commonEventHeader']['startEpochMicrosec']
+ agent = jobj['event']['commonEventHeader']['reportingEntityName'].upper()
+ if "LOCALHOST" in agent:
+ agent = "computehost"
+ source = jobj['event']['commonEventHeader']['sourceId'].upper()
+
+ # processing common header part
+ pdata = domain
+ nonstringpdata = " "
+ commonHeaderObj = jobj['event']['commonEventHeader'].items()
+ for key, val in commonHeaderObj:
+ if val != "":
+ if (key != 'internalHeaderFields'):
+ if isinstance(val, str):
+ pdata = pdata + ',{}={}'.format(key, process_special_char(val))
+ else:
+ nonstringpdata = nonstringpdata + '{}={}'.format(key, val) + ','
+ if (key == 'internalHeaderFields'):
+ for key2, val2 in val.items():
+ if val2 != "":
+ if isinstance(val2, str):
+                        pdata = pdata + ',{}={}'.format(key2, process_special_char(val2))
+ else:
+ nonstringpdata = nonstringpdata + '{}={}'.format(key2, val2) + ','
+
+ # processing pnfRegistration events
+ if 'pnfRegistrationFields' in jobj['event']:
+ logger.debug('Found pnfRegistrationFields')
+ process_pnfRegistration_event(domain,
+ jobj['event']['pnfRegistrationFields'],
+ pdata,
+ nonstringpdata)
+
+ # processing thresholdCrossingAlert events
+ if 'thresholdCrossingAlertFields' in jobj['event']:
+ logger.debug('Found thresholdCrossingAlertFields')
+ process_thresholdCrossingAlert_event(domain,
+ jobj['event']['thresholdCrossingAlertFields'],
+ pdata,
+ nonstringpdata)
+
+ # processing fault events
+ if 'faultFields' in jobj['event']:
+ logger.debug('Found faultFields')
+ process_fault_event(domain,
+ jobj['event']['faultFields'],
+ pdata,
+ nonstringpdata)
+
+ # process heartbeat events
+ if 'heartbeatFields' in jobj['event']:
+ logger.debug('Found Heartbeat')
+ process_heartbeat_events(domain,
+ jobj['event']['heartbeatFields'],
+ pdata,
+ nonstringpdata)
+
+ # processing measurement events
+ if 'measurementFields' in jobj['event']:
+ logger.debug('Found measurementFields')
+ process_measurement_events(domain,
+ jobj['event']['measurementFields'],
+ pdata, nonstringpdata,
+ jobj['event']['commonEventHeader']['eventId'],
+ jobj['event']['commonEventHeader']['startEpochMicrosec'],
+ jobj['event']['commonEventHeader']['lastEpochMicrosec'])
+
def test_listener(environ, start_response, schema):
'''
logger.info('============================')
logger.info('==== TEST CONTROL INPUT ====')
- #--------------------------------------------------------------------------
+ # --------------------------------------------------------------------------
# GET allows us to get the current pending request.
- #--------------------------------------------------------------------------
+ # --------------------------------------------------------------------------
if environ.get('REQUEST_METHOD') == 'GET':
start_response('200 OK', [('Content-type', 'application/json')])
yield json.dumps(pending_command_list)
return
- #--------------------------------------------------------------------------
+ # --------------------------------------------------------------------------
# Extract the content from the request.
- #--------------------------------------------------------------------------
+ # --------------------------------------------------------------------------
length = int(environ.get('CONTENT_LENGTH', '0'))
logger.debug('TestControl Content Length: {0}'.format(length))
body = environ['wsgi.input'].read(length)
logger.debug('TestControl Content Body: {0}'.format(body))
- #--------------------------------------------------------------------------
+ # --------------------------------------------------------------------------
# If we have a schema file then check that the event matches that expected.
- #--------------------------------------------------------------------------
+ # --------------------------------------------------------------------------
if (schema is not None):
logger.debug('Attempting to validate data: {0}\n'
'Against schema: {1}'.format(body, schema))
jsonschema.validate(decoded_body, schema)
logger.info('TestControl is valid!')
logger.info('TestControl:\n'
- '{0}'.format(json.dumps(decoded_body,
- sort_keys=True,
- indent=4,
- separators=(',', ': '))))
+ '{0}'.format(json.dumps(decoded_body,
+ sort_keys=True,
+ indent=4,
+ separators=(',', ': '))))
except jsonschema.SchemaError as e:
logger.error('TestControl Schema is not valid: {0}'.format(e))
except jsonschema.ValidationError as e:
- logger.error('TestControl input not valid: {0}'.format(e))
- logger.error('Bad JSON body decoded:\n'
- '{0}'.format(json.dumps(decoded_body,
- sort_keys=True,
- indent=4,
- separators=(',', ': '))))
+        logger.warning('TestControl input not valid: {0}'.format(e))
+        logger.warning('Bad JSON body decoded:\n'
+                       '{0}'.format(json.dumps(decoded_body,
+                                               sort_keys=True,
+                                               indent=4,
+                                               separators=(',', ': '))))
except Exception as e:
logger.error('TestControl input not valid: {0}'.format(e))
try:
decoded_body = json.loads(body)
logger.info('Valid JSON body (no schema checking) decoded:\n'
- '{0}'.format(json.dumps(decoded_body,
- sort_keys=True,
- indent=4,
- separators=(',', ': '))))
+ '{0}'.format(json.dumps(decoded_body,
+ sort_keys=True,
+ indent=4,
+ separators=(',', ': '))))
logger.info('TestControl input not checked against schema!')
except Exception as e:
logger.error('TestControl input not valid: {0}'.format(e))
- #--------------------------------------------------------------------------
+ # --------------------------------------------------------------------------
# Respond to the caller. If we received otherField 'ThrottleRequest',
# generate the appropriate canned response.
- #--------------------------------------------------------------------------
+ # --------------------------------------------------------------------------
pending_command_list = decoded_body
- logger.debug('===== TEST CONTROL END =====')
- logger.debug('============================')
+ logger.info('===== TEST CONTROL END =====')
+ logger.info('============================')
start_response('202 Accepted', [])
yield ''
+
def main(argv=None):
'''
Main function for the collector start-up.
program_version = 'v{0}'.format(__version__)
program_build_date = str(__updated__)
program_version_message = '%%(prog)s {0} ({1})'.format(program_version,
- program_build_date)
+ program_build_date)
if (__import__('__main__').__doc__ is not None):
program_shortdesc = __import__('__main__').__doc__.split('\n')[1]
else:
'''.format(program_shortdesc, str(__date__))
try:
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Setup argument parser so we can parse the command-line.
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
parser = ArgumentParser(description=program_license,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--influxdb',
metavar='<section>',
help='section to use in the config file')
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Process arguments received.
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
args = parser.parse_args()
verbose = args.verbose
api_version = args.api_version
config_file = args.config
config_section = args.section
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Now read the config file, using command-line supplied values as
# overrides.
- #----------------------------------------------------------------------
- defaults = {'log_file': 'collector.log',
- 'vel_port': '12233',
- 'vel_path': '',
- 'vel_topic_name': ''
- }
+ # ----------------------------------------------------------------------
overrides = {}
- config = ConfigParser.SafeConfigParser(defaults)
+        config = configparser.ConfigParser()
+        # Defaults go in the built-in DEFAULT section so they act as
+        # fallbacks for every section, matching the old behaviour of
+        # SafeConfigParser(defaults).
+        config['DEFAULT'] = {'log_file': 'collector.log',
+                             'vel_port': '12233',
+                             'vel_path': '',
+                             'vel_topic_name': ''
+                             }
config.read(config_file)
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# extract the values we want.
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
global influxdb
global vel_username
global vel_password
global vel_topic_name
global data_storage
-
+
influxdb = config.get(config_section, 'influxdb', vars=overrides)
log_file = config.get(config_section, 'log_file', vars=overrides)
vel_port = config.get(config_section, 'vel_port', vars=overrides)
vel_path = config.get(config_section, 'vel_path', vars=overrides)
- data_storage = config.get(config_section, 'data_storage', vars=overrides)
+ data_storage = config.get(config_section,
+ 'data_storage',
+ vars=overrides)
vel_topic_name = config.get(config_section,
'vel_topic_name',
'throttle_schema_file',
vars=overrides)
test_control_schema_file = config.get(config_section,
- 'test_control_schema_file',
- vars=overrides)
+ 'test_control_schema_file',
+ vars=overrides)
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Finally we have enough info to start a proper flow trace.
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
global logger
logger = logging.getLogger('monitor')
- if verbose > 0:
+ if ((verbose is not None) and (verbose > 0)):
logger.info('Verbose mode on')
logger.setLevel(logging.DEBUG)
else:
date_format)
handler.setFormatter(formatter)
logger.addHandler(handler)
- logger.info('Started')
+ logger.info('Started')
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Log the details of the configuration.
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
logger.debug('Log file = {0}'.format(log_file))
logger.debug('Influxdb server = {0}'.format(influxdb))
logger.debug('Event Listener Port = {0}'.format(vel_port))
logger.debug('Test Control JSON Schema File = {0}'.format(
test_control_schema_file))
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Perform some basic error checking on the config.
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
if (int(vel_port) < 1024 or int(vel_port) > 65535):
logger.error('Invalid Vendor Event Listener port ({0}) '
'specified'.format(vel_port))
vel_path))
vel_path += '/'
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Load up the vel_schema, if it exists.
- #----------------------------------------------------------------------
- if not os.path.exists(vel_schema_file):
+ # ----------------------------------------------------------------------
+ if not os.path.exists(vel_schema_file):
logger.warning('Event Listener Schema File ({0}) not found. '
'No validation will be undertaken.'.format(
vel_schema_file))
vel_schema = json.load(open(vel_schema_file, 'r'))
logger.debug('Loaded the JSON schema file')
- #------------------------------------------------------------------
+ # ------------------------------------------------------------------
# Load up the throttle_schema, if it exists.
- #------------------------------------------------------------------
+ # ------------------------------------------------------------------
if (os.path.exists(throttle_schema_file)):
logger.debug('Loading throttle schema')
throttle_fragment = json.load(open(throttle_schema_file, 'r'))
throttle_schema.update(throttle_fragment)
logger.debug('Loaded the throttle schema')
- #------------------------------------------------------------------
+ # ------------------------------------------------------------------
# Load up the test control _schema, if it exists.
- #------------------------------------------------------------------
+ # ------------------------------------------------------------------
if (os.path.exists(test_control_schema_file)):
logger.debug('Loading test control schema')
test_control_fragment = json.load(
test_control_schema.update(test_control_fragment)
logger.debug('Loaded the test control schema')
- #------------------------------------------------------------------
+ # ------------------------------------------------------------------
# Load up the base_schema, if it exists.
- #------------------------------------------------------------------
+ # ------------------------------------------------------------------
if (os.path.exists(base_schema_file)):
logger.debug('Updating the schema with base definition')
base_schema = json.load(open(base_schema_file, 'r'))
vel_schema.update(base_schema)
logger.debug('Updated the JSON schema file')
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# We are now ready to get started with processing. Start-up the various
# components of the system in order:
#
# 1) Create the dispatcher.
# 2) Register the functions for the URLs of interest.
# 3) Run the webserver.
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
root_url = '/{0}eventListener/v{1}{2}'.\
format(vel_path,
api_version,
throttle_url = '/{0}eventListener/v{1}/clientThrottlingState'.\
format(vel_path, api_version)
set_404_content(root_url)
+ global get_info
+ get_info = root_url
dispatcher = PathDispatcher()
- vendor_event_listener = partial(listener, schema = vel_schema)
+ vendor_event_listener = partial(listener, schema=vel_schema)
dispatcher.register('GET', root_url, vendor_event_listener)
dispatcher.register('POST', root_url, vendor_event_listener)
- vendor_throttle_listener = partial(listener, schema = throttle_schema)
+ vendor_throttle_listener = partial(listener, schema=throttle_schema)
dispatcher.register('GET', throttle_url, vendor_throttle_listener)
dispatcher.register('POST', throttle_url, vendor_throttle_listener)
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# We also add a POST-only mechanism for test control, so that we can
# send commands to a single attached client.
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
test_control_url = '/testControl/v{0}/commandList'.format(api_version)
test_control_listener = partial(test_listener,
- schema = test_control_schema)
+ schema=test_control_schema)
dispatcher.register('POST', test_control_url, test_control_listener)
dispatcher.register('GET', test_control_url, test_control_listener)
- httpd = make_server('', int(vel_port), dispatcher)
+ httpd = make_server('', int(vel_port), vendor_event_listener)
logger.info('Serving on port {0}...'.format(vel_port))
httpd.serve_forever()
return 0
except KeyboardInterrupt:
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# handle keyboard interrupt
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
logger.info('Exiting on keyboard interrupt!')
return 0
except Exception as e:
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Handle unexpected exceptions.
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
if DEBUG or TESTRUN:
raise(e)
indent = len(program_name) * ' '
logger.critical(traceback.format_exc())
return 2
-#------------------------------------------------------------------------------
+# ------------------------------------------------------------------------------
# MAIN SCRIPT ENTRY POINT.
-#------------------------------------------------------------------------------
+# ------------------------------------------------------------------------------
+
+
if __name__ == '__main__':
if TESTRUN:
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Running tests - note that doctest comments haven't been included so
# this is a hook for future improvements.
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
import doctest
doctest.testmod()
if PROFILE:
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Profiling performance. Performance isn't expected to be a major
# issue, but this should all work as expected.
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
import cProfile
import pstats
profile_filename = 'collector_profile.txt'
statsfile.close()
sys.exit(0)
- #--------------------------------------------------------------------------
+ # --------------------------------------------------------------------------
# Normal operation - call through to the main function.
- #--------------------------------------------------------------------------
+ # --------------------------------------------------------------------------
sys.exit(main())
+++ /dev/null
-NOTE: This folder has not been updated since the 2016-11-23 update release. Compatibility with the current VES specification and code has not been verified.
\ No newline at end of file
-NOTE: This folder contains updates for the VES 5.0 release.
-* VNF Vendor Events ver 28.xlsx
-* AttServiceSpecAddendum-VesEventListener-EventRegistration-v1.4.docx
-* AttServiceSpecification-VesEventListener-v5.0.docx
-* CommonEventFormat_28.0.json
+NOTE: This folder contains updates for the VES 7.0 release.
+* CommonEventFormat_30.2.1_ONAP.json
-The other files in this folder have not been updated. Compatibility with the current VES specification and code has not been verified.
\ No newline at end of file
+++ /dev/null
-NOTE: This folder and subfolders have not been updated since the 2016-11-23 update release. Compatibility with the current VES specification and code has not been verified.
\ No newline at end of file
+++ /dev/null
-# AT&T Vendor Event Listener Service - Test Collector - User Guide
-
-Introduction
-============
-
-Background
-----------
-
-This document describes how to use the Test Collector application to simulate
-the service API described in "AT&T Service Specification, Service:
-Vendor Event Listener Revision 2.11, 16-Sep-2016".
-
-Purpose
--------
-
-This User Guide is intended to enable the reader to understand how
- the Test Collector can be used to verify the operation of applications
- supporting the Vendor Event Listener API.
-
-
-Realization
-===========
-
-The realization of the Test Collector is a Python script which acts as a
-server for the Vendor Event Listener API. It uses [jsonschema](https://pypi.python.org/pypi/jsonschema)
-in order to validate the received JSON events against AT&T's published
-schema for the API.
-
-The overall system architecture is shown in Figure 1 and comprises
- three key deliverables:
-
-* The web-application itself.
-
-* A Backend service.
-
-* A validating test collector application.
-
-The Test Collector is described in more detail in the
- following sections. The other two components are described in a separate
- documents:
-
-* Reference VNF User Guide
-
-* Reference VNF Application Note
-
-Figure 1: Realization Architecture
-
-![Realization Architecture](images/architecture.png)
-
-Note that items shown in green in the diagram are existing AT&T
- systems and do not form part of the delivery.
-
-Validating Collector
---------------------
-
-The validating collector provides a basic test capability for
- the Reference VNF. The application implements the Vendor Event
- Listener API providing:
-
-- Logging of new requests.
-
-- Validating requests against the published schema.
-
-- Validating the credentials provided in the request.
-
-- Responding with a 202 Accepted for valid requests.
-
-- Test Control endpoint allowing a test harness or user to set a pending
- commandList, to be sent in response to the next event received.
-
-- Responding with a 202 Accepted plus a pending commandList.
-
-- Responding with a 401 Unauthorized error response-code and a JSON
- exception response for failed authentication.
-
-It is intended to be used in environments where the "real" AT&T
- Vendor Event Listener service is not available in order to test the
- Reference VNF or, indeed, any other software which needs to send
- events to a server.
-
-Using the Validating Collector
-==============================
-
-The test collector can be run manually, either on a Linux platform
- or a Windows PC. It is invoked with a number of command-line
- arguments:
-
-```
- C:> python collector.py --config <file>
- --section <section>
- --verbose
-```
-
-Where:
-
- - **config** defines the path to the config file to be used.
-
- - **section** defines the section in the config file to be used.
-
- - **verbose** controls the level of logging to be generated.
-
-Wherever you chose to run the Test Collector, note that the
- configuration of the backend service running on the VM generating
- the events has to match so that the events generated by the backend
- service are sent to the correct location and the Test Collector is
- listening on the correct ports and URLs. The relevant section of the
- Test Collector config file is:
-
-```
- #------------------------------------------------------------------------------
- # Details of the Vendor Event Listener REST service.
- #
- # REST resources are defined with respect to a ServerRoot:
- # ServerRoot = https://{Domain}:{Port}/{optionalRoutingPath}
- #
- # REST resources are of the form:
- # * {ServerRoot}/eventListener/v{apiVersion}
- # * {ServerRoot}/eventListener/v{apiVersion}/{topicName}
- # * {ServerRoot}/eventListener/v{apiVersion}/eventBatch
- # * {ServerRoot}/eventListener/v{apiVersion}/clientThrottlingState
- #
- # The "vel\_topic\_name" parameter is used as the "topicName" element in the path
- # and may be empty.
- #
- # Note that the path, if present, should have no leading "/" but should have a
- # training "/".
- #------------------------------------------------------------------------------
- vel_domain = 127.0.0.1
- vel_port = 30000
- vel_path = vendor_event_listener/
- vel_username = Alice
- vel_password = This isn't very secure!
- vel_topic_name = example_vnf
-```
-The equivalent section of the backend service's configuration has to
- match, or the equivalent parameters injected in the VM by the
- OpenStack metadata service have to match.
-
-When events are sent from the web application, the results of the
- validation will be displayed on stdout and be written to the log
- file specified in the configuration file.
-
-For example: A Fault event failing to validate:
-
-```
- <machine name>; - - [29/Feb/2016 10:58:28] "POST
- /vendor_event_listener/eventListener/v1/example_vnf HTTP/1.1" 204 0
- Event is not valid against schema! 'eventSeverity' is a required
- property
- Failed validating 'required' in
- schema['properties']['event']['properties']['faultFields']:
- {'description': 'fields specific to fault events',
- 'properties': {'alarmAdditionalInformation': {'description':'additional alarm information',
- 'items': {'$ref': '#/definitions/field'},
- 'type': 'array'},
- 'alarmCondition': {'description': 'alarm condition reportedby the device',
- 'type': 'string'},
- 'alarmInterfaceA': {'description': 'card, port, channel or interface name of the device generating the alarm',
- 'type': 'string'},
- 'eventSeverity': {'description': 'event severity or priority',
- 'enum': ['CRITICAL',
- 'MAJOR',
- 'MINOR',
- 'WARNING',
- 'NORMAL'],
- 'type': 'string'},
- 'eventSourceType': {'description': 'type of event source',
- 'enum': ['other(0)',
- 'router(1)',
- 'switch(2)',
- 'host(3)',
- 'card(4)',
- 'port(5)',
- 'slotThreshold(6)',
- 'portThreshold(7)',
- 'virtualMachine(8)'],
- 'type': 'string'},
- 'faultFieldsVersion': {'description': 'version of the faultFields block',
- 'type': 'number'},
- 'specificProblem': {'description': 'short description of the alarm or problem',
- 'type': 'string'},
- 'vfStatus': {'description': 'virtual function status enumeration',
- 'enum': ['Active',
- 'Idle',
- 'Preparing to terminate',
- 'Ready to terminate',
- 'Requesting termination'],
- 'type': 'string'}},
- 'required': ['alarmCondition',
- 'eventSeverity',
- 'eventSourceType',
- 'specificProblem',
- 'vfStatus'],
- 'type': 'object'}
- On instance['event']['faultFields']:
- {'alarmAdditionalInformation': [{'name': 'extra information',
- 'value': '2'},
- {'name': 'more information',
- 'value': '1'}],
- 'alarmCondition': 'alarm condition 1',
- 'eventSourceType': 'virtualMachine(8)',
- 'faultFieldsVersion': 1,
- 'specificProblem': 'problem 1',
- 'vfStatus': 'Active'}
- Bad JSON body decoded:
- {
- "event": {
- "commonEventHeader": {
- "domain": "fault",
- "eventId": "6",
- "eventType": "event type 1",
- "functionalRole": "unknown",
- "lastEpochMicrosec": 1456743510381000.0,
- "priority": "Normal",
- "reportingEntityId": "Not in OpenStack",
- "reportingEntityName": "Not in OpenStack Environment",
- "sequence": 0,
- "sourceId": "Not in OpenStack",
- "sourceName": "Not in OpenStack Environment",
- "startEpochMicrosec": 1456743510381000.0,
- "version": 1
- },
- "faultFields": {
- "alarmAdditionalInformation": [
- {
- "name": "extra information",
- "value": "2"
- },
- {
- "name": "more information",
- "value": "1"
- }
- ],
- "alarmCondition": "alarm condition 1",
- "eventSourceType": "virtualMachine(8)",
- "faultFieldsVersion": 1,
- "specificProblem": "problem 1",
- "vfStatus": "Active"
- }
- }
- }
-```
-
-Test Control Interface
-----------------------
-
-The test collector will accept any valid commandList on the Test Control interface,
-and will store it until the next event is received at the collector.
-At this point, it will send it to the event sender, and discard the pending commandList.
-
-For example, a POST of the following JSON will result in a measurement interval change
-command being sent to the sender of the next event.
-
-```
-{
- "commandList": [
- {
- "command": {
- "commandType": "measurementIntervalChange",
- "measurementInterval": 60
- }
- }
- ]
-}
-```
-
-A python script "test_control.py" provides an example of commandList injection,
-and contains various functions to generate example command lists.
-
-The test control script can be run manually, either on a Linux platform or a Windows PC.
-It is invoked with optional command-line arguments for the fqdn and port number of the
-test collector to be controlled:
-```
- C:> python test_control.py --fqdn 127.0.0.1 --port 30000
-```
+++ /dev/null
-NOTE: This folder and subfolders have not been updated since the initial release. Compatibility with the current VES specification and code has not been verified.
\ No newline at end of file
+++ /dev/null
-# Run the validating test collector.
-
-python ../../code/collector/collector.py \
- --config ../../config/collector.conf \
- --section default \
- --verbose
+++ /dev/null
-@echo off
-REM Run the validating test collector.
-
-python ..\..\code\collector\collector.py ^
- --config ..\..\config\collector.conf ^
- --section windows ^
- --verbose
# See the License for the specific language governing permissions and
# limitations under the License.
#
-#. What this is: Startup script for the OPNFV VES Collector running under docker.
-
+# What this is: Startup script for the VES Collector running under docker.
+# The variables used below are passed in as environment variables
+# by the docker run command.
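+#
+# A typical invocation (see ves-start.sh) looks like:
+#   docker run -d -e ves_influxdb_host=<host> -e ves_influxdb_port=<port> \
+#     -e ves_grafana_host=<host> -e ves_grafana_port=<port> \
+#     -e ves_grafana_auth='admin:admin' -e ves_loglevel='DEBUG' \
+#     -p 9999:9999 ves-collector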
cd /opt/ves
touch monitor.log
echo; echo "add VES dashboard to Grafana"
curl -H "Accept: application/json" -H "Content-type: application/json" \
-X POST -d @/opt/ves/Dashboard.json \
- http://$ves_grafana_auth@$ves_grafana_host:$ves_grafana_port/api/dashboards/db
+ http://$ves_grafana_auth@$ves_grafana_host:$ves_grafana_port/api/dashboards/db
-if [[ "$ves_loglevel" != "" ]]; then
- python /opt/ves/evel-test-collector/code/collector/monitor.py \
+if [ "$ves_loglevel" != "" ]; then
+ python3 /opt/ves/evel-test-collector/code/collector/monitor.py \
--config /opt/ves/evel-test-collector/config/collector.conf \
--influxdb $ves_influxdb_host:$ves_influxdb_port \
--section default > /opt/ves/monitor.log 2>&1
else
- python /opt/ves/evel-test-collector/code/collector/monitor.py \
+ python3 /opt/ves/evel-test-collector/code/collector/monitor.py \
--config /opt/ves/evel-test-collector/config/collector.conf \
--influxdb $ves_influxdb_host:$ves_influxdb_port \
--section default
+++ /dev/null
-#!/bin/bash
-
-# Script to run the ves project and its dependent containers
-# Maintainer shrinivas.joshi@xoriant.com
-
-#List of containers for this project
-
-#collector -- Read the event received from ves-agent and write it to
-# influxdb
-#grafana -- Read the events written by ves-collector in influxdb and
-# show the graphs on UI
-#influxdb -- Store the events in DB sent by ves-agent
-
-#Port allotment on host system for the micro services running in docker.
-
-#Stop all containers if those are running accedently.
-
-./ves-stop.sh
-
-influx_port=3330
-grafana_port=8880
-vel_ves_port=9999
-
-#Check Docker, collectd and git is installed on the VM
-
-#get local ip address of VM from first interface
-
-
-local_ip=`/sbin/ip -o -4 addr list | grep enp | head -n 1 | awk '{print $4}' | cut -d/ -f1`
-echo -e "Binding VES Services to local ip address $local_ip \n "
-echo ""
-echo -e "--------------------------------------------------------------------\n"
-#Spin influx DB
-echo -e "Starting influxdb container on Local Port Number $influx_port. Please wait..\n"
-docker run -d -p $influx_port:8086 -v $PWD/influxdb influxdb
-if [ $? != 0 ]
-then
- exit 1
-fi
-
-sleep 5 #Give some time to spin the container and bring service up
-echo "Done."
-echo""
-echo -e "--------------------------------------------------------------------\n"
-#Spin Grafana Cotainer
-echo -e "Starting Grafana cotainer on Local port number $grafana_port. Please wait..\n"
-docker run -d -p $grafana_port:3000 grafana/grafana
-if [ $? != 0 ]
-then
- exit 1
-fi
-sleep 5 #Give some time to spin the container and bring service up
-echo "Done."
-echo ""
-echo -e "--------------------------------------------------------------------\n"
-echo ""
-echo -e "--------------------------------------------------------------------\n"
-#Spin collector container.
-echo -e "Starting ves collector container on Local port number $vel_ves_port. Please wait\n"
-docker run -d -e ves_influxdb_host=$local_ip \
- -e ves_influxdb_port=$influx_port -e ves_grafana_host=$local_ip \
- -e ves_grafana_port=$grafana_port -e ves_host=$local_ip \
- -e ves_port=$vel_ves_port -e ves_grafana_auth='admin:admin' \
- -e ves_user='user' -e ves_pass='password' -e ves_path=''\
- -e ves_topic='events' -e ves_loglevel='DEBUG' \
- -p $vel_ves_port:$vel_ves_port ves-collector
-if [ $? != 0 ]
-then
- exit 1
-fi
-sleep 6
-echo "Done."
-echo ""
-echo""
-echo -e "ves stack summary\n"
-
-echo -e "===================================================================================================================\n"
-echo ""
-echo -e "ves collector listner port: $vel_ves_port \n"
-echo -e "Grafana port: $grafana_port \n"
-echo -e "To access grafana dashboard paste url http://$local_ip:$grafana_port in web browser. "
-echo -e "Grafana username/password is admin/admin *** DO NOT CHANGE THE ADMIN PASSWORD, CLICK SKIP OPTION ***\n"
-echo ""
-echo -e "===================================================================================================================\n"
--- /dev/null
+# Copyright 2017 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: A Dockerfile for building a Kafka server for use by the OPNFV
+# VES framework.
+#
+# Status: this is a work in progress, under test.
+#
+
+FROM ubuntu:xenial
+
+RUN apt-get update && apt-get -y upgrade
+RUN apt-get install -y default-jre python-pip wget
+# Required for kafka
+RUN pip install kafka-python
+
+RUN mkdir /opt/ves
+
+RUN cd /opt/ves; \
+wget https://archive.apache.org/dist/kafka/0.11.0.2/kafka_2.11-0.11.0.2.tgz; \
+tar -xvzf kafka_2.11-0.11.0.2.tgz; \
+sed -i -- 's/#delete.topic.enable=true/delete.topic.enable=true/' \
+ kafka_2.11-0.11.0.2/config/server.properties
+
+COPY start.sh /opt/ves/start.sh
+ENTRYPOINT ["/bin/bash", "/opt/ves/start.sh"]
--- /dev/null
+default: all
+
+all:
+ docker build -t ves-kafka .
--- /dev/null
+#!/bin/bash
+# Copyright 2017 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#. What this is: Startup script for a kafka server as used by the OPNFV VES
+#. framework.
+
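+# Map the zookeeper hostname (passed in from ves-start.sh) to its IP address
+# so Kafka can reach it by name.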
+echo "$zookeeper_host $zookeeper_hostname" >>/etc/hosts
+cat /etc/hosts
+cd /opt/ves
+
+sed -i "s/localhost:2181/$zookeeper_hostname:$zookeeper_port/" \
+ kafka_2.11-0.11.0.2/config/server.properties
+grep 2181 kafka_2.11-0.11.0.2/config/server.properties
+sed -i "s~#advertised.listeners=PLAINTEXT://your.host.name:9092~advertised.listeners=PLAINTEXT://$kafka_hostname:$kafka_port~" \
+ kafka_2.11-0.11.0.2/config/server.properties
+grep advertised.listeners kafka_2.11-0.11.0.2/config/server.properties
+
+kafka_2.11-0.11.0.2/bin/kafka-server-start.sh \
+ kafka_2.11-0.11.0.2/config/server.properties
+
--- /dev/null
+#!/bin/bash
+
+# Script to build ves project and its dependent containers
+# Maintainer shrinivas.joshi@xoriant.com
+
+# List of containers for this project
+
+# ves-kafka -- kafka broker that stores events received from collectd or other similar services
+# ves-agent -- reads events from kafka and sends them to the VEL port on the ves-collector container
+# ves-collector -- reads the events received from ves-agent and writes them to influxdb
+# grafana -- reads the events written by ves-collector to influxdb and shows the graphs on the UI
+# influxdb -- stores the events sent by ves-agent
+# kafdrop -- UI for Kafka
+
+# Stop any of these containers that were accidentally left running.
+
+./ves-stop.sh
+
+# Port allotment on host system for the micro services running in docker.
+
+influx_port=3330
+grafana_port=8880
+kafka_port=9092
+kafdrop_port=9000
+zookeeper_port=2181
+vel_ves_port=9999
+
+OS=`uname -s`
+# Check that Docker, collectd, ip and git are installed on the VM
+
+if ! which docker > /dev/null; then
+ echo -e "Docker not found, please install docker from https://docs.docker.com/engine/install/ubuntu\n"
+ exit;
+fi
+
+if ! which collectd > /dev/null; then
+ if [ $OS = 'Darwin' ]
+ then
+ echo -e "Collectd not found, please install collectd using brew install collectd\n"
+ elif [ $OS = 'Linux' ]
+ then
+ echo -e "Collectd not found, please install collectd using sudo apt-get install -y collectd\n"
+ else
+ echo -e "Could not determine kind of system. Collectd not found, please install collectd using whatever method works.\n"
+ fi
+ exit;
+fi
+
+if ! which ip > /dev/null; then
+ if [ $OS = 'Darwin' ]
+ then
+ echo -e "ip not found, please install ip using brew install ip.\n"
+ elif [ $OS = 'Linux' ]
+ then
+ echo -e "/sbin/ip not found, please install ip using sudo apt-get install ip.\n"
+ else
+ echo -e "Could not determine kind of system. ip not found, please install ip using whatever method works.\n"
+ exit 1
+ fi
+ exit;
+fi
+
+clear
+
+#get local ip address of VM from first interface
+if [ $OS = 'Darwin' ]
+then
+ local_ip=`ip -4 addr list | grep en11 | grep inet | awk '{print $2}' | cut -d/ -f1`
+elif [ $OS = 'Linux' ]
+then
+ local_ip=`/sbin/ip -o -4 addr list | grep enp | head -n 1 | awk '{print $4}' | cut -d/ -f1`
+else
+ echo -e "Could not determine which OS this.\n"
+ exit 1
+fi
+echo -e "Binding VES Services to local ip address $local_ip \n "
+echo ""
+echo -e "--------------------------------------------------------------------\n"
+#Spin influx DB
+echo -e "Starting influxdb container on Local Port Number $influx_port. Please wait..\n"
+docker run -d -p $influx_port:8086 -v $PWD/influxdb influxdb:1.8.5
+if [ $? != 0 ]
+then
+ exit 1
+fi
+
+sleep 5 #Give some time to spin the container and bring service up
+echo "Done."
+echo""
+echo -e "--------------------------------------------------------------------\n"
+#Spin Grafana Container
+echo -e "Starting Grafana container on local port number $grafana_port. Please wait..\n"
+docker run -d -p $grafana_port:3000 grafana/grafana
+if [ $? != 0 ]
+then
+ exit 1
+fi
+sleep 5 #Give some time to spin the container and bring service up
+echo "Done."
+echo ""
+echo -e "--------------------------------------------------------------------\n"
+#Spin zookeeper container
+echo -e "Starting zookeeper container on Local port number $zookeeper_port. Please wait..\n"
+docker run -d --add-host mykafka:$local_ip --add-host myzoo:$local_ip \
+ -p $zookeeper_port:2181 -p 2888:2888 -p 3888:3888 \
+ -p 8800:8080 zookeeper
+if [ $? != 0 ]
+then
+ exit 1
+fi
+sleep 5
+echo "Done."
+echo ""
+echo -e "--------------------------------------------------------------------\n"
+#Spin Kafka container.
+echo -e "Starting Kafka container on Local port number $kafka_port. Please wait..\n"
+docker run -d --add-host mykafka:$local_ip -e zookeeper_host=$local_ip \
+ -e zookeeper_hostname='myzoo' -e zookeeper_port=$zookeeper_port \
+ -e kafka_hostname='mykafka' -e kafka_port=$kafka_port \
+ -p $kafka_port:$kafka_port ves-kafka
+if [ $? != 0 ]
+then
+ exit 1
+fi
+sleep 7
+echo "Done."
+echo ""
+echo -e "--------------------------------------------------------------------\n"
+#Spin Kafdrop UI container (this is an optional component)
+echo -e "Starting kafdrop UI container on local port number $kafdrop_port. Please wait..\n"
+docker run -d --add-host mykafka:$local_ip -p $kafdrop_port:9000 \
+ -e KAFKA_BROKERCONNECT=$local_ip:$kafka_port \
+ -e JVM_OPTS="-Xms64M -Xmx128M" obsidiandynamics/kafdrop:latest
+if [ $? != 0 ]
+then
+ exit 1
+fi
+sleep 5
+echo "Done."
+echo ""
+echo -e "--------------------------------------------------------------------\n"
+# Spin ves-collector container.
+echo -e "Starting ves collector container on Local port number $vel_ves_port. Please wait\n"
+docker run -d -e ves_influxdb_host=$local_ip \
+ -e ves_influxdb_port=$influx_port -e ves_grafana_host=$local_ip \
+ -e ves_grafana_port=$grafana_port -e ves_host='localhost' \
+ -e ves_port=$vel_ves_port -e ves_grafana_auth='admin:admin' \
+ -e ves_user='user' -e ves_pass='password' -e ves_path=''\
+ -e ves_topic='events' -e ves_loglevel='DEBUG' \
+ -p $vel_ves_port:$vel_ves_port ves-collector
+if [ $? != 0 ]
+then
+ exit 1
+fi
+sleep 6
+echo "Done."
+echo ""
+echo -e "--------------------------------------------------------------------\n"
+#Spin ves agent container.
+echo -e "Starting ves agent container. Please wait\n"
+docker run -d -e ves_kafka_host=$local_ip \
+ -e ves_kafka_hostname='mykafka' -e ves_host=$local_ip \
+ -e ves_port=$vel_ves_port -e ves_path='' \
+ -e ves_topic='events' -e ves_https='False' -e ves_user='user' \
+ -e ves_pass='password' -e ves_interval='10' \
+ -e ves_kafka_port=$kafka_port -e ves_mode='./yaml/host' \
+ -e ves_version='7' -e ves_loglevel='DEBUG' ves-agent
+if [ $? != 0 ]
+then
+ exit 1
+fi
+sleep 5
+echo "Done."
+echo ""
+echo -e "--------------------------------------------------------------------\n"
+echo""
+echo -e "ves stack summary\n"
+echo -e "===================================================================================================================\n"
+echo ""
+echo -e "Kafka port: $kafka_port \n"
+echo -e "Kafdrop port: $kafdrop_port \n"
+echo -e "ves collector listner port: $vel_ves_port \n"
+echo -e "Grafana port: $grafana_port \n"
+echo -e "To access kafdrop UI use http://$local_ip:$kafdrop_port from your web browser. \n"
+echo -e "To access grafana dashboard paste url http://$local_ip:$grafana_port in web browser. "
+echo -e "Grafana username/password is admin/admin *** DO NOT CHANGE THE ADMIN PASSWORD, CLICK SKIP OPTION ***\n"
+echo ""
+echo -e "===================================================================================================================\n"