\r
To run the solution, you need to invoke the following command\r
\r
- % docker-compose up -d ves-collector\r
- % docker-compose up -d ves-agent\r
+ % docker-compose up -d smo-collector\r
+ % docker-compose up -d agent\r
\r
or simply by the following make command\r
\r
\r
To stop the solution the following command should be invoked.\r
\r
- % docker-compose down -d ves-collector\r
- % docker-compose down -d ves-agent\r
+ % docker-compose down smo-collector\r
+ % docker-compose down agent\r
\r
or simply by the following make command\r
\r
FROM ubuntu:focal
-RUN mkdir /opt/ves
+RUN mkdir /opt/smo
RUN apt-get update && apt-get -y upgrade
RUN apt-get install -y tzdata
RUN pip3 install kafka-python pyaml
RUN pip3 install --upgrade certifi
-RUN mkdir /opt/ves/barometer
-ADD barometer /opt/ves/barometer
+RUN mkdir /opt/smo/barometer
+ADD barometer /opt/smo/barometer
-COPY start.sh /opt/ves/start.sh
-ENTRYPOINT ["/bin/bash", "/opt/ves/start.sh"]
+COPY start.sh /opt/smo/start.sh
+ENTRYPOINT ["/bin/bash", "/opt/smo/start.sh"]
default: all
all:
- docker build -t ves-agent .
+ docker build -t agent .
'Path': '',
'Username': 'user',
'Password': 'password',
- 'Topic': 'events',
+ 'Directory_path': 'events',
'UseHttps': False,
'SendEventInterval': 10.0,
'ApiVersion': 5,
'{}'.format('/{}'.format(self._app_config['Path']) if len(
self._app_config['Path']) > 0 else ''),
int(self._app_config['ApiVersion']), '{}'.format(
- '/{}'.format(self._app_config['Topic']) if len(
- self._app_config['Topic']) > 0 else ''))
+ '/{}'.format(self._app_config['Directory_path']) if len(
+ self._app_config['Directory_path']) > 0 else ''))
logging.info('Vendor Event Listener is at: {}'.format(server_url))
credentials = base64.b64encode('{}:{}'.format(
self._app_config['Username'],
except (HTTPError, URLError) as e:
logging.error('Vendor Event Listener is is not reachable: {}'.format(e))
except timeout:
- logging.error('Timed out - URL %s', url)
+ logging.error('socket timed out - URL %s', url)
except Exception as e:
- logging.error('Vendor Event Listener error: {}'.format(e))
-
+ logging.error('Vendor Event Listener error: {}'.format(e))
+ else:
+ logging.info('Access successful.')
+
def config(self, config):
"""VES option configuration"""
for key, value in config.items('config'):
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
help="Specify log level (default: %(default)s)",
metavar="LEVEL")
- parser.add_argument("--logfile", dest="logfile", default='ves_app.log',
+ parser.add_argument("--logfile", dest="logfile", default='agent_app.log',
help="Specify log file (default: %(default)s)",
metavar="FILE")
args = parser.parse_args()
Domain = 127.0.0.1
Path =
Port = 9999
-Topic = events
-UseHttps = False
+Directory_path = events
+UseHttps = True
Username = user
Password = password
SendEventInterval = 10
# See the License for the specific language governing permissions and
# limitations under the License.
#
-#. What this is: Startup script for the OPNFV VES Agent running under docker.
+#. What this is: Startup script for the OPNFV Agent running under docker.
-echo "Ves-agent is trying to connect Kafka Broker.."
-timeout 1m bash -c 'until printf "" 2>>/dev/null >>/dev/tcp/$ves_kafka_host/$ves_kafka_port; do sleep 2; done'
+echo "Agent is trying to connect to Kafka Broker.."
+timeout 1m bash -c 'until printf "" 2>>/dev/null >>/dev/tcp/$agent_kafka_host/$agent_kafka_port; do sleep 2; done'
success=$?
if [ $success -eq 0 ]
then
exit;
fi
-echo "Ves-agent is trying to connect ves-collector.."
-timeout 1m bash -c 'until printf "" 2>>/dev/null >>/dev/tcp/$ves_host/$ves_port; do sleep 2; done'
+echo "Agent is trying to connect to smo-collector.."
+timeout 1m bash -c 'until printf "" 2>>/dev/null >>/dev/tcp/$smo_collector_host/$smo_collector_port; do sleep 2; done'
success=$?
if [ $success -eq 0 ]
then
- echo "ves-collector is up.."
+ echo "smo-collector is up.."
else
- echo "No ves-collector found .. exiting container.."
+ echo "No smo-collector found .. exiting container.."
exit;
fi
-echo "$ves_kafka_host $ves_kafka_hostname" >>/etc/hosts
-echo "ves_kafka_hostname=$ves_kafka_hostname"
+echo "$agent_kafka_host $agent_kafka_host" >>/etc/hosts
+echo "agent_kafka_host=$agent_kafka_host"
echo "*** /etc/hosts ***"
cat /etc/hosts
-cd /opt/ves/barometer/3rd_party/collectd-ves-app/ves_app
-cat <<EOF >ves_app_config.conf
+cd /opt/smo/barometer/3rd_party/collectd-agent-app/agent_app
+cat <<EOF >agent_app_config.conf
[config]
-Domain = $ves_host
-Port = $ves_port
-Path = $ves_path
-Topic = $ves_topic
-UseHttps = $ves_https
-Username = $ves_user
-Password = $ves_pass
-SendEventInterval = $ves_interval
-ApiVersion = $ves_version
-KafkaPort = $ves_kafka_port
-KafkaBroker = $ves_kafka_host
+Domain = $smo_collector_host
+Port = $smo_collector_port
+Path = $smo_collector_path
+Directory_path = $smo_collector_directory_path
+UseHttps = $smo_collector_https
+Username = $smo_collector_user
+Password = $smo_collector_pass
+SendEventInterval = $agent_interval
+ApiVersion = $smo_collector_version
+KafkaPort = $agent_kafka_port
+KafkaBroker = $agent_kafka_host
EOF
-cat ves_app_config.conf
-echo "ves_mode=$ves_mode"
+cat agent_app_config.conf
+echo "agent_mode=$agent_mode"
-if [[ "$ves_loglevel" == "" ]]; then
- ves_loglevel=ERROR
+if [[ "$loglevel" == "" ]]; then
+ loglevel=ERROR
fi
-python3 ves_app.py --events-schema=$ves_mode.yaml --loglevel $ves_loglevel \
- --config=ves_app_config.conf
+python3 agent_app.py --events-schema=$agent_mode.yaml --loglevel $loglevel \
+ --config=agent_app_config.conf
-# Dump ves_app.log if the command above exits (fails)
-echo "*** ves_app.log ***"
-cat ves_app.log
+# Dump agent_app.log if the command above exits (fails)
+echo "*** agent_app.log ***"
+cat agent_app.log
+++ /dev/null
-{
-"dashboard": {
- "description": "This Dashboard provides a general overview of a host, with templating to select the hostname.",
- "editable": true,
- "gnetId": null,
- "graphTooltip": 0,
- "hideControls": false,
- "id": null,
- "links": [],
- "refresh": "10s",
- "panels": [
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "VESEvents",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "grid": {
- "leftLogBase": 1,
- "leftMax": null,
- "leftMin": null,
- "rightLogBase": 1,
- "rightMax": null,
- "rightMin": null
- },
- "gridPos": {
- "h": 11,
- "w": 12,
- "x": 0,
- "y": 0
- },
- "hiddenSeries": false,
- "id": 3,
- "interval": "30s",
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": true,
- "max": true,
- "min": true,
- "rightSide": false,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.2",
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "system"
- ],
- "type": "tag"
- }
- ],
- "measurement": "measurementload",
- "orderByTime": "ASC",
- "policy": "default",
- "query": "SELECT moving_average(\"longTerm\", 5) AS \"alias\", moving_average(\"midTerm\", 5), moving_average(\"shortTerm\", 5) FROM \"measurementload\" WHERE (\"system\" =~ /^$host$/) AND $timeFilter GROUP BY \"system\"",
- "rawQuery": false,
- "refId": "B",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "longTerm"
- ],
- "type": "field"
- },
- {
- "params": [
- "5"
- ],
- "type": "moving_average"
- },
- {
- "params": [
- "Long Term"
- ],
- "type": "alias"
- }
- ],
- [
- {
- "params": [
- "midTerm"
- ],
- "type": "field"
- },
- {
- "params": [
- "5"
- ],
- "type": "moving_average"
- },
- {
- "params": [
- "Mid Term"
- ],
- "type": "alias"
- }
- ],
- [
- {
- "params": [
- "shortTerm"
- ],
- "type": "field"
- },
- {
- "params": [
- "5"
- ],
- "type": "moving_average"
- },
- {
- "params": [
- "Short Term"
- ],
- "type": "alias"
- }
- ]
- ],
- "tags": [
- {
- "key": "system",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "host load",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "x-axis": true,
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "y-axis": true,
- "y_formats": [
- "short",
- "short"
- ],
- "yaxes": [
- {
- "format": "short",
- "label": "Percent",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "VESEvents",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "grid": {
- "leftLogBase": 1,
- "leftMax": null,
- "leftMin": null,
- "rightLogBase": 1,
- "rightMax": null,
- "rightMin": null
- },
- "gridPos": {
- "h": 11,
- "w": 12,
- "x": 12,
- "y": 0
- },
- "hiddenSeries": false,
- "id": 6,
- "interval": "30s",
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": true,
- "max": true,
- "min": true,
- "rightSide": false,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.2",
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "system"
- ],
- "type": "tag"
- },
- {
- "params": [
- "cpuIdentifier"
- ],
- "type": "tag"
- }
- ],
- "measurement": "measurementcpuusage",
- "orderByTime": "ASC",
- "policy": "default",
- "query": "SELECT mean(\"cpusystem\") FROM \"cpu\" WHERE $timeFilter GROUP BY time(1m) fill(null)",
- "refId": "B",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "cpuUsageUser"
- ],
- "type": "field"
- },
- {
- "params": [
- "5"
- ],
- "type": "moving_average"
- }
- ]
- ],
- "tags": [
- {
- "key": "system",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "host CPU Usage User",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "x-axis": true,
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "y-axis": true,
- "y_formats": [
- "short",
- "short"
- ],
- "yaxes": [
- {
- "format": "short",
- "label": "Percent",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "VESEvents",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "grid": {
- "leftLogBase": 1,
- "leftMax": null,
- "leftMin": null,
- "rightLogBase": 1,
- "rightMax": null,
- "rightMin": null
- },
- "gridPos": {
- "h": 12,
- "w": 12,
- "x": 0,
- "y": 11
- },
- "hiddenSeries": false,
- "id": 2,
- "interval": "30s",
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": true,
- "max": true,
- "min": true,
- "rightSide": false,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 2,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.2",
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "system"
- ],
- "type": "tag"
- },
- {
- "params": [
- "nicIdentifier"
- ],
- "type": "tag"
- }
- ],
- "measurement": "measurementnicperformance",
- "orderByTime": "ASC",
- "policy": "default",
- "query": "SELECT moving_average(\"receivedTotalPacketsAccumulated\", 5) FROM \"measurementnicperformance\" WHERE (\"system\" =~ /^$host$/) AND $timeFilter GROUP BY \"system\", \"nicIdentifier\"",
- "rawQuery": false,
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "receivedTotalPacketsAccumulated"
- ],
- "type": "field"
- },
- {
- "params": [
- "5"
- ],
- "type": "moving_average"
- }
- ]
- ],
- "tags": [
- {
- "key": "system",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Received Octets",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "x-axis": true,
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "y-axis": true,
- "y_formats": [
- "short",
- "short"
- ],
- "yaxes": [
- {
- "format": "short",
- "label": "Octets/Packets",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "VESEvents",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "grid": {
- "leftLogBase": 1,
- "leftMax": null,
- "leftMin": null,
- "rightLogBase": 1,
- "rightMax": null,
- "rightMin": null
- },
- "gridPos": {
- "h": 12,
- "w": 12,
- "x": 12,
- "y": 11
- },
- "hiddenSeries": false,
- "id": 4,
- "interval": "30s",
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": true,
- "max": true,
- "min": true,
- "rightSide": false,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 2,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.2",
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "system"
- ],
- "type": "tag"
- },
- {
- "params": [
- "nicIdentifier"
- ],
- "type": "tag"
- }
- ],
- "measurement": "measurementnicperformance",
- "orderByTime": "ASC",
- "policy": "default",
- "refId": "B",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "receivedOctetsAccumulated"
- ],
- "type": "field"
- },
- {
- "params": [
- "5"
- ],
- "type": "moving_average"
- }
- ]
- ],
- "tags": [
- {
- "key": "system",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Transmitted Octets",
- "tooltip": {
- "shared": true,
- "sort": 2,
- "value_type": "individual"
- },
- "type": "graph",
- "x-axis": true,
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "y-axis": true,
- "y_formats": [
- "short",
- "short"
- ],
- "yaxes": [
- {
- "format": "short",
- "label": "Octets/Packets",
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "VESEvents",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 10,
- "w": 12,
- "x": 0,
- "y": 23
- },
- "hiddenSeries": false,
- "id": 7,
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": true,
- "max": true,
- "min": true,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.2",
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "system"
- ],
- "type": "tag"
- },
- {
- "params": [
- "diskIdentifier"
- ],
- "type": "tag"
- }
- ],
- "measurement": "measurementdiskusage",
- "orderByTime": "ASC",
- "policy": "autogen",
- "query": "SELECT moving_average(\"diskOpsWriteLast\", 5) FROM \"autogen\".\"diskUsage\" WHERE (\"system\" =~ /^$host$/ AND \"disk\" = 'sda') AND $timeFilter GROUP BY \"system\", \"disk\"",
- "rawQuery": false,
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "diskOpsWriteLast"
- ],
- "type": "field"
- },
- {
- "params": [
- "5"
- ],
- "type": "moving_average"
- }
- ]
- ],
- "tags": [
- {
- "key": "system",
- "operator": "=~",
- "value": "/^$host$/"
- },
- {
- "condition": "AND",
- "key": "diskIdentifier",
- "operator": "=",
- "value": "sda"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Disk Usage SDA",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 10,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "VESEvents",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 10,
- "w": 12,
- "x": 12,
- "y": 23
- },
- "hiddenSeries": false,
- "id": 8,
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": true,
- "max": true,
- "min": true,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.2",
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "system"
- ],
- "type": "tag"
- },
- {
- "params": [
- "diskIdentifier"
- ],
- "type": "tag"
- }
- ],
- "measurement": "measurementdiskusage",
- "orderByTime": "ASC",
- "policy": "autogen",
- "query": "SELECT moving_average(\"diskOpsWriteLast\", 5) FROM \"autogen\".\"measurementdiskusage\" WHERE (\"system\" =~ /^$host$/ AND \"diskIdentifier\" = 'sdb') AND $timeFilter GROUP BY \"system\", \"diskIdentifier\"",
- "rawQuery": false,
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "diskOpsWriteLast"
- ],
- "type": "field"
- },
- {
- "params": [
- "5"
- ],
- "type": "moving_average"
- }
- ]
- ],
- "tags": [
- {
- "key": "system",
- "operator": "=~",
- "value": "/^$host$/"
- },
- {
- "condition": "AND",
- "key": "diskIdentifier",
- "operator": "=",
- "value": "sdb"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Disk Usage SDB",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 10,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- },
- {
- "aliasColors": {},
- "bars": false,
- "dashLength": 10,
- "dashes": false,
- "datasource": "VESEvents",
- "fieldConfig": {
- "defaults": {
- "custom": {}
- },
- "overrides": []
- },
- "fill": 1,
- "fillGradient": 0,
- "gridPos": {
- "h": 7,
- "w": 24,
- "x": 0,
- "y": 33
- },
- "hiddenSeries": false,
- "id": 5,
- "legend": {
- "alignAsTable": true,
- "avg": true,
- "current": true,
- "max": true,
- "min": true,
- "show": true,
- "sort": "current",
- "sortDesc": true,
- "total": false,
- "values": true
- },
- "lines": true,
- "linewidth": 1,
- "links": [],
- "nullPointMode": "null",
- "options": {
- "alertThreshold": true
- },
- "percentage": false,
- "pluginVersion": "7.3.2",
- "pointradius": 5,
- "points": false,
- "renderer": "flot",
- "seriesOverrides": [],
- "spaceLength": 10,
- "stack": false,
- "steppedLine": false,
- "targets": [
- {
- "alias": "",
- "dsType": "influxdb",
- "groupBy": [
- {
- "params": [
- "system"
- ],
- "type": "tag"
- }
- ],
- "measurement": "measurementmemoryusage",
- "orderByTime": "ASC",
- "policy": "autogen",
- "refId": "A",
- "resultFormat": "time_series",
- "select": [
- [
- {
- "params": [
- "memoryCached"
- ],
- "type": "field"
- },
- {
- "params": [
- "5"
- ],
- "type": "moving_average"
- },
- {
- "params": [
- "Memory Cached"
- ],
- "type": "alias"
- }
- ],
- [
- {
- "params": [
- "memoryUsed"
- ],
- "type": "field"
- },
- {
- "params": [
- "5"
- ],
- "type": "moving_average"
- },
- {
- "params": [
- "Memory Used"
- ],
- "type": "alias"
- }
- ],
- [
- {
- "params": [
- "memoryFree"
- ],
- "type": "field"
- },
- {
- "params": [
- "5"
- ],
- "type": "moving_average"
- },
- {
- "params": [
- "Memory Free"
- ],
- "type": "alias"
- }
- ]
- ],
- "tags": [
- {
- "key": "system",
- "operator": "=~",
- "value": "/^$host$/"
- }
- ]
- }
- ],
- "thresholds": [],
- "timeFrom": null,
- "timeRegions": [],
- "timeShift": null,
- "title": "Memory",
- "tooltip": {
- "shared": true,
- "sort": 0,
- "value_type": "individual"
- },
- "type": "graph",
- "xaxis": {
- "buckets": null,
- "mode": "time",
- "name": null,
- "show": true,
- "values": []
- },
- "yaxes": [
- {
- "format": "short",
- "label": null,
- "logBase": 10,
- "max": null,
- "min": null,
- "show": true
- },
- {
- "format": "short",
- "label": null,
- "logBase": 1,
- "max": null,
- "min": null,
- "show": true
- }
- ],
- "yaxis": {
- "align": false,
- "alignLevel": null
- }
- }
- ],
- "refresh": "10s",
- "schemaVersion": 26,
- "style": "dark",
- "tags": [],
- "templating": {
- "list": [
- {
- "allValue": null,
- "current": {
- "selected": false,
- "text": "All",
- "value": "$__all"
- },
- "datasource": "VESEvents",
- "definition": "",
- "error": null,
- "hide": 0,
- "includeAll": true,
- "label": "host",
- "multi": true,
- "name": "host",
- "options": [],
- "query": "SHOW TAG VALUES WITH KEY=system",
- "refresh": 1,
- "regex": "",
- "skipUrlSync": false,
- "sort": 0,
- "tagValuesQuery": "",
- "tags": [],
- "tagsQuery": "",
- "type": "query",
- "useTags": false
- }
- ]
- },
- "time": {
- "from": "now-15m",
- "to": "now"
- },
- "timepicker": {
- "now": true,
- "refresh_intervals": [
- "10s",
- "20s",
- "30s",
- "1m"
- ],
- "time_options": [
- "5m",
- "15m",
- "1h",
- "6h",
- "12h",
- "24h",
- "2d",
- "7d",
- "30d"
- ]
- },
- "timezone": "browser",
- "title": "VES Demo",
- "version": 4
- }
-}
-
RUN pip3 install requests jsonschema elasticsearch kafka-python gevent
-RUN mkdir -p /opt/ves/certs
+RUN mkdir -p /opt/smo/certs
# Clone VES Collector
-RUN mkdir /opt/ves/evel-test-collector
-ADD evel-test-collector /opt/ves/evel-test-collector
+RUN mkdir /opt/smo/evel-test-collector
+ADD evel-test-collector /opt/smo/evel-test-collector
-COPY Dashboard.json /opt/ves/Dashboard.json
-COPY start.sh /opt/ves/start.sh
-ENTRYPOINT ["/bin/bash", "/opt/ves/start.sh"]
+
+COPY start.sh /opt/smo/start.sh
+
+ENTRYPOINT ["/bin/bash", "/opt/smo/start.sh"]
default: all
all:
- docker build -t ves-collector .
+ docker build -t smo-collector .
+++ /dev/null
-{ "name":"VESEvents",
- "type":"influxdb",
- "access":"direct",
- "url":"http://127.0.0.1:3330",
- "password":"root",
- "user":"root",
- "database":"veseventsdb",
- "basicAuth":false,
- "basicAuthUser":"",
- "basicAuthPassword":"",
- "withCredentials":false,
- "isDefault":false,
- "jsonData":null
-}
DEBUG = False
PROFILE = False
-# ------------------------------------------------------------------------------
-# Address of influxdb server.
-# ------------------------------------------------------------------------------
-
-influxdb = '127.0.0.1'
-
# ------------------------------------------------------------------------------
# Credentials we expect clients to authenticate themselves with.
# ------------------------------------------------------------------------------
if (len(topic) == 0):
topic = kafka_topic
- logger.debug('Kafka broker ={} and kafka topic={}'.format(kafka_port, topic))
+ logger.debug('Kafka broker ={} and kafka topic={}'.format(kafka_server, topic))
produce_events_in_kafka(jobj, topic)
global producer
if producer is None:
logger.debug('Producer is None')
- producer = KafkaProducer(bootstrap_servers=[kafka_port],
+ producer = KafkaProducer(bootstrap_servers=[kafka_server],
value_serializer=lambda x:
dumps(x).encode('utf-8'))
producer.send(topic, value=jobj)
# ----------------------------------------------------------------------
parser = ArgumentParser(description=program_license,
formatter_class=ArgumentDefaultsHelpFormatter)
- parser.add_argument('-i', '--influxdb',
- dest='influxdb',
- default='localhost',
- help='InfluxDB server addresss')
parser.add_argument('-v', '--verbose',
dest='verbose',
action='count',
# ----------------------------------------------------------------------
# extract the values we want.
# ----------------------------------------------------------------------
- global influxdb
global vel_username
global vel_password
global vel_topic_name
global data_storage
global elasticsearch_domain
global elasticsearch_port
- global kafka_port
+ global kafka_server
global kafka_topic
- influxdb = config.get(config_section, 'influxdb', vars=overrides)
log_file = config.get(config_section, 'log_file', vars=overrides)
vel_port = config.get(config_section, 'vel_port', vars=overrides)
vel_path = config.get(config_section, 'vel_path', vars=overrides)
- kafka_port = config.get(config_section,
- 'kafka_second_port',
+ kafka_server = config.get(config_section,
+ 'kafka_server',
vars=overrides)
kafka_topic = config.get(config_section,
'kafka_topic',
# Log the details of the configuration.
# ---------------------------------------------------------------------
logger.debug('Log file = {0}'.format(log_file))
- logger.debug('Influxdb server = {0}'.format(influxdb))
logger.debug('Event Listener Port = {0}'.format(vel_port))
logger.debug('Event Listener Path = {0}'.format(vel_path))
logger.debug('Event Listener Topic = {0}'.format(vel_topic_name))
dispatcher.register('POST', test_control_url, test_control_listener)
dispatcher.register('GET', test_control_url, test_control_listener)
- httpd = pywsgi.WSGIServer(('', int(vel_port)), vendor_event_listener, keyfile='/opt/ves/certs/vescertificate.key', certfile='/opt/ves/certs/vescertificate.crt')
+ httpd = pywsgi.WSGIServer(('', int(vel_port)), vendor_event_listener, keyfile='/opt/smo/certs/vescertificate.key', certfile='/opt/smo/certs/vescertificate.crt')
logger.info('Serving on port {0}...'.format(vel_port))
httpd.serve_forever()
vel_path =
vel_username =
vel_password =
-vel_topic_name = events
data_storage =
elasticsearch_domain =
elasticsearch_port= 9200
-kafka_second_port =
+vel_topic_name = events
+kafka_server =
kafka_topic =
#------------------------------------------------------------------------------
# See the License for the specific language governing permissions and
# limitations under the License.
#
-#. What this is: Startup script for the OPNFV VES Collector running under docker.
+#. What this is: Startup script for the OPNFV SMO Collector running under docker.
# the variables used below are now passed in as environmental variables
# from the docker run command.
-cd /opt/ves
+cd /opt/smo
touch monitor.log
config_file="evel-test-collector/config/collector.conf"
fi
sed -i -- \
- "s~log_file = /var/log/att/collector.log~log_file = /opt/ves/collector.log~" \
+ "s~log_file = /var/log/att/collector.log~log_file = /opt/smo/collector.log~" \
$config_file
-sed -i -- "s/vel_domain = 127.0.0.1/vel_domain = $ves_host/g" \
+sed -i -- "s/vel_domain = 127.0.0.1/vel_domain = $collector_host/g" \
$config_file
-sed -i -- "s/vel_port = 30000/vel_port = $ves_port/g" \
+sed -i -- "s/vel_port = 30000/vel_port = $collector_port/g" \
$config_file
-sed -i -- "s/vel_username =/vel_username = $ves_user/g" \
+sed -i -- "s/vel_username =/vel_username = $collector_user/g" \
$config_file
-sed -i -- "s/vel_password =/vel_password = $ves_pass/g" \
+sed -i -- "s/vel_password =/vel_password = $collector_pass/g" \
$config_file
-sed -i -- "s~vel_path = vendor_event_listener/~vel_path = $ves_path~g" \
- $config_file
-sed -i -- "s~vel_topic_name = example_vnf~vel_topic_name = $ves_topic~g" \
- $config_file
-sed -i -- "/vel_topic_name = /a influxdb = $ves_influxdb_host:$ves_influxdb_port" \
+sed -i -- "s~vel_path = vendor_event_listener/~vel_path = $collector_path~g" \
$config_file
sed -i -- "s/elasticsearch_domain =/elasticsearch_domain = $elasticsearch_domain/g" \
$config_file
sed -i -- "s/data_storage =/data_storage = $data_storage/g" \
$config_file
-sed -i -- "s/kafka_second_port =/kafka_second_port = $kafka_host_2:$kafka_port_2/g" \
+sed -i -- "s/kafka_server =/kafka_server = $smo_kafka_host:$smo_kafka_port/g" \
$config_file
-sed -i -- "s/kafka_topic =/kafka_topic = $kafka_topic/g" \
+sed -i -- "s/kafka_topic =/kafka_topic = $smo_kafka_topic/g" \
$config_file
echo; echo $config_file
cat $config_file
-if [ "$ves_loglevel" != "" ]; then
- python3 /opt/ves/evel-test-collector/code/collector/monitor.py \
- --config /opt/ves/evel-test-collector/config/collector.conf \
- --influxdb $ves_influxdb_host:$ves_influxdb_port \
- --section default > /opt/ves/monitor.log 2>&1
+if [ "$loglevel" != "" ]; then
+ python3 /opt/smo/evel-test-collector/code/collector/monitor.py \
+ --config /opt/smo/evel-test-collector/config/collector.conf \
+ --section default > /opt/smo/monitor.log 2>&1
else
- python3 /opt/ves/evel-test-collector/code/collector/monitor.py \
- --config /opt/ves/evel-test-collector/config/collector.conf \
- --influxdb $ves_influxdb_host:$ves_influxdb_port \
+ python3 /opt/smo/evel-test-collector/code/collector/monitor.py \
+ --config /opt/smo/evel-test-collector/config/collector.conf \
--section default
fi
RUN pip3 install requests jsonschema kafka-python flask confluent-kafka
-RUN mkdir /opt/ves
+RUN mkdir /opt/smo
# Clone adapter folder
-RUN mkdir /opt/ves/adapter
-ADD adapter /opt/ves/adapter
+RUN mkdir /opt/smo/adapter
+ADD adapter /opt/smo/adapter
-COPY start.sh /opt/ves/start.sh
-ENTRYPOINT ["/bin/bash", "/opt/ves/start.sh"]
+COPY start.sh /opt/smo/start.sh
+ENTRYPOINT ["/bin/bash", "/opt/smo/start.sh"]
default: all
all:
- docker build -t ves-dmaap-adapter .
+ docker build -t smo-dmaap-adapter .
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('-c', '--config',
dest='config',
- default='/opt/ves/adapter/config/adapter.conf',
+ default='/opt/smo/adapter/config/adapter.conf',
help='Use this config file.')
parser.add_argument('-s', '--section',
dest='section',
# See the License for the specific language governing permissions and
# limitations under the License.
-cd /opt/ves
+cd /opt/smo
touch dmaap.log
config_file="adapter/config/adapter.conf"
if [ "$log_level" != "" ]; then
- python3 /opt/ves/adapter/code/dmaap_adapter.py \
- --config /opt/ves/adapter/config/adapter.conf \
- --section default > /opt/ves/dmaap.log 2>&1
+ python3 /opt/smo/adapter/code/dmaap_adapter.py \
+ --config /opt/smo/adapter/config/adapter.conf \
+ --section default > /opt/smo/dmaap.log 2>&1
else
- python3 /opt/ves/adapter/code/dmaap_adapter.py \
- --config /opt/ves/adapter/config/adapter.conf \
+ python3 /opt/smo/adapter/code/dmaap_adapter.py \
+ --config /opt/smo/adapter/config/adapter.conf \
--section default
fi
- 29000:9000
environment:
KAFKA_BROKERCONNECT: smo-kafka:29092
+ smo-dmaap-adapter:
+ container_name: smo-dmaap-adapter
+ build: ./dmaapadapter
+ image: smo-dmaap-adapter
+ networks:
+ - smo-net
+ ports:
+ - 5000:5000
+ environment:
+ kafka_host: "smo-kafka"
+ kafka_port: "29092"
+ log_level: "DEBUG"
smo-collector:
container_name: smo-collector
build: ./collector
ports:
- 9999:9999
volumes:
- - ~/ves-certificate:/opt/ves/certs
+ - ~/ves-certificate:/opt/smo/certs
environment:
- ves_influxdb_host: "smo-influxdb"
- ves_influxdb_port: "8086"
- ves_grafana_host: "smo-grafana"
- ves_grafana_port: "3000"
- data_storage: "elasticsearch"
elasticsearch_domain: "smo-elasticsearch"
- kafka_host_2: "smo-kafka"
- kafka_port_2: "29092"
- kafka_topic: "smo-events"
- ves_host: "smo-collector"
- ves_port: "9999"
- ves_grafana_auth: "admin:admin"
- ves_user: "user"
- ves_pass: "password"
- ves_path: ""
- ves_topic: "events"
- ves_loglevel: "ERROR"
+ smo_kafka_host: "smo-kafka"
+ smo_kafka_port: "29092"
+ smo_kafka_topic: "smo-events"
+ data_storage: "elasticsearch"
+ collector_host: "smo-collector"
+ collector_port: "9999"
+ collector_user: "user"
+ collector_pass: "password"
+ collector_path: ""
+ loglevel: "ERROR"
depends_on:
- smo-kafka
smo-influxdb-connector:
ports:
- 9990:9990
environment:
- ves_influxdb_host: "smo-influxdb"
- ves_influxdb_port: "8086"
- ves_loglevel: "ERROR"
- kafka_host_2: "smo-kafka"
- kafka_port_2: "29092"
+ smo_influxdb_host: "smo-influxdb"
+ smo_influxdb_port: "8086"
+ smo_kafka_host: "smo-kafka"
+ smo_kafka_port: "29092"
+ loglevel: "ERROR"
depends_on:
- smo-kafka
- smo-influxdb
- agent-net
restart: always
environment:
- ves_kafka_host: "agent-kafka"
- ves_kafka_hostname: "agent-kafka"
- ves_host: "smo-collector"
- ves_port: "9999"
- ves_path: ""
- ves_topic: "events"
- ves_https: "True"
- ves_user: "user"
- ves_pass: "password"
- ves_interval: "10"
- ves_kafka_port: "9092"
- ves_mode: "./yaml/host"
- ves_version: "5"
- ves_loglevel: "ERROR"
+ smo_collector_host: "smo-collector"
+ smo_collector_port: "9999"
+ smo_collector_path: ""
+ smo_collector_directory_path: "events"
+ smo_collector_https: "True"
+ smo_collector_user: "user"
+ smo_collector_pass: "password"
+ smo_collector_version: "5"
+ agent_interval: "10"
+ agent_kafka_port: "9092"
+ agent_kafka_host: "agent-kafka"
+ agent_mode: "./yaml/host"
+ loglevel: "ERROR"
depends_on:
- agent-kafka
- smo-collector
- smo-dmaap-adapter:
- container_name: smo-dmaap-adapter
- build: ./dmaapadapter
- image: smo-dmaap-adapter
- networks:
- - smo-net
- ports:
- - 5000:5000
- environment:
- kafka_host: "smo-kafka"
- kafka_port: "29092"
- log_level: "DEBUG"
smo-elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:7.11.1
container_name: smo-elasticsearch
build: ./postconfig
image: smo-post-config
environment:
- ves_influxdb_host: "smo-influxdb"
- ves_influxdb_port: "8086"
- ves_grafana_host: "smo-grafana"
- ves_grafana_port: "3000"
- ves_grafana_auth: "admin:admin"
+ smo_influxdb_host: "smo-influxdb"
+ smo_influxdb_port: "8086"
+ smo_grafana_host: "smo-grafana"
+ smo_grafana_port: "3000"
+ smo_grafana_auth: "admin:admin"
depends_on:
- smo-grafana
networks:
RUN pip3 install requests confluent-kafka
# Clone influxdb-connector
-RUN mkdir -p /opt/ves/influxdb-connector
-ADD influxdb-connector /opt/ves/influxdb-connector
+RUN mkdir -p /opt/smo/influxdb-connector
+ADD influxdb-connector /opt/smo/influxdb-connector
-COPY start.sh /opt/ves/start.sh
-ENTRYPOINT ["/bin/bash", "/opt/ves/start.sh"]
+COPY start.sh /opt/smo/start.sh
+ENTRYPOINT ["/bin/bash", "/opt/smo/start.sh"]
# See the License for the specific language governing permissions and
# limitations under the License.
#
+import sys
+import os
import platform
import json
import logging
import configparser
import logging.handlers
import requests
+import urllib.request as url
from confluent_kafka import Consumer, KafkaError
# ------------------------------------------------------------------------------
logger = None
-
def send_to_influxdb(event, pdata):
- url = 'http://{}/write?db=veseventsdb'.format(influxdb)
+    endpoint = 'http://{}/write?db=eventsdb'.format(influxdb)
    logger.debug('Send {} to influxdb at {}: {}'.format(event, influxdb, pdata))
-    r = requests.post(url, data=pdata, headers={'Content-Type': 'text/plain'})
+    r = requests.post(endpoint, data=pdata, headers={'Content-Type': 'text/plain'})
logger.info('influxdb return code {}'.format(r.status_code))
if r.status_code != 204:
- logger.debug('*** Influxdb save failed, return code {} ***'.format(r.status_code))
-
+ logger.debug('*** Influxdb save failed, return code {} ***'.format(r.status_code))
def process_additional_measurements(val, domain, eventId, startEpochMicrosec, lastEpochMicrosec):
for additionalMeasurements in val:
# Setup argument parser so we can parse the command-line.
# ----------------------------------------------------------------------
parser = ArgumentParser(description='',
- formatter_class=ArgumentDefaultsHelpFormatter)
+ formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--influxdb',
-                    dest='influxdb',
-                    default='localhost',
-                    help='InfluxDB server addresss')
+                        dest='influxdb',
+                        default='localhost',
+                        help='InfluxDB server address')
parser.add_argument('-v', '--verbose',
- dest='verbose',
- action='count',
- help='set verbosity level')
+ dest='verbose',
+ action='count',
+ help='set verbosity level')
parser.add_argument('-c', '--config',
- dest='config',
- default='/opt/ves/connector/config/consumer.conf',
- help='Use this config file.',
- metavar='<file>')
+ dest='config',
+ default='/opt/smo/connector/config/consumer.conf',
+ help='Use this config file.',
+ metavar='<file>')
parser.add_argument('-s', '--section',
- dest='section',
- default='default',
- metavar='<section>',
- help='section to use in the config file')
+ dest='section',
+ default='default',
+ metavar='<section>',
+ help='section to use in the config file')
# ----------------------------------------------------------------------
# Process arguments received.
}
config.read(config_file)
+
# ----------------------------------------------------------------------
# extract the values we want.
# ----------------------------------------------------------------------
influxdb = config.get(config_section, 'influxdb', vars=overrides)
log_file = config.get(config_section, 'log_file', vars=overrides)
-    kafka_server = config.get(config_section, 'kafka_server', vars=overrides)
+    kafka_server = config.get(config_section, 'kafka_server',
+                              vars=overrides)
# ----------------------------------------------------------------------
# Finally we have enough info to start a proper flow trace.
else:
logger.setLevel(logging.DEBUG)
handler = logging.handlers.RotatingFileHandler(log_file,
- maxBytes=1000000,
- backupCount=10)
+ maxBytes=1000000,
+ backupCount=10)
if (platform.system() == 'Windows'):
date_format = '%Y-%m-%d %H:%M:%S'
else:
# kafka Consumer code .
# ----------------------------------------------------------------------
+
settings = {
'bootstrap.servers': kafka_server,
'group.id': 'mygroup',
c = Consumer(settings)
- c.subscribe(['measurement', 'pnfregistration',
- 'fault', 'thresholdcrossingalert', 'heartbeat'])
+ c.subscribe(['measurement','pnfregistration',
+ 'fault','thresholdcrossingalert','heartbeat'])
try:
while True:
save_event_in_db(msg.value())
elif msg.error().code() == KafkaError._PARTITION_EOF:
logger.error('End of partition reached {0}/{1}'
- .format(msg.topic(), msg.partition()))
+ .format(msg.topic(), msg.partition()))
else:
-            logger.error('Error occured: {0}'.format(msg.error().str()))
+            logger.error('Error occurred: {0}'.format(msg.error().str()))
finally:
c.close()
-
if __name__ == '__main__':
main()
[default]
-log_file = /opt/ves/influxdbconnector.log
+log_file = /opt/smo/influxdbconnector.log
kafka_server =
influxdb =
# See the License for the specific language governing permissions and
# limitations under the License.
#
-cd /opt/ves
+cd /opt/smo
touch monitor.log
config_file="influxdb-connector/config/influxdb_connector.conf"
-sed -i -- "s/influxdb =/influxdb = $ves_influxdb_host:$ves_influxdb_port/g" \
+sed -i -- "s/influxdb =/influxdb = $smo_influxdb_host:$smo_influxdb_port/g" \
$config_file
-sed -i -- "s/kafka_server =/kafka_server = $kafka_host_2:$kafka_port_2/g" \
+sed -i -- "s/kafka_server =/kafka_server = $smo_kafka_host:$smo_kafka_port/g" \
$config_file
echo; echo $config_file
cat $config_file
-echo; echo "wait for InfluxDB API at $ves_influxdb_host:$ves_influxdb_port"
+echo; echo "wait for InfluxDB API at $smo_influxdb_host:$smo_influxdb_port"
STARTTIME=$(date +%s)
max_time=60
-while ! curl http://$ves_influxdb_host:$ves_influxdb_port/ping ;
+while ! curl http://$smo_influxdb_host:$smo_influxdb_port/ping ;
do
ELAPSED_TIME=$(($(date +%s) - $STARTTIME))
if [ $ELAPSED_TIME -ge $max_time ]; then
sleep 10
done
echo "Done."
-echo; echo "setup veseventsdb in InfluxDB"
+echo; echo "setup eventsdb in InfluxDB"
# TODO: check if pre-existing and skip
-curl -X POST http://$ves_influxdb_host:$ves_influxdb_port/query \
- --data-urlencode "q=CREATE DATABASE veseventsdb"
+curl -X POST http://$smo_influxdb_host:$smo_influxdb_port/query \
+ --data-urlencode "q=CREATE DATABASE eventsdb"
-if [ "$ves_loglevel" != "" ]; then
- python3 /opt/ves/influxdb-connector/code/influxdb_connector.py \
- --config /opt/ves/influxdb-connector/config/influxdb_connector.conf \
- --influxdb $ves_influxdb_host:$ves_influxdb_port \
- --section default > /opt/ves/monitor.log 2>&1
+if [ "$loglevel" != "" ]; then
+ python3 /opt/smo/influxdb-connector/code/influxdb_connector.py \
+ --config /opt/smo/influxdb-connector/config/influxdb_connector.conf \
+ --influxdb $smo_influxdb_host:$smo_influxdb_port \
+ --section default > /opt/smo/monitor.log 2>&1
else
- python3 /opt/ves/influxdb-connector/code/influxdb_connector.py \
- --config /opt/ves/influxdb-connector/config/influxdb_connector.conf \
- --influxdb $ves_influxdb_host:$ves_influxdb_port \
+ python3 /opt/smo/influxdb-connector/code/influxdb_connector.py \
+ --config /opt/smo/influxdb-connector/config/influxdb_connector.conf \
+ --influxdb $smo_influxdb_host:$smo_influxdb_port \
--section default
fi
# Required for kafka
RUN pip install kafka-python
-RUN mkdir /opt/ves
+RUN mkdir /opt/smo
-RUN cd /opt/ves; \
+RUN cd /opt/smo; \
wget https://archive.apache.org/dist/kafka/0.11.0.2/kafka_2.11-0.11.0.2.tgz; \
tar -xvzf kafka_2.11-0.11.0.2.tgz; \
sed -i -- 's/#delete.topic.enable=true/delete.topic.enable=true/' \
kafka_2.11-0.11.0.2/config/server.properties
-COPY start.sh /opt/ves/start.sh
-ENTRYPOINT ["/bin/bash", "/opt/ves/start.sh"]
+COPY start.sh /opt/smo/start.sh
+ENTRYPOINT ["/bin/bash", "/opt/smo/start.sh"]
default: all
all:
- docker build -t ves-kafka .
+ docker build -t agent-kafka .
echo "$zookeeper_host $zookeeper_hostname" >>/etc/hosts
cat /etc/hosts
-cd /opt/ves
+cd /opt/smo
sed -i "s/localhost:2181/$zookeeper_hostname:$zookeeper_port/" \
kafka_2.11-0.11.0.2/config/server.properties
RUN apt-get install -y git curl
-RUN mkdir /opt/ves
+RUN mkdir /opt/smo
-RUN mkdir /opt/ves/grafana
-ADD grafana /opt/ves/grafana
+RUN mkdir /opt/smo/grafana
+ADD grafana /opt/smo/grafana
-COPY start.sh /opt/ves/start.sh
-ENTRYPOINT ["/bin/bash", "/opt/ves/start.sh"]
+COPY start.sh /opt/smo/start.sh
+ENTRYPOINT ["/bin/bash", "/opt/smo/start.sh"]
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "VESEvents",
+ "datasource": "Events",
"fieldConfig": {
"defaults": {
"custom": {}
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "VESEvents",
+ "datasource": "Events",
"fieldConfig": {
"defaults": {
"custom": {}
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "VESEvents",
+ "datasource": "Events",
"fieldConfig": {
"defaults": {
"custom": {}
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "VESEvents",
+ "datasource": "Events",
"fieldConfig": {
"defaults": {
"custom": {}
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "VESEvents",
+ "datasource": "Events",
"fieldConfig": {
"defaults": {
"custom": {}
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "VESEvents",
+ "datasource": "Events",
"fieldConfig": {
"defaults": {
"custom": {}
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "VESEvents",
+ "datasource": "Events",
"fieldConfig": {
"defaults": {
"custom": {}
"bars": false,
"dashLength": 10,
"dashes": false,
- "datasource": "VESEvents",
+ "datasource": "Events",
"fieldConfig": {
"defaults": {
"custom": {}
"text": "All",
"value": "$__all"
},
- "datasource": "VESEvents",
+ "datasource": "Events",
"definition": "",
"error": null,
"hide": 0,
]
},
"timezone": "browser",
- "title": "VES Demo",
+ "title": "Events Demo",
"version": 4
}
}
-{ "name":"VESEvents",
+{ "name":"Events",
"type":"influxdb",
"access":"direct",
"url":"http://127.0.0.1:3330",
"password":"root",
"user":"root",
- "database":"veseventsdb",
+ "database":"eventsdb",
"basicAuth":false,
"basicAuthUser":"",
"basicAuthPassword":"",
# See the License for the specific language governing permissions and
# limitations under the License.
-cd /opt/ves
+cd /opt/smo
sleep 10
-
echo; echo "Wait for Grafana API to be active"
STARTTIME=$(date +%s)
max_time=60
-while ! curl http://$ves_grafana_host:$ves_grafana_port/ping ;
+while ! curl http://$smo_grafana_host:$smo_grafana_port/ping ;
do
ELAPSED_TIME=$(($(date +%s) - $STARTTIME))
if [ $ELAPSED_TIME -ge $max_time ]; then
sleep 10
done
echo "Done."
-echo; echo "add VESEvents datasource to Grafana"
+echo; echo "add Events datasource to Grafana"
# TODO: check if pre-existing and skip
-cat <<EOF >/opt/ves/grafana/datasource.json
-{ "name":"VESEvents",
+cat <<EOF >/opt/smo/grafana/datasource.json
+{ "name":"Events",
"type":"influxdb",
"access":"direct",
- "url":"http://$ves_influxdb_host:$ves_influxdb_port",
+ "url":"http://$smo_influxdb_host:$smo_influxdb_port",
"password":"root",
"user":"root",
- "database":"veseventsdb",
+ "database":"eventsdb",
"basicAuth":false,
"basicAuthUser":"",
"basicAuthPassword":"",
EOF
curl -H "Accept: application/json" -H "Content-type: application/json" \
- -X POST -d @/opt/ves/grafana/datasource.json \
- http://$ves_grafana_auth@$ves_grafana_host:$ves_grafana_port/api/datasources
+ -X POST -d @/opt/smo/grafana/datasource.json \
+ http://$smo_grafana_auth@$smo_grafana_host:$smo_grafana_port/api/datasources
-echo; echo "add VES dashboard to Grafana"
+echo; echo "add Events dashboard to Grafana"
curl -H "Accept: application/json" -H "Content-type: application/json" \
- -X POST -d @/opt/ves/grafana/dashboard.json \
- http://$ves_grafana_auth@$ves_grafana_host:$ves_grafana_port/api/dashboards/db
+ -X POST -d @/opt/smo/grafana/dashboard.json \
+ http://$smo_grafana_auth@$smo_grafana_host:$smo_grafana_port/api/dashboards/db
project: smo/ves
ref: I152b0ad5a7b6676eef702e3c3811c2f381b0f4f8
containers:
- - name: ves-influxdb
+ - name: smo-influxdb
version: 1.8.5
- - name: ves-grafana
+ - name: smo-grafana
version: 7.5.11
- name: smo-zookeeper
version: 5.5.6
version: 5.5.6
- name: smo-kafdrop
version: 3.27.0
- - name: ves-dmaap-adapter
+ - name: smo-dmaap-adapter
version: 1.0.0
- - name: ves-collector
+ - name: smo-collector
version: 2.0.1
+ - name: smo-post-config
+ version: 1.0.0
+ - name: smo-influxdb-connector
+ version: 1.0.0
+ - name: smo-elasticsearch
+ version: 7.11.1