From 0db2760f4162fe223e992d00e0dfa2e839a01900 Mon Sep 17 00:00:00 2001 From: "Zhang Rong(Jon)" Date: Thu, 4 Aug 2022 09:05:20 +0800 Subject: [PATCH 01/16] Helm: Update default container port to 30022 Issue-ID: INF-280 Signed-off-by: Zhang Rong(Jon) Change-Id: Ieb4a92c07a8115929dbac507e8611ba412a18e47 --- o2common/config/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/o2common/config/config.py b/o2common/config/config.py index 64bf3eb..6e5d19f 100644 --- a/o2common/config/config.py +++ b/o2common/config/config.py @@ -214,7 +214,7 @@ def get_helmcli_access(): host = "127.0.0.1" if host_external is None or host_external == '' \ else host_external port = "10022" if host_external is None or host_external == '' \ - else "50022" + else "30022" helm_host_with_port = host+':'+port helm_user = 'helm' -- 2.16.6 From c76580a35c3461254e1d8c91025019f52f723021 Mon Sep 17 00:00:00 2001 From: Jackie Huang Date: Tue, 11 Oct 2022 20:43:21 +0800 Subject: [PATCH 02/16] docs: workaround the bug with unordered lists docutils 0.17 introduced new semantic tags which generated an unexpected behavior with unordered lists. The workaround is to pin the versions of sphinx_rtd_theme and docutils. 
Ref ticket: https://jira.linuxfoundation.org/plugins/servlet/desk/portal/2/IT-24616 Issue-ID: INF-313 Signed-off-by: Jackie Huang Change-Id: I603ab744aa37e9e9c77d7faa2de937ed1d9df5ed --- docs/requirements-docs.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/requirements-docs.txt b/docs/requirements-docs.txt index 09a0c1c..5d3a810 100644 --- a/docs/requirements-docs.txt +++ b/docs/requirements-docs.txt @@ -1,5 +1,6 @@ sphinx -sphinx-rtd-theme +sphinx-rtd-theme>=1.0.0 +docutils<0.17 sphinxcontrib-httpdomain recommonmark lfdocs-conf -- 2.16.6 From d2f6cc674bf3623caf114a8d7709e70d55ec9340 Mon Sep 17 00:00:00 2001 From: "Zhang Rong(Jon)" Date: Wed, 19 Oct 2022 12:12:59 +0800 Subject: [PATCH 03/16] INF-303 Add Infrastructure Monitoring Fault Service; INF-305 update inventory api name 1. Add Infrastructure Monitoring Fault Service 2. Update the infrastructure inventory API service path Issue-ID: INF-303 Issue-ID: INF-305 Signed-off-by: Zhang Rong(Jon) Change-Id: I38ac307fb5e1102c027b7f0b1061f97cfe47277e --- Dockerfile | 2 + Dockerfile.localtest | 4 +- README.md | 5 +- configs/alarm.yaml | 40 + configs/config.yaml | 0 configs/events.yaml | 3938 ++++++++++++++++++++ o2app/adapter/unit_of_work.py | 20 +- o2app/bootstrap.py | 4 + o2app/entrypoints/flask_application.py | 4 + o2app/entrypoints/redis_eventconsumer.py | 17 +- o2app/entrypoints/resource_watcher.py | 10 + o2app/service/handlers.py | 11 +- o2common/config/config.py | 50 +- o2common/service/watcher/base.py | 3 +- o2common/service/watcher/worker.py | 3 +- o2ims/adapter/alarm_loader.py | 42 + o2ims/adapter/alarm_repository.py | 114 + o2ims/adapter/clients/alarm_dict_client.py | 161 + o2ims/adapter/clients/fault_client.py | 191 + o2ims/adapter/orm.py | 69 + o2ims/domain/alarm_obj.py | 188 + o2ims/domain/alarm_repo.py | 221 ++ o2ims/domain/commands.py | 17 +- o2ims/domain/events.py | 8 + o2ims/service/auditor/alarm_handler.py | 230 ++ o2ims/service/command/notify_alarm_handler.py | 67 + 
o2ims/service/event/alarm_event.py | 30 + o2ims/service/watcher/alarm_watcher.py | 92 + o2ims/views/__init__.py | 10 +- o2ims/views/alarm_dto.py | 69 + o2ims/views/alarm_route.py | 103 + o2ims/views/alarm_view.py | 71 + o2ims/views/api_ns.py | 4 + requirements-stx.txt | 3 +- requirements-test.txt | 1 + tests/conftest.py | 3 + .../test_clientdriver_stx_fault.py | 89 + tests/unit/test_alarm.py | 322 ++ tests/unit/test_ocloud.py | 2 +- tests/unit/test_provision.py | 26 +- 40 files changed, 6211 insertions(+), 33 deletions(-) create mode 100644 configs/alarm.yaml create mode 100644 configs/config.yaml create mode 100755 configs/events.yaml create mode 100644 o2ims/adapter/alarm_loader.py create mode 100644 o2ims/adapter/alarm_repository.py create mode 100644 o2ims/adapter/clients/alarm_dict_client.py create mode 100644 o2ims/adapter/clients/fault_client.py create mode 100644 o2ims/domain/alarm_obj.py create mode 100644 o2ims/domain/alarm_repo.py create mode 100644 o2ims/service/auditor/alarm_handler.py create mode 100644 o2ims/service/command/notify_alarm_handler.py create mode 100644 o2ims/service/event/alarm_event.py create mode 100644 o2ims/service/watcher/alarm_watcher.py create mode 100644 o2ims/views/alarm_dto.py create mode 100644 o2ims/views/alarm_route.py create mode 100644 o2ims/views/alarm_view.py create mode 100644 tests/integration-ocloud/test_clientdriver_stx_fault.py create mode 100644 tests/unit/test_alarm.py diff --git a/Dockerfile b/Dockerfile index 8c33fbe..c3aba8d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -15,6 +15,8 @@ RUN pip install -e /distcloud-client/distributedcloud-client # in case git repo is not accessable # RUN git clone --depth 1 --branch master https://github.com/cloudify-incubator/cloudify-helm-plugin.git /helmsdk +RUN git clone --depth 1 --branch master https://opendev.org/starlingx/fault.git /faultclient +RUN pip install -e /faultclient/python-fmclient/fmclient/ COPY requirements.txt /tmp/ diff --git a/Dockerfile.localtest 
b/Dockerfile.localtest index a46b178..02ee361 100644 --- a/Dockerfile.localtest +++ b/Dockerfile.localtest @@ -7,9 +7,11 @@ RUN apt-get update && apt-get install -y git gcc \ RUN mkdir -p /cgtsclient && mkdir -p /distcloud-client COPY temp/config /cgtsclient/ COPY temp/distcloud-client /distcloud-client/ +COPY temp/fault /faultclient/ RUN pip install -e cgtsclient/sysinv/cgts-client/cgts-client/ \ - && pip install -e /distcloud-client/distributedcloud-client + && pip install -e /distcloud-client/distributedcloud-client \ + && pip install -e /faultclient/python-fmclient/fmclient/ # in case git repo is not accessable COPY requirements.txt constraints.txt requirements-test.txt /tmp/ diff --git a/README.md b/README.md index e2e74e0..ab9166e 100644 --- a/README.md +++ b/README.md @@ -6,8 +6,9 @@ cloned into temp before docker building ```sh mkdir -p temp cd temp -git clone --depth 1 --branch master https://opendev.org/starlingx/config.git -git clone --depth 1 --branch master https://opendev.org/starlingx/distcloud-client.git +git clone --branch master https://opendev.org/starlingx/config.git +git clone --depth 1 --branch r/stx.7.0 https://opendev.org/starlingx/distcloud-client.git +git clone --depth 1 --branch master https://opendev.org/starlingx/fault.git cd config git checkout bca406d1 patch -p1 < ../../cgtsclient-insecure.patch diff --git a/configs/alarm.yaml b/configs/alarm.yaml new file mode 100644 index 0000000..faa76a8 --- /dev/null +++ b/configs/alarm.yaml @@ -0,0 +1,40 @@ +# Copyright (C) 2022 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +dictionary: + pserver: + version: 0.1 + alarmDefinition: [ + "100.104", "100.105" + ] + pserver_cpu: + version: 0.1 + alarmDefinition: [ + "100.101" + ] + pserver_mem: + version: 0.1 + alarmDefinition: [ + "100.103" + ] + pserver_ethernet: + version: 0.1 + alarmDefinition: [ + "100.102" + ] + pserver_if: + version: 0.1 + alarmDefinition: [ + + ] diff --git a/configs/config.yaml b/configs/config.yaml new file mode 100644 index 0000000..e69de29 diff --git a/configs/events.yaml b/configs/events.yaml new file mode 100755 index 0000000..d15a423 --- /dev/null +++ b/configs/events.yaml @@ -0,0 +1,3938 @@ +--- + +# +# Copyright (c) 2013-2021 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +############################################################################ +# +# ALARM & CUSTOMER LOG DOCUMENTATION +# +############################################################################ + +############################################################################ +# +# Record Format ... for documentation +# +# 100.001: +# Type: < Alarm | Log > +# Description: < yaml string > +# OR +# [ < yaml string >, // list of yaml strings +# < yaml string > ] +# OR +# critical: < yaml string > // i.e. dictionary of yaml strings indexed by severity +# major: < yaml string > +# minor: < yaml string > +# warning: < yaml string > +# Entity_Instance_ID: < yaml string ... e.g. host=.interface= > +# OR +# [ < yaml string >, // list of yaml strings +# < yaml string > ] +# Severity: < critical | major | minor | warning > +# OR +# [ critical, major ] // list of severity values +# Proposed_Repair_Action: < yaml string > // NOTE ALARM ONLY FIELD +# OR +# critical: < yaml string > // i.e. 
dictionary of yaml strings indexed by severity +# major: < yaml string > +# minor: < yaml string > +# warning: < yaml string > +# Maintenance_Action: < yaml string > // NOTE ALARM ONLY FIELD +# OR +# critical: < yaml string > // i.e. dictionary of yaml strings indexed by severity +# major: < yaml string > +# minor: < yaml string > +# warning: < yaml string > +# Inhibit_Alarms: < True | False > // NOTE ALARM ONLY FIELD +# Alarm_Type: < operational-violation | ... > +# Probable_Cause: < timing-problem | ... > +# OR +# [ < timing-problem | ... >, // list of probable-causes +# < timing-problem | ... > ] +# Service_Affecting: < True | False > +# Suppression: < True | False > // NOTE ALARM ONLY FIELD +# Management_Affecting_Severity: < none | critical | major | minor | warning > +# // lowest alarm level of this type that will block forced upgrades & orchestration actions +# Degrade_Affecting_Severity: < none | critical | major | minor > +# // lowest alarm level of this type sets a host to 'degraded' +# +# +# Other Notes: +# - use general record format above +# - the only dictionaries allowed are ones indexed by severity +# - if there are multiple lists in a record, +# then they should all have the same # of items and corresponding list items represent instance of alarm +# - if you can't describe the alarm/log based on the above rules, +# then you can use a multi-line string format +# - DELETING alarms from events.yaml: alarms should only be deleted when going to a new Titanium Cloud release +# - if all possible alarm severities are mgmt affecting, the convention is to +# use 'warning' as the Management_Affecting_Severity, even if warning is not a possible severity for that alarm +# +# Testing: +# - Testing of events.yaml can be done by running regular make command +# and specifying fm-doc: +# nice -n 20 ionice -c Idle make -C build fm-doc.rebuild +# - When building, events.yaml will be parsed for correct format, and also +# to ensure that Alarm IDs defined in 
constants.py and fmAlarm.h are +# listed in events.yaml +# +############################################################################ + + +#--------------------------------------------------------------------------- +# Monitored Resource Alarms +#--------------------------------------------------------------------------- + + +100.101: + Type: Alarm + Description: |- + Platform CPU threshold exceeded; threshold x%, actual y% . + CRITICAL @ 95% + MAJOR @ 90% + Entity_Instance_ID: host= + Severity: [critical, major] + Proposed_Repair_Action: "Monitor and if condition persists, contact next level of support." + Maintenance_Action: + critical: degrade + major: degrade + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: threshold-crossed + Service_Affecting: False + Suppression: True + Management_Affecting_Severity: major + Degrade_Affecting_Severity: critical + +100.102: + Type: Alarm + Description: |- + VSwitch CPU threshold exceeded; threshold x%, actual y% . + CRITICAL @ 95% + MAJOR @ 90% + MINOR @ 80% + Entity_Instance_ID: host= + Severity: [critical, major, minor] + Proposed_Repair_Action: "Monitor and if condition persists, contact next level of support." + Maintenance_Action: + critical: degrade + major: degrade + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: threshold-crossed + Service_Affecting: False + Suppression: True + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + +100.103: + Type: Alarm + Description: |- + Memory threshold exceeded; threshold x%, actual y% . + CRITICAL @ 90% + MAJOR @ 80% + Entity_Instance_ID: |- + host= + OR + host=.memory=total + OR + host=.memory=platform + OR + host=.numa=node + Severity: [critical, major] + Proposed_Repair_Action: "Monitor and if condition persists, contact next level of support; may require additional memory on Host." 
+ Maintenance_Action: + critical: degrade + major: degrade + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: threshold-crossed + Service_Affecting: False + Suppression: True + Management_Affecting_Severity: none + Degrade_Affecting_Severity: critical + +100.104: # NOTE This should really be split into two different Alarms. + Type: Alarm + Description: |- + host=.filesystem= + File System threshold exceeded; threshold x%, actual y% . + CRITICAL @ 90% + MAJOR @ 80% + OR + host=.volumegroup= + Monitor and if condition persists, consider adding additional physical volumes to the volume group. + Entity_Instance_ID: |- + host=.filesystem= + OR + host=.volumegroup= + Severity: [critical, major] + Proposed_Repair_Action: "Reduce usage or resize filesystem." + Maintenance_Action: + critical: degrade + major: degrade + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: threshold-crossed + Service_Affecting: False + Suppression: True + Management_Affecting_Severity: critical + Degrade_Affecting_Severity: critical + +100.105: + Type: Alarm + Description: |- + Filesystem Alarm Condition: + filesystem is not added on both controllers and/or does not have the same size: . + Entity_Instance_ID: fs_name= + Severity: critical + Proposed_Repair_Action: "Add image-conversion filesystem on both controllers. + Consult the System Administration Manual for more details. + If problem persists, contact next level of support." + Maintenance_Action: degrade + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: configuration-or-customization-error + Service_Affecting: True + Suppression: False + Management_Affecting_Severity: major + Degrade_Affecting_Severity: none + +#-------- +# 100.105: Retired (with R2 release): previously monitored /etc/nova/instances +# NFS mount from controller to computes +#-------- + +100.106: + Type: Alarm + Description: "'OAM' Port failed." 
+ Entity_Instance_ID: host=.port= + Severity: major + Proposed_Repair_Action: Check cabling and far-end port configuration and status on adjacent equipment. + Maintenance_Action: degrade + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: unknown + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: major + +100.107: + Type: Alarm + Description: |- + 'OAM' Interface degraded. + OR + 'OAM' Interface failed. + Entity_Instance_ID: host=.interface= + Severity: [critical, major] + Proposed_Repair_Action: Check cabling and far-end port configuration and status on adjacent equipment. + Maintenance_Action: + critical: degrade + major: degrade + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: unknown + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: major + +100.108: + Type: Alarm + Description: "'MGMT' Port failed." + Entity_Instance_ID: host=.port= + Severity: major + Proposed_Repair_Action: Check cabling and far-end port configuration and status on adjacent equipment. + Maintenance_Action: degrade + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: unknown + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: major + +100.109: + Type: Alarm + Description: |- + 'MGMT' Interface degraded. + OR + 'MGMT' Interface failed. + Entity_Instance_ID: host=.interface= + Severity: [critical, major] + Proposed_Repair_Action: Check cabling and far-end port configuration and status on adjacent equipment. + Maintenance_Action: + critical: degrade + major: degrade + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: unknown + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: major + +100.110: + Type: Alarm + Description: "'CLUSTER-HOST' Port failed." 
+ Entity_Instance_ID: host=.port= + Severity: major + Proposed_Repair_Action: Check cabling and far-end port configuration and status on adjacent equipment. + Maintenance_Action: degrade + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: unknown + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: major + +100.111: + Type: Alarm + Description: |- + 'CLUSTER-HOST' Interface degraded. + OR + 'CLUSTER-HOST' Interface failed. + Entity_Instance_ID: host=.interface= + Severity: [critical, major] + Proposed_Repair_Action: Check cabling and far-end port configuration and status on adjacent equipment. + Maintenance_Action: + critical: degrade + major: degrade + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: unknown + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: major + +100.112: + Type: Alarm + Description: "'DATA-VRS' Port down." + Entity_Instance_ID: host=.port= + Severity: major + Proposed_Repair_Action: Check cabling and far-end port configuration and status on adjacent equipment. + Maintenance_Action: degrade + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: unknown + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: none + Degrade_Affecting_Severity: major + +100.113: + Type: Alarm + Description: |- + 'DATA-VRS' Interface degraded. + OR + 'DATA-VRS' Interface down. + Entity_Instance_ID: host=.interface= + Severity: [critical, major] + Proposed_Repair_Action: Check cabling and far-end port configuration and status on adjacent equipment. 
+ Maintenance_Action: + major: degrade + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: unknown + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: none + Degrade_Affecting_Severity: major + +100.114: + Type: Alarm + Description: + major: "NTP configuration does not contain any valid or reachable NTP servers." + minor: "NTP address is not a valid or a reachable NTP server." + Entity_Instance_ID: + major: host=.ntp + minor: host=.ntp= + Severity: [major, minor] + Proposed_Repair_Action: "Monitor and if condition persists, contact next level of support." + Maintenance_Action: none + Inhibit_Alarms: + Alarm_Type: communication + Probable_Cause: unknown + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + +100.115: + Type: Alarm + Description: "VSwitch Memory Usage, processor threshold exceeded; threshold x%, actual y% ." + Entity_Instance_ID: host=.processor= + Severity: [critical, major, minor] + Proposed_Repair_Action: "Monitor and if condition persists, contact next level of support." + Maintenance_Action: + critical: degrade + major: degrade + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: threshold-crossed + Service_Affecting: False + Suppression: True + Management_Affecting_Severity: none + Degrade_Affecting_Severity: critical + +100.116: + Type: Alarm + Description: "Cinder LVM Thinpool Usage threshold exceeded; threshold x%, actual y% ." + Entity_Instance_ID: host= + Severity: [critical, major, minor] + Proposed_Repair_Action: "Monitor and if condition persists, contact next level of support." 
+ Maintenance_Action: + critical: degrade + major: degrade + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: threshold-crossed + Service_Affecting: False + Suppression: True + Management_Affecting_Severity: none + Degrade_Affecting_Severity: critical + +100.117: + Type: Alarm + Description: "Nova LVM Thinpool Usage threshold exceeded; threshold x%, actual y% ." + Entity_Instance_ID: host= + Severity: [critical, major, minor] + Proposed_Repair_Action: "Monitor and if condition persists, contact next level of support." + Maintenance_Action: + critical: degrade + major: degrade + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: threshold-crossed + Service_Affecting: False + Suppression: True + Management_Affecting_Severity: major + Degrade_Affecting_Severity: critical + +100.118: + Type: Alarm + Description: Controller cannot establish connection with remote logging server. + Entity_Instance_ID: host= + Severity: minor + Proposed_Repair_Action: "Ensure Remote Log Server IP is reachable from Controller through OAM interface; otherwise contact next level of support." 
+ Maintenance_Action: none + Inhibit_Alarms: False + Alarm_Type: communication + Probable_Cause: communication-subsystem-failure + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + +100.119: + Type: Alarm + Description: |- + does not support the provisioned PTP mode + OR + PTP clocking is out-of-tolerance + OR + is not locked to remote PTP Grand Master + OR + GNSS signal loss state: + OR + 1PPS signal loss state: + Entity_Instance_ID: |- + host=.ptp + OR + host=.ptp=no-lock + OR + host=.ptp=.unsupported=hardware-timestamping + OR + host=.ptp=.unsupported=software-timestamping + OR + host=.ptp=.unsupported=legacy-timestamping + OR + host=.ptp=out-of-tolerance + OR + host=.instance=.ptp=out-of-tolerance + OR + host=.interface=.ptp=signal-loss + Severity: [major, minor] + Proposed_Repair_Action: "Monitor and if condition persists, contact next level of support." + Maintenance_Action: none + Inhibit_Alarms: + Alarm_Type: communication + Probable_Cause: unknown + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + +100.150: + Type: Alarm + Description: + critical: "service open file descriptor has reached its limit" + major: "service open file descriptor is approaching to its limit" + Entity_Instance_ID: |- + host=.resource_type=file-descriptor.service_name= + Severity: [critical, major] + Proposed_Repair_Action: "swact to the other controller if it is available" + Maintenance_Action: none + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: threshold-crossed + Service_Affecting: True + Suppression: False + Management_Affecting_Severity: critical + Degrade_Affecting_Severity: critical + +#--------------------------------------------------------------------------- +# MAINTENANCE +#--------------------------------------------------------------------------- + + +200.001: + Type: Alarm + Description: was 
administratively locked to take it out-of-service. + Entity_Instance_ID: host= + Severity: warning + Proposed_Repair_Action: Administratively unlock Host to bring it back in-service. + Maintenance_Action: none + Inhibit_Alarms: True + Alarm_Type: operational-violation + Probable_Cause: out-of-service + Service_Affecting: True + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +200.004: + Type: Alarm + Description: |- + experienced a service-affecting failure. + Host is being auto recovered by Reboot. + Entity_Instance_ID: host= + Severity: critical + Proposed_Repair_Action: If auto-recovery is consistently unable to recover host to the unlocked-enabled state contact next level of support or lock and replace failing host. + Maintenance_Action: auto recover + Inhibit_Alarms: False + Alarm_Type: operational-violation + Probable_Cause: application-subsystem-failure + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +200.011: + Type: Alarm + Description: experienced a configuration failure during initialization. Host is being re-configured by Reboot. + Entity_Instance_ID: host= + Severity: critical + Proposed_Repair_Action: If auto-recovery is consistently unable to recover host to the unlocked-enabled state contact next level of support or lock and replace failing host. + Maintenance_Action: auto-recover + Inhibit_Alarms: False + Alarm_Type: operational-violation + Probable_Cause: configuration-or-customization-error + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +200.010: + Type: Alarm + Description: access to board management module has failed. + Entity_Instance_ID: host= + Severity: warning + Proposed_Repair_Action: Check Host's board management configuration and connectivity. 
+ Maintenance_Action: auto recover + Inhibit_Alarms: False + Alarm_Type: operational-violation + Probable_Cause: communication-subsystem-failure + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + +200.012: + Type: Alarm + Description: controller function has in-service failure while compute services remain healthy. + Entity_Instance_ID: host= + Severity: major + Proposed_Repair_Action: Lock and then Unlock host to recover. Avoid using 'Force Lock' action as that will impact compute services running on this host. If lock action fails then contact next level of support to investigate and recover. + Maintenance_Action: "degrade - requires manual action" + Inhibit_Alarms: False + Alarm_Type: operational-violation + Probable_Cause: communication-subsystem-failure + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: major + +200.013: + Type: Alarm + Description: compute service of the only available controller is not poperational. Auto-recovery is disabled. Deggrading host instead. + Entity_Instance_ID: host= + Severity: major + Proposed_Repair_Action: Enable second controller and Switch Activity (Swact) over to it as soon as possible. Then Lock and Unlock host to recover its local compute service. + Maintenance_Action: "degrade - requires manual action" + Inhibit_Alarms: False + Alarm_Type: operational-violation + Probable_Cause: communication-subsystem-failure + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: major + +200.005: + Type: Alarm + Description: |- + Degrade: + is experiencing an intermittent 'Management Network' communication failures that have exceeded its lower alarming threshold. + + Failure: + is experiencing a persistent critical 'Management Network' communication failure." 
+ Entity_Instance_ID: host= + Severity: [critical, major] + Proposed_Repair_Action: "Check 'Management Network' connectivity and support for multicast messaging. If problem consistently occurs after that and Host is reset, then contact next level of support or lock and replace failing host." + Maintenance_Action: auto recover + Inhibit_Alarms: False + Alarm_Type: communication + Probable_Cause: unknown + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +200.009: + Type: Alarm + Description: |- + Degrade: + is experiencing an intermittent 'Cluster-host Network' communication failures that have exceeded its lower alarming threshold. + + Failure: + is experiencing a persistent critical 'Cluster-host Network' communication failure." + Entity_Instance_ID: host= + Severity: [critical, major] + Proposed_Repair_Action: "Check 'Cluster-host Network' connectivity and support for multicast messaging. If problem consistently occurs after that and Host is reset, then contact next level of support or lock and replace failing host." + Maintenance_Action: auto recover + Inhibit_Alarms: False + Alarm_Type: communication + Probable_Cause: unknown + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + + +200.006: + Type: Alarm + Description: |- + Main Process Monitor Daemon Failure (major): + 'Process Monitor' (pmond) process is not running or functioning properly. The system is trying to recover this process. + + Monitored Process Failure (critical/major/minor): + Critical: critical '' process has failed and could not be auto-recovered gracefully. + Auto-recovery progression by host reboot is required and in progress. + Major: is degraded due to the failure of its '' process. Auto recovery of this major process is in progress. + Minor: '' process has failed. Auto recovery of this minor process is in progress. + OR + '' process has failed. 
Manual recovery is required. + Entity_Instance_ID: host=.process= + Severity: [critical, major, minor] + Proposed_Repair_Action: |- + If this alarm does not automatically clear after some time and continues to be asserted after Host is locked and unlocked then contact next level of support for root cause analysis and recovery. + + If problem consistently occurs after Host is locked and unlocked then contact next level of support for root cause analysis and recovery." + Maintenance_Action: + critical: auto-recover + major: degrade + minor: + Inhibit_Alarms: False + Alarm_Type: operational-violation + Probable_Cause: unknown + Service_Affecting: + critical: True + major: True + minor: False + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: major + +# 200.006: // NOTE using duplicate ID of a completely analogous Alarm for this +# Type: Log +# Description: |- +# Main Process Monitor Daemon Failure (major) +# 'Process Monitor' (pmond) process is not running or functioning properly. +# The system is trying to recover this process. +# +# Monitored Process Failure (critical/major/minor) +# critical: critical '' process has failed and could not be auto-recovered gracefully. +# Auto-recovery progression by host reboot is required and in progress. +# major: is degraded due to the failure of its '' process. Auto recovery of this major process is in progress. +# minor: '' process has failed. Auto recovery of this minor process is in progress. +# OR +# '' process has failed. Manual recovery is required. 
+# Entity_Instance_ID: host=.process= +# Severity: minor +# Alarm_Type: other +# Probable_Cause: unspecified-reason +# Service_Affecting: True + + +200.007: + Type: Alarm + Description: + critical: "Host is degraded due to a 'critical' out-of-tolerance reading from the '' sensor" + major: "Host is degraded due to a 'major' out-of-tolerance reading from the '' sensor" + minor: "Host is reporting a 'minor' out-of-tolerance reading from the '' sensor" + Entity_Instance_ID: host=.sensor= + Severity: [critical, major, minor] + Proposed_Repair_Action: "If problem consistently occurs after Host is power cycled and or reset, contact next level of support or lock and replace failing host." + Maintenance_Action: + critical: degrade + major: degrade + minor: auto-recover (polling) + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: unspecified-reason + Service_Affecting: + critical: True + major: False + minor: False + Suppression: True + Management_Affecting_Severity: none + Degrade_Affecting_Severity: critical + +200.014: + Type: Alarm + Description: "The Hardware Monitor was unable to load, configure and monitor one or more hardware sensors." + Entity_Instance_ID: host= + Severity: minor + Proposed_Repair_Action: Check Board Management Controller provisioning. Try reprovisioning the BMC. If problem persists try power cycling the host and then the entire server including the BMC power. If problem persists then contact next level of support. + Maintenance_Action: None + Inhibit_Alarms: False + Alarm_Type: operational-violation + Probable_Cause: unknown + Service_Affecting: False + Suppression: True + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + +200.015: + Type: Alarm + Description: Unable to read one or more sensor groups from this host's board management controller + Entity_Instance_ID: host= + Severity: major + Proposed_Repair_Action: Check board management connectivity and try rebooting the board management controller. 
If problem persists contact next level of support or lock and replace failing host. + Maintenance_Action: None + Inhibit_Alarms: False + Alarm_Type: operational-violation + Probable_Cause: unknown + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + + +200.020: + Type: Log + Description: [" has been 'discovered' on the network", + " has been 'added' to the system", + " has 'entered' multi-node failure avoidance", + " has 'exited' multi-node failure avoidance"] + Entity_Instance_ID: [host=.event=discovered, + host=.event=add, + host=.event=mnfa_enter, + host=.event=mnfa_exit] + Severity: warning + Alarm_Type: other + Probable_Cause: unspecified-reason + Service_Affecting: True + + +200.021: + Type: Log + Description: [" board management controller has been 'provisioned'", + " board management controller has been 're-provisioned'", + " board management controller has been 'de-provisioned'", + " manual 'unlock' request", + " manual 'reboot' request", + " manual 'reset' request", + " manual 'power-off' request", + " manual 'power-on' request", + " manual 'reinstall' request", + " manual 'force-lock' request", + " manual 'delete' request", + " manual 'controller switchover' request"] + Entity_Instance_ID: [host=.command=provision, + host=.command=reprovision, + host=.command=deprovision, + host=.command=unlock, + host=.command=reboot, + host=.command=reset, + host=.command=power-off, + host=.command=power-on, + host=.command=reinstall, + host=.command=force-lock, + host=.command=delete, + host=.command=swact] + Severity: warning + Alarm_Type: other + Probable_Cause: unspecified-reason + Service_Affecting: False + + +200.022: + Type: Log + Description: [" is now 'disabled'", + " is now 'enabled'", + " is now 'online'", + " is now 'offline'", + " is 'disabled-failed' to the system", + " reinstall failed", + " reinstall completed successfully"] + Entity_Instance_ID: [host=.state=disabled, + 
host=.state=enabled, + host=.status=online, + host=.status=offline, + host=.status=failed, + host=.status=reinstall-failed, + host=.status=reinstall-complete] + Severity: warning + Alarm_Type: other + Probable_Cause: unspecified-reason + Service_Affecting: True + + +#--------------------------------------------------------------------------- +# BACKUP AND RESTORE +#--------------------------------------------------------------------------- + +210.001: + Type: Alarm + Description: System Backup in progress. + Entity_Instance_ID: host=controller + Severity: minor + Proposed_Repair_Action: No action required. + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: unspecified-reason + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + + +#--------------------------------------------------------------------------- +# SYSTEM CONFIGURATION +#--------------------------------------------------------------------------- + +250.001: + Type: Alarm + Description: Configuration is out-of-date. + Entity_Instance_ID: host= + Severity: major + Proposed_Repair_Action: Administratively lock and unlock to update config. + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +250.002: + Type: Alarm + Description: Ceph cache tiering configuration is out-of-date. + Entity_Instance_ID: cluster= + Severity: major + Proposed_Repair_Action: Apply Ceph service parameter settings. 
+ Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: unspecified-reason + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +250.003: + Type: Alarm + Description: "Kubernetes certificates rotation failed on host[, reason = ]" + Entity_Instance_ID: host= + Severity: major + Proposed_Repair_Action: Lock and unlock the host to update services with new certificates (Manually renew kubernetes certificates first if renewal failed). + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: unspecified-reason + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +#--------------------------------------------------------------------------- +# Deployment Manager Monitor +#--------------------------------------------------------------------------- +260.001: + Type: Alarm + Description: "Deployment Manager resource not reconciled: " + Entity_Instance_ID: resource=,name= + Severity: major + Proposed_Repair_Action: Monitor and if condition persists, validate deployment configuration. 
+ Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: configuration-out-of-date + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +#--------------------------------------------------------------------------- +# VM Compute Services +#--------------------------------------------------------------------------- +270.001: + Type: Alarm + Description: "Host compute services failure[, reason = ]" + Entity_Instance_ID: host=.services=compute + Severity: critical + Proposed_Repair_Action: Wait for host services recovery to complete; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: processing-error + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +270.101: + Type: Log + Description: "Host compute services failure[, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +270.102: + Type: Log + Description: Host compute services enabled + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +270.103: + Type: Log + Description: Host compute services disabled + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + + +275.001: + Type: Log + Description: Host hypervisor is now - + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + + +#--------------------------------------------------------------------------- +# DISTRIBUTED CLOUD +#--------------------------------------------------------------------------- + 
+280.001: + Type: Alarm + Description: is offline + Entity_Instance_ID: subcloud= + Severity: critical + Proposed_Repair_Action: Wait for subcloud to become online; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: communication + Probable_Cause: loss-of-signal + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + +280.002: + Type: Alarm + Description: sync_status is out-of-sync + Entity_Instance_ID: [subcloud=.resource=] + Severity: major + Proposed_Repair_Action: If problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: other + Probable_Cause: application-subsystem-failure + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + +280.003: + Type: Alarm + Description: Subcloud Backup Failure + Entity_Instance_ID: subcloud= + Severity: minor + Proposed_Repair_Action: Retry subcloud backup after checking backup input file. If problem persists contact next level of support. + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: processing-error + Probable_Cause: unknown + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + +#--------------------------------------------------------------------------- +# NETWORK +#--------------------------------------------------------------------------- + +300.001: + Type: Alarm + Description: "'Data' Port failed." + Entity_Instance_ID: host=.port= + Severity: major + Proposed_Repair_Action: Check cabling and far-end port configuration and status on adjacent equipment. 
+ Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: loss-of-signal + Service_Affecting: True + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + + +300.002: + Type: Alarm + Description: |- + 'Data' Interface degraded. + OR + 'Data' Interface failed. + Entity_Instance_ID: host=.interface= + Severity: [critical, major] + Proposed_Repair_Action: Check cabling and far-end port configuration and status on adjacent equipment. + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: loss-of-signal + Service_Affecting: True + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: critical + + +300.003: + Type: Alarm + Description: Networking Agent not responding. + Entity_Instance_ID: host=.agent= + Severity: major + Proposed_Repair_Action: "If condition persists, attempt to clear issue by administratively locking and unlocking the Host." + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: underlying-resource-unavailable + Service_Affecting: True + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + + +300.004: + Type: Alarm + Description: No enabled compute host with connectivity to provider network. + Entity_Instance_ID: service=networking.providernet= + Severity: major + Proposed_Repair_Action: Enable compute hosts with required provider network connectivity. + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: underlying-resource-unavailable + Service_Affecting: True + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + + +300.005: + Type: Alarm + Description: |- + Communication failure detected over provider network x% for ranges y% on host z%. + OR + Communication failure detected over provider network x% on host z%. 
+ Entity_Instance_ID: host=.service=networking.providernet= + Severity: major + Proposed_Repair_Action: Check neighbour switch port VLAN assignments. + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: underlying-resource-unavailable + Service_Affecting: True + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + + +300.010: + Type: Alarm + Description: |- + ML2 Driver Agent non-reachable + OR + ML2 Driver Agent reachable but non-responsive + OR + ML2 Driver Agent authentication failure + OR + ML2 Driver Agent is unable to sync Neutron database + Entity_Instance_ID: host=.ml2driver= + Severity: major + Proposed_Repair_Action: "Monitor and if condition persists, contact next level of support." + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: processing-error + Probable_Cause: underlying-resource-unavailable + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + + +300.012: + Type: Alarm + Description: "Openflow Controller connection failed." + Entity_Instance_ID: host=.openflow-controller= + Severity: major + Proposed_Repair_Action: Check cabling and far-end port configuration and status on adjacent equipment. + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: loss-of-signal + Service_Affecting: True + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: critical + + +300.013: + Type: Alarm + Description: |- + No active Openflow controller connections found for this network. + OR + One or more Openflow controller connections in disconnected state for this network. + Entity_Instance_ID: host=.openflow-network= + Severity: [critical, major] + Proposed_Repair_Action: Check cabling and far-end port configuration and status on adjacent equipment. 
+ Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: loss-of-signal + Service_Affecting: True + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: critical + + +300.014: + Type: Alarm + Description: "OVSDB Manager connection failed." + Entity_Instance_ID: host=.sdn-controller= + Severity: major + Proposed_Repair_Action: Check cabling and far-end port configuration and status on adjacent equipment. + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: loss-of-signal + Service_Affecting: True + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: critical + + +300.015: + Type: Alarm + Description: "No active OVSDB connections found." + Entity_Instance_ID: host= + Severity: critical + Proposed_Repair_Action: Check cabling and far-end port configuration and status on adjacent equipment. + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: loss-of-signal + Service_Affecting: True + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: critical + +300.016: + Type: Alarm + Description: "Dynamic routing agent x% lost connectivity to peer y%." + Entity_Instance_ID: host=,agent=,bgp-peer= + Severity: major + Proposed_Repair_Action: If condition persists, fix connectivity to peer. + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: loss-of-signal + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + + +#--------------------------------------------------------------------------- +# HIGH AVAILABILITY +#--------------------------------------------------------------------------- + +400.001: + Type: Alarm + Description: |- + Service group failure; . + OR + Service group degraded; . + OR + Service group warning; . 
+ Entity_Instance_ID: service_domain=.service_group=.host= + Severity: [critical, major, minor] + Proposed_Repair_Action: Contact next level of support. + Maintenance_Action: + Inhibit_Alarms: False + Alarm_Type: processing-error + Probable_Cause: underlying-resource-unavailable + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: major + + +400.002: + Type: Alarm + Description: |- + Service group loss of redundancy; expected standby member but only standby member available. + OR + Service group loss of redundancy; expected standby member but only standby member available. + OR + Service group loss of redundancy; expected active member but no active members available. + OR + Service group loss of redundancy; expected active member but only active member available. + Entity_Instance_ID: service_domain=.service_group= + Severity: major + Proposed_Repair_Action: "Bring a controller node back in to service, otherwise contact next level of support." + Maintenance_Action: + Inhibit_Alarms: False + Alarm_Type: processing-error + Probable_Cause: underlying-resource-unavailable + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + + +400.003: + Type: Alarm + Description: |- + License key is not installed; a valid license key is required for operation. + OR + License key has expired or is invalid; a valid license key is required for operation. + OR + Evaluation license key will expire on ; there are days remaining in this evaluation. + OR + Evaluation license key will expire on ; there is only 1 day remaining in this evaluation. + Entity_Instance_ID: host= + Severity: critical + Proposed_Repair_Action: Contact next level of support to obtain a new license key. 
+ Maintenance_Action: + Inhibit_Alarms: False + Alarm_Type: processing-error + Probable_Cause: key-expired + Service_Affecting: True + Suppression: False + Management_Affecting_Severity: critical + Degrade_Affecting_Severity: none + + +# 400.004: // NOTE Removed +# Type: Alarm +# Description: Service group software modification detected; . +# Entity_Instance_ID: host= +# Severity: major +# Proposed_Repair_Action: Contact next level of support. +# Maintenance_Action: +# Inhibit_Alarms: False +# Alarm_Type: processing-error +# Probable_Cause: software-program-error +# Service_Affecting: True +# Suppression: False + + +400.005: + Type: Alarm + Description: |- + Communication failure detected with peer over port . + OR + Communication failure detected with peer over port within the last 30 seconds. + Entity_Instance_ID: host=.network= + Severity: major + Proposed_Repair_Action: Check cabling and far-end port configuration and status on adjacent equipment. + Maintenance_Action: + Inhibit_Alarms: False + Alarm_Type: communication + Probable_Cause: underlying-resource-unavailable + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + + +#--------------------------------------------------------------------------- +# SM +#--------------------------------------------------------------------------- + +401.001: + Type: Log + Description: Service group state change from to on host + Entity_Instance_ID: service_domain=.service_group=.host= + Severity: critical + Alarm_Type: processing-error + Probable_Cause: unspecified-reason + Service_Affecting: True + +401.002: + Type: Log + Description: |- + Service group loss of redundancy; expected standby member but no standby members available + or + Service group loss of redundancy; expected standby member but only standby member(s) available + or + Service group has no active members available; expected active member(s) + or + Service group loss of redundancy; 
expected active member(s) but only active member(s) available + Entity_Instance_ID: service_domain=.service_group= + Severity: critical + Alarm_Type: processing-error + Probable_Cause: unspecified-reason + Service_Affecting: True + +401.003: + Type: Log + Description: |- + License key has expired or is invalid + or + Evaluation license key will expire on + or + License key is valid + Entity_Instance_ID: host= + Severity: critical + Alarm_Type: processing-error + Probable_Cause: unspecified-reason + Service_Affecting: True + +401.005: + Type: Log + Description: |- + Communication failure detected with peer over port on host + or + Communication failure detected with peer over port on host within the last seconds + or + Communication established with peer over port on host + Entity_Instance_ID: host=.network= + Severity: critical + Alarm_Type: processing-error + Probable_Cause: unspecified-reason + Service_Affecting: True + +401.007: + Type: Log + Description: Swact or swact-force + Entity_Instance_ID: host= + Severity: critical + Alarm_Type: processing-error + Probable_Cause: unspecified-reason + Service_Affecting: True + + +#--------------------------------------------------------------------------- +# SECURITY +#--------------------------------------------------------------------------- + +500.100: + Type: Alarm + Description: TPM initialization failed on host. + Entity_Instance_ID: host= + Severity: major + Proposed_Repair_Action: reinstall HTTPS certificate; if problem persists contact next level of support. + Maintenance_Action: degrade + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: procedural-error + Service_Affecting: True + Suppression: False + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + +500.101: + Type: Alarm + Description: Developer patch certificate enabled. 
+ Entity_Instance_ID: host=controller + Severity: critical + Proposed_Repair_Action: Reinstall system to disable developer certificate and remove untrusted patches. + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: unspecified-reason + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + +500.200: + Type: Alarm + Description: |- + Certificate 'system certificate-show ' (mode=) expiring soon on . + OR + Certificate '/' expiring soon on . + OR + Certificate '' expiring soon on . + Entity_Instance_ID: |- + system.certificate.mode=.uuid= + OR + namespace=.certificate= + OR + namespace=.secret= + OR + system.certificate.k8sRootCA + Severity: major + Proposed_Repair_Action: Check certificate expiration time. Renew certificate for the entity identified. + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: certificate-expiration + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + +500.210: + Type: Alarm + Description: |- + Certificate 'system certificate-show ' (mode=) expired. + OR + Certificate '/' expired. + OR + Certificate '' expired. + Entity_Instance_ID: |- + system.certificate.mode=.uuid= + OR + namespace=.certificate= + OR + namespace=.secret= + OR + system.certificate.k8sRootCA + Severity: critical + Proposed_Repair_Action: Check certificate expiration time. Renew certificate for the entity identified. 
+ Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: certificate-expiration + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + +500.500: + Type: Log + Description: "Host has IMA Appraisal failure for service when executing , reason = ]" + Entity_Instance_ID: host=.service= + Severity: major + Alarm_Type: integrity-violation + Probable_Cause: information-modification-detected + Service_Affecting: False + + +#--------------------------------------------------------------------------- +# VM +#--------------------------------------------------------------------------- + +700.001: + Type: Alarm + Description: |- + Instance owned by has failed on host + Instance owned by has failed to schedule + Entity_Instance_ID: tenant=.instance= + Severity: critical + Proposed_Repair_Action: The system will attempt recovery; no repair action required + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: processing-error + Probable_Cause: software-error + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +700.002: + Type: Alarm + Description: Instance owned by is paused on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Proposed_Repair_Action: Unpause the instance + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: processing-error + Probable_Cause: procedural-error + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +700.003: + Type: Alarm + Description: Instance owned by is suspended on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Proposed_Repair_Action: Resume the instance + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: processing-error + Probable_Cause: procedural-error + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + 
Degrade_Affecting_Severity: none + +700.004: + Type: Alarm + Description: Instance owned by is stopped on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Proposed_Repair_Action: Start the instance + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: processing-error + Probable_Cause: procedural-error + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +700.005: + Type: Alarm + Description: Instance owned by is rebooting on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Proposed_Repair_Action: Wait for reboot to complete; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: processing-error + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +700.006: + Type: Alarm + Description: Instance owned by is rebuilding on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Proposed_Repair_Action: Wait for rebuild to complete; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: processing-error + Probable_Cause: underlying-resource-unavailable + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +700.007: + Type: Alarm + Description: Instance owned by is evacuating from host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Proposed_Repair_Action: Wait for evacuate to complete; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: processing-error + Probable_Cause: underlying-resource-unavailable + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +700.008: + Type: Alarm + Description: Instance owned by is live migrating from host + 
Entity_Instance_ID: tenant=.instance= + Severity: warning + Proposed_Repair_Action: Wait for live migration to complete; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: processing-error + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +700.009: + Type: Alarm + Description: Instance owned by is cold migrating from host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Proposed_Repair_Action: Wait for cold migration to complete; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: processing-error + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +700.010: + Type: Alarm + Description: Instance owned by has been cold-migrated to host waiting for confirmation + Entity_Instance_ID: tenant=.instance= + Severity: critical + Proposed_Repair_Action: Confirm or revert cold-migrate of instance + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: processing-error + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +700.011: + Type: Alarm + Description: Instance owned by is reverting cold migrate to host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Proposed_Repair_Action: "Wait for cold migration revert to complete; if problem persists contact next level of support" + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: other + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +700.012: + Type: Alarm + Description: Instance owned by is resizing on host + Entity_Instance_ID: tenant=.instance= + Severity: 
critical + Proposed_Repair_Action: Wait for resize to complete; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: processing-error + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +700.013: + Type: Alarm + Description: Instance owned by has been resized on host waiting for confirmation + Entity_Instance_ID: tenant=.instance= + Severity: critical + Proposed_Repair_Action: Confirm or revert resize of instance + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: processing-error + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +700.014: + Type: Alarm + Description: Instance owned by is reverting resize on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Proposed_Repair_Action: "Wait for resize revert to complete; if problem persists contact next level of support" + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: other + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +700.015: + Type: Alarm + Description: Guest Heartbeat not established for instance owned by on host + Entity_Instance_ID: tenant=.instance= + Severity: major + Proposed_Repair_Action: "Verify that the instance is running the Guest-Client daemon, or disable Guest Heartbeat for the instance if no longer needed, otherwise contact next level of support" + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: communication + Probable_Cause: procedural-error + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +700.016: + Type: Alarm + Description: Multi-Node Recovery Mode + Entity_Instance_ID: subsystem=vim + Severity: minor + 
Proposed_Repair_Action: "Wait for the system to exit out of this mode" + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +700.017: + Type: Alarm + Description: Server group policy was not satisfied + Entity_Instance_ID: server-group + Severity: minor + Proposed_Repair_Action: "Migrate instances in an attempt to satisfy the policy; if problem persists contact next level of support" + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: processing-error + Probable_Cause: procedural-error + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + + +700.101: + Type: Log + Description: Instance is enabled on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.102: + Type: Log + Description: Instance owned by has failed[, reason = ] + Instance owned by has failed to schedule[, reason = ] + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.103: + Type: Log + Description: Create issued |by the system> against owned by + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.104: + Type: Log + Description: Creating instance owned by + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.105: + Type: Log + Description: "Create rejected for instance [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.106: + Type: Log + 
Description: "Create cancelled for instance [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.107: + Type: Log + Description: "Create failed for instance [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.108: + Type: Log + Description: Instance owned by has been created + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.109: + Type: Log + Description: "Delete issued |by the system> against instance owned by on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.110: + Type: Log + Description: Deleting instance owned by + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.111: + Type: Log + Description: "Delete rejected for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.112: + Type: Log + Description: "Delete cancelled for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.113: + Type: Log + Description: "Delete failed for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.114: + Type: Log + Description: Deleted instance owned by + Entity_Instance_ID: tenant=.instance= + Severity: critical + 
Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.115: + Type: Log + Description: "Pause issued |by the system> against instance owned by on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.116: + Type: Log + Description: Pause inprogress for instance on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.117: + Type: Log + Description: "Pause rejected for instance enabled on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.118: + Type: Log + Description: "Pause cancelled for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.119: + Type: Log + Description: "Pause failed for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.120: + Type: Log + Description: Pause complete for instance now paused on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.121: + Type: Log + Description: "Unpause issued |by the system> against instance owned by on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.122: + Type: Log + Description: Unpause inprogress for instance on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: 
unspecified-reason + Service_Affecting: False + +700.123: + Type: Log + Description: "Unpause rejected for instance paused on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.124: + Type: Log + Description: "Unpause cancelled for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.125: + Type: Log + Description: "Unpause failed for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.126: + Type: Log + Description: Unpause complete for instance now enabled on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.127: + Type: Log + Description: "Suspend issued |by the system> against instance owned by on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.128: + Type: Log + Description: Suspend inprogress for instance on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.129: + Type: Log + Description: "Suspend rejected for instance enabled on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.130: + Type: Log + Description: "Suspend cancelled for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: 
False + +700.131: + Type: Log + Description: "Suspend failed for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.132: + Type: Log + Description: Suspend complete for instance now suspended on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.133: + Type: Log + Description: "Resume issued |by the system> against instance owned by on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.134: + Type: Log + Description: Resume inprogress for instance on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.135: + Type: Log + Description: "Resume rejected for instance suspended on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.136: + Type: Log + Description: "Resume cancelled for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.137: + Type: Log + Description: "Resume failed for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.138: + Type: Log + Description: Resume complete for instance now enabled on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.139: + Type: Log + Description: "Start 
issued |by the system> against instance owned by on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.140: + Type: Log + Description: Start inprogress for instance on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.141: + Type: Log + Description: "Start rejected for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.142: + Type: Log + Description: "Start cancelled for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.143: + Type: Log + Description: "Start failed for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.144: + Type: Log + Description: Start complete for instance now enabled on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.145: + Type: Log + Description: "Stop issued |by the system|by the instance> against instance owned by on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.146: + Type: Log + Description: Stop inprogress for instance on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.147: + Type: Log + Description: "Stop rejected for instance enabled on host [, 
reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.148: + Type: Log + Description: "Stop cancelled for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.149: + Type: Log + Description: "Stop failed for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.150: + Type: Log + Description: Stop complete for instance now disabled on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.151: + Type: Log + Description: "Live-Migrate issued |by the system> against instance owned by from host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.152: + Type: Log + Description: Live-Migrate inprogress for instance from host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.153: + Type: Log + Description: "Live-Migrate rejected for instance now on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.154: + Type: Log + Description: "Live-Migrate cancelled for instance now on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.155: + Type: Log + Description: "Live-Migrate failed for instance now on host [, reason = ]" + 
Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.156: + Type: Log + Description: Live-Migrate complete for instance now enabled on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.157: + Type: Log + Description: "Cold-Migrate issued |by the system> against instance owned by from host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.158: + Type: Log + Description: Cold-Migrate inprogress for instance from host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.159: + Type: Log + Description: "Cold-Migrate rejected for instance now on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.160: + Type: Log + Description: "Cold-Migrate cancelled for instance now on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.161: + Type: Log + Description: "Cold-Migrate failed for instance now on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.162: + Type: Log + Description: Cold-Migrate complete for instance now enabled on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.163: + Type: Log + Description: "Cold-Migrate-Confirm issued |by the system> against instance 
owned by on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.164: + Type: Log + Description: Cold-Migrate-Confirm inprogress for instance on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.165: + Type: Log + Description: "Cold-Migrate-Confirm rejected for instance now enabled on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.166: + Type: Log + Description: "Cold-Migrate-Confirm cancelled for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.167: + Type: Log + Description: "Cold-Migrate-Confirm failed for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.168: + Type: Log + Description: Cold-Migrate-Confirm complete for instance enabled on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.169: + Type: Log + Description: "Cold-Migrate-Revert issued |by the system> against instance owned by on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.170: + Type: Log + Description: Cold-Migrate-Revert inprogress for instance from host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.171: + Type: Log + 
Description: "Cold-Migrate-Revert rejected for instance now on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.172: + Type: Log + Description: "Cold-Migrate-Revert cancelled for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.173: + Type: Log + Description: "Cold-Migrate-Revert failed for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.174: + Type: Log + Description: Cold-Migrate-Revert complete for instance now enabled on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.175: + Type: Log + Description: "Evacuate issued |by the system> against instance owned by on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.176: + Type: Log + Description: Evacuating instance owned by from host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.177: + Type: Log + Description: "Evacuate rejected for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.178: + Type: Log + Description: "Evacuate cancelled for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.179: + Type: Log + 
Description: "Evacuate failed for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.180: + Type: Log + Description: Evacuate complete for instance now enabled on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.181: + Type: Log + Description: "Reboot <(soft-reboot)|(hard-reboot)> issued |by the system|by the instance> against instance owned by on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.182: + Type: Log + Description: Reboot inprogress for instance on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.183: + Type: Log + Description: "Reboot rejected for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.184: + Type: Log + Description: "Reboot cancelled for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.185: + Type: Log + Description: "Reboot failed for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.186: + Type: Log + Description: Reboot complete for instance now enabled on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.187: + Type: Log + Description: 
"Rebuild issued |by the system> against instance using image on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.188: + Type: Log + Description: Rebuild inprogress for instance on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.189: + Type: Log + Description: "Rebuild rejected for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.190: + Type: Log + Description: "Rebuild cancelled for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.191: + Type: Log + Description: "Rebuild failed for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.192: + Type: Log + Description: Rebuild complete for instance now enabled on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.193: + Type: Log + Description: "Resize issued |by the system> against instance owned by on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.194: + Type: Log + Description: Resize inprogress for instance on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.195: + Type: Log + Description: "Resize rejected for instance on host [, 
reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.196: + Type: Log + Description: "Resize cancelled for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.197: + Type: Log + Description: "Resize failed for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.198: + Type: Log + Description: Resize complete for instance enabled on host waiting for confirmation + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.199: + Type: Log + Description: "Resize-Confirm issued |by the system> against instance owned by on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.200: + Type: Log + Description: Resize-Confirm inprogress for instance on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.201: + Type: Log + Description: "Resize-Confirm rejected for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.202: + Type: Log + Description: "Resize-Confirm cancelled for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.203: + Type: Log + Description: "Resize-Confirm failed for instance on host [, 
reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.204: + Type: Log + Description: Resize-Confirm complete for instance enabled on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.205: + Type: Log + Description: "Resize-Revert issued |by the system> against instance owned by on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.206: + Type: Log + Description: Resize-Revert inprogress for instance on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.207: + Type: Log + Description: "Resize-Revert rejected for instance owned by on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.208: + Type: Log + Description: "Resize-Revert cancelled for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.209: + Type: Log + Description: "Resize-Revert failed for instance on host [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.210: + Type: Log + Description: Resize-Revert complete for instance enabled on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.211: + Type: Log + Description: Guest Heartbeat established for instance on host + 
Entity_Instance_ID: tenant=.instance= + Severity: major + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.212: + Type: Log + Description: Guest Heartbeat disconnected for instance on host + Entity_Instance_ID: tenant=.instance= + Severity: major + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.213: + Type: Log + Description: "Guest Heartbeat failed for instance [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.214: + Type: Log + Description: Instance has been renamed to owned by on host + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.215: + Type: Log + Description: "Guest Health Check failed for instance [, reason = ]" + Entity_Instance_ID: tenant=.instance= + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +700.216: + Type: Log + Description: "Entered Multi-Node Recovery Mode" + Entity_Instance_ID: subsystem=vim + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + + +700.217: + Type: Log + Description: "Exited Multi-Node Recovery Mode" + Entity_Instance_ID: subsystem=vim + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +#--------------------------------------------------------------------------- +# APPLICATION +#--------------------------------------------------------------------------- + +750.001: + Type: Alarm + Description: "Application Upload Failure" + Entity_Instance_ID: k8s_application= + Severity: warning + Proposed_Repair_Action: "Check system inventory log for cause." 
+ Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: processing-error + Probable_Cause: unknown + Service_Affecting: False + Suppression: True + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + +750.002: + Type: Alarm + Description: "Application Apply Failure" + Entity_Instance_ID: k8s_application= + Severity: major + Proposed_Repair_Action: "Retry applying the application. Check application is managed by the system application framework. + If the issue persists, please check system inventory log for cause." + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: processing-error + Probable_Cause: unknown + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + +750.003: + Type: Alarm + Description: "Application Remove Failure" + Entity_Instance_ID: k8s_application= + Severity: major + Proposed_Repair_Action: "Retry removing the application. If the issue persists, please check system inventory log for cause." + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: processing-error + Probable_Cause: unknown + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + +750.004: + Type: Alarm + Description: "Application Apply In Progress" + Entity_Instance_ID: k8s_application= + Severity: warning + Proposed_Repair_Action: "No action required." + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: other + Probable_Cause: unknown + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +750.005: + Type: Alarm + Description: "Application Update In Progress" + Entity_Instance_ID: k8s_application= + Severity: warning + Proposed_Repair_Action: "No action required." 
+ Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: other + Probable_Cause: unknown + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +750.006: + Type: Alarm + Description: "Automatic Application Re-Apply Is Pending" + Entity_Instance_ID: k8s_application= + Severity: warning + Proposed_Repair_Action: "Ensure all hosts are either locked or unlocked. When the system is stable the application will be automatically reapplied." + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: other + Probable_Cause: unknown + Service_Affecting: False + Suppression: True + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + +#--------------------------------------------------------------------------- +# STORAGE +#--------------------------------------------------------------------------- + +800.001: + Type: Alarm + Description: |- + Storage Alarm Condition: + 1 mons down, quorum 1,2 controller-1,storage-0 + Entity_Instance_ID: cluster= + Severity: [critical, major] + Proposed_Repair_Action: "If problem persists, contact next level of support." + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: equipment-malfunction + Service_Affecting: + critical: True + major: False + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +800.010: + Type: Alarm + Description: |- + Potential data loss. No available OSDs in storage replication group. + Entity_Instance_ID: cluster=.peergroup= + Severity: [critical] + Proposed_Repair_Action: "Ensure storage hosts from replication group are unlocked and available. + Check if OSDs of each storage host are up and running. + If problem persists contact next level of support." 
+ Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: equipment-malfunction + Service_Affecting: + critical: True + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +800.011: + Type: Alarm + Description: |- + Loss of replication in peergroup. + Entity_Instance_ID: cluster=.peergroup= + Severity: [major] + Proposed_Repair_Action: "Ensure storage hosts from replication group are unlocked and available. + Check if OSDs of each storage host are up and running. + If problem persists contact next level of support." + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: equipment-malfunction + Service_Affecting: + major: True + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +800.002: + Type: Alarm + Description: ["Image storage media is full: There is not enough disk space on the image storage media.", + "Instance snapshot failed: There is not enough disk space on the image storage media.", + "Supplied () and generated from uploaded image () did not match. Setting image status to 'killed'.", + "Error in store configuration. 
Adding images to store is disabled.", + "Forbidden upload attempt: ", + "Insufficient permissions on image storage media: ", + "Denying attempt to upload image larger than bytes.", + "Denying attempt to upload image because it exceeds the quota: ", + "Received HTTP error while uploading image ", + "Client disconnected before sending all data to backend", + "Failed to upload image "] + Entity_Instance_ID: ["image=, instance=", + "tenant=, instance=", + "image=, instance=", + "image=, instance=", + "image=, instance=", + "image=, instance=", + "image=, instance=", + "image=, instance=", + "image=, instance=", + "image=, instance=", + "image=, instance="] + Alarm_Type: [physical-violation, + physical-violation, + integrity-violation, + integrity-violation, + security-service-or-mechanism-violation, + security-service-or-mechanism-violation, + security-service-or-mechanism-violation, + security-service-or-mechanism-violation, + communication, + communication, + operational-violation] + Severity: warning + Proposed_Repair_Action: + Maintenance_Action: + Inhibit_Alarms: + Probable_Cause: unspecified-reason + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + +800.100: + Type: Alarm + Description: |- + Storage Alarm Condition: + Cinder I/O Congestion is above normal range and is building + Entity_Instance_ID: cinder_io_monitor + Severity: major + Proposed_Repair_Action: "Reduce the I/O load on the Cinder LVM backend. Use + Cinder QoS mechanisms on high usage volumes." 
+ Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: qos + Probable_Cause: congestion + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + +800.101: + Type: Alarm + Description: |- + Storage Alarm Condition: + Cinder I/O Congestion is high and impacting guest performance + Entity_Instance_ID: cinder_io_monitor + Severity: critical + Proposed_Repair_Action: "Reduce the I/O load on the Cinder LVM backend. + Cinder actions may fail until congestion is reduced. + Use Cinder QoS mechanisms on high usage volumes." + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: qos + Probable_Cause: congestion + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +800.103: + Type: Alarm + Description: |- + Storage Alarm Condition: + [ Metadata usage for LVM thin pool / exceeded threshold and automatic extension failed, + Metadata usage for LVM thin pool / exceeded threshold ]; threshold x%, actual y%. + Entity_Instance_ID: .lvmthinpool=/ + Severity: critical + Proposed_Repair_Action: "Increase Storage Space Allotment for Cinder on the 'lvm' backend. + Consult the System Administration Manual for more details. + If problem persists, contact next level of support." + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: threshold-crossed + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: major + Degrade_Affecting_Severity: none + +800.104: + Type: Alarm + Description: |- + Storage Alarm Condition: + configuration failed to apply on host: . + Entity_Instance_ID: storage_backend= + Severity: critical + Proposed_Repair_Action: "Update backend setting to reapply configuration. + Consult the System Administration Manual for more details. + If problem persists, contact next level of support." 
+ Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: configuration-or-customization-error + Service_Affecting: True + Suppression: False + Management_Affecting_Severity: major + Degrade_Affecting_Severity: none + +#--------------------------------------------------------------------------- +# KUBERNETES +#--------------------------------------------------------------------------- + +850.001: + Type: Alarm + Description: Persistent Volume Migration Error + Entity_Instance_ID: kubernetes=PV-migration-failed + Severity: major + Proposed_Repair_Action: "Manually execute /usr/bin/ceph_k8s_update_monitors.sh + to confirm PVs are updated, then lock/unlock to clear + alarms. If problem persists, contact next level of + support." + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: processing-error + Probable_Cause: communication-subsystem-failure + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: none + Degrade_Affecting_Severity: none + +#--------------------------------------------------------------------------- +# SOFTWARE +#--------------------------------------------------------------------------- + +900.001: + Type: Alarm + Description: Patching operation in progress. + Entity_Instance_ID: host=controller + Severity: minor + Proposed_Repair_Action: Complete reboots of affected hosts. + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: environmental + Probable_Cause: unspecified-reason + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.002: + Type: Alarm + Description: Patch host install failure. Command "sw-patch host-install" failed. + Entity_Instance_ID: host= + Severity: major + Proposed_Repair_Action: Undo patching operation. Check patch logs on the target host (i.e. 
/var/log/patching.log) + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: environmental + Probable_Cause: unspecified-reason + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.003: + Type: Alarm + Description: A patch with state 'obsolete' in its metadata has been uploaded. + Entity_Instance_ID: host=controller + Severity: warning + Proposed_Repair_Action: Remove and delete obsolete patches. + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: environmental + Probable_Cause: unspecified-reason + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.004: + Type: Alarm + Description: The upgrade and running software version do not match. Command host-upgrade failed. + Entity_Instance_ID: host= + Severity: major + Proposed_Repair_Action: Reinstall host to update applied load. + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.005: + Type: Alarm + Description: System Upgrade in progress. + Entity_Instance_ID: host=controller + Severity: minor + Proposed_Repair_Action: No action required. + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: unspecified-reason + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.006: + Type: Alarm + Description: Device image update operation in progress. + Entity_Instance_ID: host=controller + Severity: minor + Proposed_Repair_Action: Complete reboots of affected hosts. 
+ Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: environmental + Probable_Cause: unspecified-reason + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.007: + Type: Alarm + Description: Kubernetes upgrade in progress. + Entity_Instance_ID: host=controller + Severity: minor + Proposed_Repair_Action: No action required. + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: unspecified-reason + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.008: + Type: Alarm + Description: Kubernetes rootca update in progress + Entity_Instance_ID: host=controller + Severity: minor + Proposed_Repair_Action: Wait for kubernetes rootca procedure to complete + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: unspecified-reason + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.009: + Type: Alarm + Description: Kubernetes root CA update aborted, certificates may not be fully updated. Command "system kube-rootca-update-abort" has been run. + Entity_Instance_ID: host=controller + Severity: minor + Proposed_Repair_Action: Fully update certificates by a new root CA update. 
+ Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: operational-violation + Probable_Cause: unspecified-reason + Service_Affecting: False + Suppression: False + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.101: + Type: Alarm + Description: Software patch auto-apply inprogress + Entity_Instance_ID: orchestration=sw-patch + Severity: major + Proposed_Repair_Action: Wait for software patch auto-apply to complete; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.102: + Type: Alarm + Description: Software patch auto-apply aborting + Entity_Instance_ID: orchestration=sw-patch + Severity: major + Proposed_Repair_Action: Wait for software patch auto-apply abort to complete; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.103: + Type: Alarm + Description: Software patch auto-apply failed. Command "sw-manager patch-strategy apply" failed. 
+ Entity_Instance_ID: orchestration=sw-patch + Severity: critical + Proposed_Repair_Action: Attempt to apply software patches manually; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: underlying-resource-unavailable + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.111: + Type: Log + Description: Software patch auto-apply start + Entity_Instance_ID: orchestration=sw-patch + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.112: + Type: Log + Description: Software patch auto-apply inprogress + Entity_Instance_ID: orchestration=sw-patch + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.113: + Type: Log + Description: Software patch auto-apply rejected + Entity_Instance_ID: orchestration=sw-patch + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.114: + Type: Log + Description: Software patch auto-apply cancelled + Entity_Instance_ID: orchestration=sw-patch + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.115: + Type: Log + Description: Software patch auto-apply failed + Entity_Instance_ID: orchestration=sw-patch + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.116: + Type: Log + Description: Software patch auto-apply completed + Entity_Instance_ID: orchestration=sw-patch + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.117: + Type: Log + Description: Software patch auto-apply abort + Entity_Instance_ID: orchestration=sw-patch + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + 
Service_Affecting: False + +900.118: + Type: Log + Description: Software patch auto-apply aborting + Entity_Instance_ID: orchestration=sw-patch + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.119: + Type: Log + Description: Software patch auto-apply abort rejected + Entity_Instance_ID: orchestration=sw-patch + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.120: + Type: Log + Description: Software patch auto-apply abort failed + Entity_Instance_ID: orchestration=sw-patch + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.121: + Type: Log + Description: Software patch auto-apply aborted + Entity_Instance_ID: orchestration=sw-patch + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.201: + Type: Alarm + Description: Software upgrade auto-apply inprogress + Entity_Instance_ID: orchestration=sw-upgrade + Severity: major + Proposed_Repair_Action: Wait for software upgrade auto-apply to complete; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.202: + Type: Alarm + Description: Software upgrade auto-apply aborting + Entity_Instance_ID: orchestration=sw-upgrade + Severity: major + Proposed_Repair_Action: Wait for software upgrade auto-apply abort to complete; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.203: + Type: Alarm + Description: Software upgrade 
auto-apply failed. Command "sw-manager update-strategy apply" failed + Entity_Instance_ID: orchestration=sw-upgrade + Severity: critical + Proposed_Repair_Action: Attempt to apply software upgrade manually; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: underlying-resource-unavailable + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.211: + Type: Log + Description: Software upgrade auto-apply start + Entity_Instance_ID: orchestration=sw-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.212: + Type: Log + Description: Software upgrade auto-apply inprogress + Entity_Instance_ID: orchestration=sw-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.213: + Type: Log + Description: Software upgrade auto-apply rejected + Entity_Instance_ID: orchestration=sw-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.214: + Type: Log + Description: Software upgrade auto-apply cancelled + Entity_Instance_ID: orchestration=sw-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.215: + Type: Log + Description: Software upgrade auto-apply failed + Entity_Instance_ID: orchestration=sw-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.216: + Type: Log + Description: Software upgrade auto-apply completed + Entity_Instance_ID: orchestration=sw-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.217: + Type: Log + Description: Software upgrade auto-apply abort + Entity_Instance_ID: 
orchestration=sw-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.218: + Type: Log + Description: Software upgrade auto-apply aborting + Entity_Instance_ID: orchestration=sw-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.219: + Type: Log + Description: Software upgrade auto-apply abort rejected + Entity_Instance_ID: orchestration=sw-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.220: + Type: Log + Description: Software upgrade auto-apply abort failed + Entity_Instance_ID: orchestration=sw-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.221: + Type: Log + Description: Software upgrade auto-apply aborted + Entity_Instance_ID: orchestration=sw-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.301: + Type: Alarm + Description: Firmware Update auto-apply inprogress + Entity_Instance_ID: orchestration=fw-update + Severity: major + Proposed_Repair_Action: Wait for firmware update auto-apply to complete; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.302: + Type: Alarm + Description: Firmware Update auto-apply aborting + Entity_Instance_ID: orchestration=fw-update + Severity: major + Proposed_Repair_Action: Wait for firmware update auto-apply abort to complete; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: True + 
Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.303: + Type: Alarm + Description: Firmware Update auto-apply failed. Command "sw-manager fw-update-strategy apply" failed. + Entity_Instance_ID: orchestration=fw-update + Severity: critical + Proposed_Repair_Action: Attempt to apply firmware update manually; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: underlying-resource-unavailable + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.311: + Type: Log + Description: Firmware update auto-apply start + Entity_Instance_ID: orchestration=fw-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.312: + Type: Log + Description: Firmware update auto-apply inprogress + Entity_Instance_ID: orchestration=fw-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.313: + Type: Log + Description: Firmware update auto-apply rejected + Entity_Instance_ID: orchestration=fw-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.314: + Type: Log + Description: Firmware update auto-apply cancelled + Entity_Instance_ID: orchestration=fw-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.315: + Type: Log + Description: Firmware update auto-apply failed + Entity_Instance_ID: orchestration=fw-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.316: + Type: Log + Description: Firmware update auto-apply completed + Entity_Instance_ID: orchestration=fw-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + 
Service_Affecting: False + +900.317: + Type: Log + Description: Firmware update auto-apply abort + Entity_Instance_ID: orchestration=fw-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.318: + Type: Log + Description: Firmware update auto-apply aborting + Entity_Instance_ID: orchestration=fw-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.319: + Type: Log + Description: Firmware update auto-apply abort rejected + Entity_Instance_ID: orchestration=fw-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.320: + Type: Log + Description: Firmware update auto-apply abort failed + Entity_Instance_ID: orchestration=fw-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.321: + Type: Log + Description: Firmware update auto-apply aborted + Entity_Instance_ID: orchestration=fw-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.401: + Type: Alarm + Description: Kubernetes upgrade auto-apply inprogress + Entity_Instance_ID: orchestration=kube-upgrade + Severity: major + Proposed_Repair_Action: Wait for kubernetes upgrade auto-apply to complete; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.402: + Type: Alarm + Description: Kubernetes upgrade auto-apply aborting + Entity_Instance_ID: orchestration=kube-upgrade + Severity: major + Proposed_Repair_Action: Wait for kubernetes upgrade auto-apply abort to complete; if problem persists contact next level of support + Maintenance_Action: + 
Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.403: + Type: Alarm + Description: Kubernetes upgrade auto-apply failed + Entity_Instance_ID: orchestration=kube-upgrade + Severity: critical + Proposed_Repair_Action: Attempt to apply kubernetes upgrade manually; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: underlying-resource-unavailable + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.411: + Type: Log + Description: Kubernetes upgrade auto-apply start + Entity_Instance_ID: orchestration=kube-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.412: + Type: Log + Description: Kubernetes upgrade auto-apply inprogress + Entity_Instance_ID: orchestration=kube-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.413: + Type: Log + Description: Kubernetes upgrade auto-apply rejected + Entity_Instance_ID: orchestration=kube-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.414: + Type: Log + Description: Kubernetes upgrade auto-apply cancelled + Entity_Instance_ID: orchestration=kube-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.415: + Type: Log + Description: Kubernetes upgrade auto-apply failed + Entity_Instance_ID: orchestration=kube-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.416: + Type: Log + Description: Kubernetes upgrade auto-apply completed + Entity_Instance_ID: 
orchestration=kube-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.417: + Type: Log + Description: Kubernetes upgrade auto-apply abort + Entity_Instance_ID: orchestration=kube-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.418: + Type: Log + Description: Kubernetes upgrade auto-apply aborting + Entity_Instance_ID: orchestration=kube-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.419: + Type: Log + Description: Kubernetes upgrade auto-apply abort rejected + Entity_Instance_ID: orchestration=kube-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.420: + Type: Log + Description: Kubernetes upgrade auto-apply abort failed + Entity_Instance_ID: orchestration=kube-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.421: + Type: Log + Description: Kubernetes upgrade auto-apply aborted + Entity_Instance_ID: orchestration=kube-upgrade + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.501: + Type: Alarm + Description: Kubernetes rootca update auto-apply inprogress + Entity_Instance_ID: orchestration=kube-rootca-update + Severity: major + Proposed_Repair_Action: Wait for kubernetes rootca update auto-apply to complete; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.502: + Type: Alarm + Description: Kubernetes rootca update auto-apply aborting + Entity_Instance_ID: orchestration=kube-rootca-update + Severity: major 
+ Proposed_Repair_Action: Wait for kubernetes rootca update auto-apply abort to complete; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.503: + Type: Alarm + Description: Kubernetes rootca update auto-apply failed. Command "sw-manager kube-rootca-update-strategy apply" failed. + Entity_Instance_ID: orchestration=kube-rootca-update + Severity: critical + Proposed_Repair_Action: Attempt to apply kubernetes rootca update manually; if problem persists contact next level of support + Maintenance_Action: + Inhibit_Alarms: + Alarm_Type: equipment + Probable_Cause: underlying-resource-unavailable + Service_Affecting: True + Suppression: True + Management_Affecting_Severity: warning + Degrade_Affecting_Severity: none + +900.511: + Type: Log + Description: Kubernetes rootca update auto-apply start + Entity_Instance_ID: orchestration=kube-rootca-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.512: + Type: Log + Description: Kubernetes rootca update auto-apply inprogress + Entity_Instance_ID: orchestration=kube-rootca-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.513: + Type: Log + Description: Kubernetes rootca update auto-apply rejected + Entity_Instance_ID: orchestration=kube-rootca-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.514: + Type: Log + Description: Kubernetes rootca update auto-apply cancelled + Entity_Instance_ID: orchestration=kube-rootca-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.515: + Type: Log + Description: Kubernetes rootca 
update auto-apply failed + Entity_Instance_ID: orchestration=kube-rootca-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.516: + Type: Log + Description: Kubernetes rootca update auto-apply completed + Entity_Instance_ID: orchestration=kube-rootca-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.517: + Type: Log + Description: Kubernetes rootca update auto-apply abort + Entity_Instance_ID: orchestration=kube-rootca-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.518: + Type: Log + Description: Kubernetes rootca update auto-apply aborting + Entity_Instance_ID: orchestration=kube-rootca-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.519: + Type: Log + Description: Kubernetes rootca update auto-apply abort rejected + Entity_Instance_ID: orchestration=kube-rootca-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.520: + Type: Log + Description: Kubernetes rootca update auto-apply abort failed + Entity_Instance_ID: orchestration=kube-rootca-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False + +900.521: + Type: Log + Description: Kubernetes rootca update auto-apply aborted + Entity_Instance_ID: orchestration=kube-rootca-update + Severity: critical + Alarm_Type: equipment + Probable_Cause: unspecified-reason + Service_Affecting: False +... 
diff --git a/o2app/adapter/unit_of_work.py b/o2app/adapter/unit_of_work.py index 37b30d3..f046db5 100644 --- a/o2app/adapter/unit_of_work.py +++ b/o2app/adapter/unit_of_work.py @@ -21,7 +21,7 @@ from sqlalchemy.orm.session import Session from o2common.config import config from o2common.service.unit_of_work import AbstractUnitOfWork -from o2ims.adapter import ocloud_repository +from o2ims.adapter import ocloud_repository, alarm_repository, alarm_loader from o2dms.adapter import dms_repository from o2common.helper import o2logging @@ -67,6 +67,18 @@ class SqlAlchemyUnitOfWork(AbstractUnitOfWork): .NfDeploymentSqlAlchemyRepository(self.session) self.ocloudvresources = dms_repository\ .NfOCloudVResourceSqlAlchemyRepository(self.session) + self.alarm_event_records = alarm_repository\ + .AlarmEventRecordSqlAlchemyRepository(self.session) + self.alarm_definitions = alarm_repository\ + .AlarmDefinitionSqlAlchemyRepository(self.session) + self.alarm_subscriptions = alarm_repository\ + .AlarmSubscriptionSqlAlchemyRepository(self.session) + self.alarm_probable_causes = alarm_repository\ + .AlarmProbableCauseSqlAlchemyRepository(self.session) + + # config file + self.alarm_dictionaries = alarm_loader\ + .AlarmDictionaryConfigFileRepository() return super().__enter__() def __exit__(self, *args): @@ -111,3 +123,9 @@ class SqlAlchemyUnitOfWork(AbstractUnitOfWork): for entry in self.ocloudvresources.seen: while hasattr(entry, 'events') and len(entry.events) > 0: yield entry.events.pop(0) + for entry in self.alarm_event_records.seen: + while hasattr(entry, 'events') and len(entry.events) > 0: + yield entry.events.pop(0) + for entry in self.alarm_subscriptions.seen: + while hasattr(entry, 'events') and len(entry.events) > 0: + yield entry.events.pop(0) diff --git a/o2app/bootstrap.py b/o2app/bootstrap.py index 329d9e2..228b240 100644 --- a/o2app/bootstrap.py +++ b/o2app/bootstrap.py @@ -21,6 +21,7 @@ from o2common.adapter.notifications import AbstractNotifications,\ from 
o2common.adapter import redis_eventpublisher from o2common.service import unit_of_work from o2common.service import messagebus +from o2common.config import config from o2app.service import handlers from o2app.adapter.unit_of_work import SqlAlchemyUnitOfWork @@ -28,6 +29,9 @@ from o2app.adapter.unit_of_work import SqlAlchemyUnitOfWork from o2ims.adapter import orm as o2ims_orm from o2dms.adapter import orm as o2dms_orm +from o2ims.adapter.clients import alarm_dict_client + + from o2common.helper import o2logging logger = o2logging.get_logger(__name__) diff --git a/o2app/entrypoints/flask_application.py b/o2app/entrypoints/flask_application.py index 55385c2..fff1201 100644 --- a/o2app/entrypoints/flask_application.py +++ b/o2app/entrypoints/flask_application.py @@ -15,10 +15,12 @@ from flask import Flask from flask_restx import Api + from o2app import bootstrap from o2ims.views import configure_namespace as ims_route_configure_namespace from o2dms.api import configure_namespace as dms_route_configure_namespace +from o2ims.adapter.clients.alarm_dict_client import load_alarm_definition # apibase = config.get_o2ims_api_base() app = Flask(__name__) @@ -31,3 +33,5 @@ bus = bootstrap.bootstrap() ims_route_configure_namespace(api) dms_route_configure_namespace(api) + +load_alarm_definition(bus.uow) diff --git a/o2app/entrypoints/redis_eventconsumer.py b/o2app/entrypoints/redis_eventconsumer.py index 5630174..04ef31c 100644 --- a/o2app/entrypoints/redis_eventconsumer.py +++ b/o2app/entrypoints/redis_eventconsumer.py @@ -24,11 +24,13 @@ from o2ims.domain import commands as imscmd from o2common.helper import o2logging from o2ims.domain.subscription_obj import Message2SMO, NotificationEventEnum,\ RegistrationMessage +from o2ims.domain.alarm_obj import AlarmEvent2SMO logger = o2logging.get_logger(__name__) r = redis.Redis(**config.get_redis_host_and_port()) apibase = config.get_o2ims_api_base() +api_monitoring_base = config.get_o2ims_monitoring_api_base() def main(): @@ -39,16 
+41,17 @@ def main(): pubsub.subscribe('ResourceChanged') pubsub.subscribe('ConfigurationChanged') pubsub.subscribe('OcloudChanged') + pubsub.subscribe('AlarmEventChanged') for m in pubsub.listen(): try: - handle_dms_changed(m, bus) + handle_changed(m, bus) except Exception as ex: logger.warning("{}".format(str(ex))) continue -def handle_dms_changed(m, bus): +def handle_changed(m, bus): logger.info("handling %s", m) channel = m['channel'].decode("UTF-8") if channel == "NfDeploymentStateChanged": @@ -85,6 +88,16 @@ def handle_dms_changed(m, bus): if data['notificationEventType'] == NotificationEventEnum.CREATE: cmd = imscmd.Register2SMO(data=RegistrationMessage(is_all=True)) bus.handle(cmd) + elif channel == 'AlarmEventChanged': + datastr = m['data'] + data = json.loads(datastr) + logger.info('AlarmEventChanged with cmd:{}'.format(data)) + ref = api_monitoring_base + '/alarms/' + data['id'] + cmd = imscmd.PubAlarm2SMO(data=AlarmEvent2SMO( + id=data['id'], ref=ref, + eventtype=data['notificationEventType'], + updatetime=data['updatetime'])) + bus.handle(cmd) else: logger.info("unhandled:{}".format(channel)) diff --git a/o2app/entrypoints/resource_watcher.py b/o2app/entrypoints/resource_watcher.py index 98145dc..99d5595 100644 --- a/o2app/entrypoints/resource_watcher.py +++ b/o2app/entrypoints/resource_watcher.py @@ -21,9 +21,12 @@ from o2common.service.watcher.worker import PollWorker from o2ims.service.watcher.ocloud_watcher import OcloudWatcher from o2ims.service.watcher.ocloud_watcher import DmsWatcher from o2ims.service.watcher.resourcepool_watcher import ResourcePoolWatcher +from o2ims.service.watcher.alarm_watcher import AlarmWatcher + from o2ims.adapter.clients.ocloud_client import StxDmsClient from o2ims.adapter.clients.ocloud_client import StxOcloudClient from o2ims.adapter.clients.ocloud_client import StxResourcePoolClient +from o2ims.adapter.clients.fault_client import StxAlarmClient from o2ims.service.watcher.pserver_watcher import PServerWatcher from 
o2ims.adapter.clients.ocloud_client import StxPserverClient @@ -62,6 +65,8 @@ class WatcherService(cotyledon.Service): StxOcloudClient(), self.bus)) root.addchild( DmsWatcher(StxDmsClient(), self.bus)) + # root.addchild( + # AlarmWatcher(StxFaultClient(), self.bus)) child_respool = root.addchild( ResourcePoolWatcher(StxResourcePoolClient(), @@ -81,6 +86,11 @@ class WatcherService(cotyledon.Service): self.worker.add_watcher(root) + # Add Alarm watch + root = WatcherTree( + AlarmWatcher(StxAlarmClient(self.bus.uow), self.bus)) + self.worker.add_watcher(root) + self.worker.start() except Exception as ex: logger.warning("WorkerService Exception:" + str(ex)) diff --git a/o2app/service/handlers.py b/o2app/service/handlers.py index 9e88ff8..d630720 100644 --- a/o2app/service/handlers.py +++ b/o2app/service/handlers.py @@ -26,10 +26,11 @@ from o2dms.domain import events as o2dms_events from o2ims.service.auditor import ocloud_handler, dms_handler, \ resourcepool_handler, pserver_handler, pserver_cpu_handler, \ pserver_mem_handler, pserver_port_handler, pserver_if_handler,\ - pserver_eth_handler -from o2ims.service.command import notify_handler, registration_handler + pserver_eth_handler, alarm_handler +from o2ims.service.command import notify_handler, registration_handler,\ + notify_alarm_handler from o2ims.service.event import ocloud_event, resource_event, \ - resource_pool_event, configuration_event + resource_pool_event, configuration_event, alarm_event # if TYPE_CHECKING: # from . 
import unit_of_work @@ -57,12 +58,15 @@ EVENT_HANDLERS = { notify_resourcepool_change], events.ConfigurationChanged: [configuration_event.\ notify_configuration_change], + events.AlarmEventChanged: [alarm_event.\ + notify_alarm_event_change], } # type: Dict[Type[events.Event], Callable] COMMAND_HANDLERS = { commands.UpdateOCloud: ocloud_handler.update_ocloud, commands.UpdateDms: dms_handler.update_dms, + commands.UpdateAlarm: alarm_handler.update_alarm, commands.UpdateResourcePool: resourcepool_handler.update_resourcepool, commands.UpdatePserver: pserver_handler.update_pserver, commands.UpdatePserverCpu: pserver_cpu_handler.update_pserver_cpu, @@ -79,5 +83,6 @@ COMMAND_HANDLERS = { o2dms_cmmands.DeleteNfDeployment: nfdeployment_handler.delete_nfdeployment, commands.PubMessage2SMO: notify_handler.notify_change_to_smo, + commands.PubAlarm2SMO: notify_alarm_handler.notify_alarm_to_smo, commands.Register2SMO: registration_handler.registry_to_smo, } # type: Dict[Type[commands.Command], Callable] diff --git a/o2common/config/config.py b/o2common/config/config.py index 6e5d19f..e42c886 100644 --- a/o2common/config/config.py +++ b/o2common/config/config.py @@ -50,7 +50,11 @@ def get_root_api_base(): def get_o2ims_api_base(): - return get_root_api_base() + 'o2ims_infrastructureInventory/v1' + return get_root_api_base() + 'o2ims-infrastructureInventory/v1' + + +def get_o2ims_monitoring_api_base(): + return get_root_api_base() + 'o2ims-infrastructureMonitoring/v1' def get_provision_api_base(): @@ -150,6 +154,36 @@ def get_dc_access_info(): return os_client_args +def get_fm_access_info(): + try: + client_args = dict( + auth_url=os.environ.get('OS_AUTH_URL', _DEFAULT_STX_URL), + username=os.environ.get('OS_USERNAME', "admin"), + api_key=os.environ.get('OS_PASSWORD', "fakepasswd1"), + project_name=os.environ.get('OS_PROJECT_NAME', "admin"), + ) + except KeyError: + logger.error('Please source your RC file before execution, ' + 'e.g.: `source ~/downloads/admin-rc.sh`') + 
sys.exit(1) + + os_client_args = {} + for key, val in client_args.items(): + os_client_args['os_{key}'.format(key=key)] = val + auth_url = urlparse(os_client_args.pop('os_auth_url')) + + os_client_args['insecure'] = True + + os_client_args['auth_url'] = auth_url.geturl() + os_client_args['username'] = os_client_args.pop('os_username') + os_client_args['password'] = os_client_args.pop('os_api_key') + os_client_args['project_name'] = os_client_args.pop('os_project_name') + os_client_args['user_domain_name'] = 'Default' + os_client_args['project_domain_name'] = 'Default' + + return os_client_args + + def get_k8s_api_endpoint(): K8S_KUBECONFIG = os.environ.get("K8S_KUBECONFIG", None) K8S_APISERVER = os.environ.get("K8S_APISERVER", None) @@ -221,3 +255,17 @@ def get_helmcli_access(): helm_pass = os.environ.get("HELM_USER_PASSWD") return helm_host_with_port, helm_user, helm_pass + + +def get_alarm_yaml_filename(): + alarm_yaml_name = os.environ.get("ALARM_YAML") + if alarm_yaml_name is not None and os.path.isfile(alarm_yaml_name): + return alarm_yaml_name + return "/configs/alarm.yaml" + + +def get_events_yaml_filename(): + events_yaml_name = os.environ.get("EVENTS_YAML") + if events_yaml_name is not None and os.path.isfile(events_yaml_name): + return events_yaml_name + return "/configs/events.yaml" diff --git a/o2common/service/watcher/base.py b/o2common/service/watcher/base.py index a7d025e..0fc7853 100644 --- a/o2common/service/watcher/base.py +++ b/o2common/service/watcher/base.py @@ -43,7 +43,8 @@ class BaseWatcher(object): # return self._probe(parent) return cmds except Exception as ex: - logger.warning("Failed to probe resource due to: " + str(ex)) + logger.warning("Failed to probe %s watcher due to: %s - %s" % + (self._targetname(), type(ex), str(ex))) return [] def _probe(self, parent: object = None, tags: object = None) \ diff --git a/o2common/service/watcher/worker.py b/o2common/service/watcher/worker.py index 3eef230..d47fb27 100644 --- 
a/o2common/service/watcher/worker.py +++ b/o2common/service/watcher/worker.py @@ -48,7 +48,8 @@ class PollWorker(object): # logger.debug("about to probe:"+w) w.probe(None) except Exception as ex: - logger.warning("Worker raises exception:" + str(ex)) + logger.warning("Worker raises exception %s: %s - %s " + % (w, type(ex), str(ex))) continue # handle events diff --git a/o2ims/adapter/alarm_loader.py b/o2ims/adapter/alarm_loader.py new file mode 100644 index 0000000..823d0d5 --- /dev/null +++ b/o2ims/adapter/alarm_loader.py @@ -0,0 +1,42 @@ +# Copyright (C) 2022 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import List + +from o2ims.domain import alarm_obj +from o2ims.domain.alarm_repo import AlarmDictionaryRepository +from o2common.helper import o2logging +logger = o2logging.get_logger(__name__) + + +class AlarmDictionaryConfigFileRepository(AlarmDictionaryRepository): + def __init__(self): + super().__init__() + self.dictionary = {} + + def _add(self, alarm_dict: alarm_obj.AlarmDictionary): + self.dictionary[alarm_dict.entityType] = alarm_dict + + def _get(self, alarm_entity_type) -> alarm_obj.AlarmDictionary: + return self.dictionary[alarm_entity_type] + + def _list(self) -> List[alarm_obj.AlarmDictionary]: + return [alarm_dict for alarm_dict in self.dictionary.items()] + + def _update(self, alarm_dict: alarm_obj.AlarmDictionary): + self.dictionary[alarm_dict.entityType] = alarm_dict + + def _delete(self, alarm_entity_type): + if alarm_entity_type in self.dictionary.keys(): + del self.dictionary[alarm_entity_type] diff --git a/o2ims/adapter/alarm_repository.py b/o2ims/adapter/alarm_repository.py new file mode 100644 index 0000000..ef20e6a --- /dev/null +++ b/o2ims/adapter/alarm_repository.py @@ -0,0 +1,114 @@ +# Copyright (C) 2022 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import List + +from o2ims.domain import alarm_obj +from o2ims.domain.alarm_repo import AlarmDefinitionRepository, \ + AlarmEventRecordRepository, AlarmSubscriptionRepository, \ + AlarmProbableCauseRepository +from o2common.helper import o2logging +logger = o2logging.get_logger(__name__) + + +class AlarmEventRecordSqlAlchemyRepository(AlarmEventRecordRepository): + def __init__(self, session): + super().__init__() + self.session = session + + def _add(self, alarm_event_record: alarm_obj.AlarmEventRecord): + self.session.add(alarm_event_record) + + def _get(self, alarm_event_record_id) -> alarm_obj.AlarmEventRecord: + return self.session.query(alarm_obj.AlarmEventRecord).filter_by( + alarmEventRecordId=alarm_event_record_id).first() + + def _list(self) -> List[alarm_obj.AlarmEventRecord]: + return self.session.query(alarm_obj.AlarmEventRecord) + + def _update(self, alarm_event_record: alarm_obj.AlarmEventRecord): + self.session.add(alarm_event_record) + + def _delete(self, alarm_event_record_id): + self.session.query(alarm_obj.AlarmEventRecord).filter_by( + alarmEventRecordId=alarm_event_record_id).delete() + + +class AlarmDefinitionSqlAlchemyRepository(AlarmDefinitionRepository): + def __init__(self, session): + super().__init__() + self.session = session + + def _add(self, definition: alarm_obj.AlarmDefinition): + self.session.add(definition) + + def _get(self, definition_id) -> alarm_obj.AlarmDefinition: + return self.session.query(alarm_obj.AlarmDefinition).filter_by( + alarmDefinitionId=definition_id).first() + + def _list(self) -> List[alarm_obj.AlarmDefinition]: + return self.session.query(alarm_obj.AlarmDefinition) + + def _update(self, definition: alarm_obj.AlarmDefinition): + self.session.add(definition) + + def _delete(self, alarm_definition_id): + self.session.query(alarm_obj.AlarmDefinition).filter_by( + alarmDefinitionId=alarm_definition_id).delete() + + +class AlarmSubscriptionSqlAlchemyRepository(AlarmSubscriptionRepository): + def 
__init__(self, session): + super().__init__() + self.session = session + + def _add(self, subscription: alarm_obj.AlarmSubscription): + self.session.add(subscription) + + def _get(self, subscription_id) -> alarm_obj.AlarmSubscription: + return self.session.query(alarm_obj.AlarmSubscription).filter_by( + alarmSubscriptionId=subscription_id).first() + + def _list(self) -> List[alarm_obj.AlarmSubscription]: + return self.session.query(alarm_obj.AlarmSubscription) + + def _update(self, subscription: alarm_obj.AlarmSubscription): + self.session.add(subscription) + + def _delete(self, alarm_subscription_id): + self.session.query(alarm_obj.AlarmSubscription).filter_by( + alarmSubscriptionId=alarm_subscription_id).delete() + + +class AlarmProbableCauseSqlAlchemyRepository(AlarmProbableCauseRepository): + def __init__(self, session): + super().__init__() + self.session = session + + def _add(self, probable_cause: alarm_obj.ProbableCause): + self.session.add(probable_cause) + + def _get(self, probable_cause_id) -> alarm_obj.ProbableCause: + return self.session.query(alarm_obj.ProbableCause).filter_by( + probableCauseId=probable_cause_id).first() + + def _list(self) -> List[alarm_obj.ProbableCause]: + return self.session.query(alarm_obj.ProbableCause) + + def _update(self, probable_cause: alarm_obj.ProbableCause): + self.session.add(probable_cause) + + def _delete(self, probable_cause_id): + self.session.query(alarm_obj.ProbableCause).filter_by( + probableCauseId=probable_cause_id).delete() diff --git a/o2ims/adapter/clients/alarm_dict_client.py b/o2ims/adapter/clients/alarm_dict_client.py new file mode 100644 index 0000000..e15531a --- /dev/null +++ b/o2ims/adapter/clients/alarm_dict_client.py @@ -0,0 +1,161 @@ +# Copyright (C) 2022 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import json +import yaml +import errno +import collections +import uuid as uuid_gen + +from o2common.service import unit_of_work +from o2common.config import config +from o2ims.domain import alarm_obj as alarm + +from o2common.helper import o2logging +logger = o2logging.get_logger(__name__) + + +def load_alarm_dictionary_from_conf_file(conf_path: str, + uow: unit_of_work.AbstractUnitOfWork): + + logger.info("Converting alarm.yaml to dict: ") + + if not os.path.isfile(conf_path): + logger.error("file %s doesn't exist. Ending execution" % + (conf_path)) + raise OSError( + errno.ENOENT, os.strerror(errno.ENOENT), conf_path + ) + + try: + with open(conf_path, 'r') as stream: + alarm_yaml = yaml.load(stream, Loader=yaml.FullLoader) + dictionaries = alarm_yaml.get('dictionary') + except Exception as exp: + logger.error(exp) + raise RuntimeError(exp) + + for dictionary in list(dictionaries.keys()): + with uow: + # res_type = uow.resource_types.get_by_name(dictionary) + # logger.info('res_type: ' + res_type.resourceTypeName) + alarm_dict = alarm.AlarmDictionary(dictionary) + alarm_dict.entityType = dictionary + alarm_dict.alarmDictionaryVersion = \ + dictionaries[dictionary]['version'] + alarm_dict.alarmDefinition = \ + dictionaries[dictionary]['alarmDefinition'] + uow.alarm_dictionaries.add(alarm_dict) + + +def prettyDict(dict): + output = json.dumps(dict, sort_keys=True, indent=4) + return output + + +def load_alarm_definition(uow: unit_of_work.AbstractUnitOfWork): + logger.info("Converting events.yaml to dict: ") + EVENT_TYPES_FILE = 
config.get_events_yaml_filename() + + if not os.path.isfile(EVENT_TYPES_FILE): + logger.error("file %s doesn't exist. Ending execution" % + (EVENT_TYPES_FILE)) + raise OSError( + errno.ENOENT, os.strerror(errno.ENOENT), EVENT_TYPES_FILE + ) + + try: + with open(EVENT_TYPES_FILE, 'r') as stream: + event_types = yaml.load(stream, Loader=yaml.FullLoader) + except Exception as exp: + logger.error(exp) + raise RuntimeError(exp) + + for alarm_id in list(event_types.keys()): + if isinstance(alarm_id, float): + # force 3 digits after the decimal point, + # to include trailing zero's (ex.: 200.010) + formatted_alarm_id = "{:.3f}".format(alarm_id) + event_types[formatted_alarm_id] = event_types.pop(alarm_id) + + event_types = collections.OrderedDict(sorted(event_types.items())) + + yaml_event_list = [] + uneditable_descriptions = {'100.114', '200.007', + '200.02', '200.021', '200.022', '800.002'} + + # Parse events.yaml dict, and add any new alarm to definition table: + logger.info( + "Parsing events.yaml and adding any new alarm to definition table: ") + for event_type in event_types: + + if event_types.get(event_type).get('Type') == "Alarm": + event_uuid = str(uuid_gen.uuid3( + uuid_gen.NAMESPACE_URL, str(event_type))) + + string_event_type = str(event_type) + + yaml_event_list.append(string_event_type) + + if str(event_type) not in uneditable_descriptions: + event_description = (event_types.get(event_type) + .get('Description')) + else: + event_description = event_types.get( + event_type).get('Description') + + event_description = str(event_description) + event_description = (event_description[:250] + ' ...') \ + if len(event_description) > 250 else event_description + prop_action = event_types.get( + event_type).get("Proposed_Repair_Action") + + with uow: + alarm_def = uow.alarm_definitions.get(event_uuid) + event_mgmt_affecting = str(event_types.get(event_type).get( + 'Management_Affecting_Severity', 'warning')) +# + event_degrade_affecting = 
str(event_types.get(event_type).get( + 'Degrade_Affecting_Severity', 'none')) + + if alarm_def: + alarm_def.description = event_description + alarm_def.mgmt_affecting = event_mgmt_affecting + alarm_def.degrade_affecting = event_degrade_affecting + else: + alarm_def = alarm.AlarmDefinition( + id=event_uuid, + name=str(event_type), + last_change=alarm.AlarmLastChangeEnum.ADDED, + desc=event_description, prop_action=prop_action, + clearing_type=alarm.ClearingTypeEnum.MANUAL, + pk_noti_field="" + ) + logger.info(str(event_type)) + uow.alarm_definitions.add(alarm_def) + + uow.commit() + + prob_cause = event_types.get(event_type).get("Probable_Cause") + prob_cause_uuid = str(uuid_gen.uuid3( + uuid_gen.NAMESPACE_URL, prob_cause)) + + with uow: + probable_cause = uow.alarm_probable_causes.get(prob_cause_uuid) + if probable_cause is None: + pc = alarm.ProbableCause( + prob_cause_uuid, prob_cause, prob_cause) + uow.alarm_probable_causes.add(pc) + uow.commit() diff --git a/o2ims/adapter/clients/fault_client.py b/o2ims/adapter/clients/fault_client.py new file mode 100644 index 0000000..b37a4d2 --- /dev/null +++ b/o2ims/adapter/clients/fault_client.py @@ -0,0 +1,191 @@ +# Copyright (C) 2022 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# client talking to Stx standalone + +from typing import List # Optional, Set +import uuid as uuid + +# from dcmanagerclient.api import client +# from cgtsclient.client import get_client as get_stx_client +# from cgtsclient.exc import EndpointException +# from dcmanagerclient.api.client import client as get_dc_client +from fmclient.client import get_client as get_fm_client +from fmclient.common.exceptions import HTTPNotFound + +from o2common.service.client.base_client import BaseClient +from o2common.config import config +from o2ims.domain import alarm_obj as alarmModel +from o2ims.domain.resource_type import ResourceTypeEnum +from o2app.adapter import unit_of_work + +from o2common.helper import o2logging +logger = o2logging.get_logger(__name__) + + +CGTSCLIENT_ENDPOINT_ERROR_MSG = \ + 'Must provide Keystone credentials or user-defined endpoint and token' + + +class StxAlarmClient(BaseClient): + def __init__(self, uow: unit_of_work.AbstractUnitOfWork, driver=None): + super().__init__() + self.driver = driver if driver else StxFaultClientImp() + self.uow = uow + + def _get(self, id) -> alarmModel.FaultGenericModel: + return self.driver.getAlarmInfo(id) + + def _list(self, **filters) -> List[alarmModel.FaultGenericModel]: + # filters['resourcetypeid'] + newmodels = self.driver.getAlarmList(**filters) + uow = self.uow + exist_alarms = {} + with uow: + rs = uow.session.execute( + ''' + SELECT "alarmEventRecordId" + FROM "alarmEventRecord" + WHERE "perceivedSeverity" != :perceived_severity_enum + ''', + dict(perceived_severity_enum=alarmModel.PerceivedSeverityEnum. 
+ CLEARED) + ) + for row in rs: + id = row[0] + # logger.debug('Exist alarm: ' + id) + exist_alarms[id] = False + + ret = [] + for m in newmodels: + try: + if exist_alarms[m.id]: + ret.append(m) + exist_alarms[m.id] = True + except KeyError: + logger.debug('alarm new: ' + m.id) + ret.append(m) + + for alarm in exist_alarms: + logger.debug('exist alarm: ' + alarm) + if exist_alarms[alarm]: + # exist alarm is active + continue + event = self._get(alarm) + ret.append(event) + + return ret + + def _set_stx_client(self): + pass + + +class StxEventClient(BaseClient): + def __init__(self, driver=None): + super().__init__() + self.driver = driver if driver else StxFaultClientImp() + + def _get(self, id) -> alarmModel.FaultGenericModel: + return self.driver.getEventInfo(id) + + def _list(self, **filters) -> List[alarmModel.FaultGenericModel]: + return self.driver.getEventList(**filters) + + def _set_stx_client(self): + pass + + +# internal driver which implement client call to Stx Fault Management instance +class StxFaultClientImp(object): + def __init__(self, fm_client=None): + super().__init__() + self.fmclient = fm_client if fm_client else self.getFmClient() + # if subcloud_id is not None: + # self.stxclient = self.getSubcloudClient(subcloud_id) + + def getFmClient(self): + os_client_args = config.get_fm_access_info() + config_client = get_fm_client(1, **os_client_args) + return config_client + + def getAlarmList(self, **filters) -> List[alarmModel.FaultGenericModel]: + alarms = self.fmclient.alarm.list(expand=True) + if len(alarms) == 0: + return [] + logger.debug('alarm 1:' + str(alarms[0].to_dict())) + # [print('alarm:' + str(alarm.to_dict())) for alarm in alarms if alarm] + return [alarmModel.FaultGenericModel( + ResourceTypeEnum.PSERVER, self._alarmconverter(alarm)) + for alarm in alarms if alarm] + + def getAlarmInfo(self, id) -> alarmModel.FaultGenericModel: + try: + alarm = self.fmclient.alarm.get(id) + logger.debug('get alarm id ' + id + ':' + 
str(alarm.to_dict())) + # print(alarm.to_dict()) + except HTTPNotFound: + event = self.fmclient.event_log.get(id) + return alarmModel.FaultGenericModel( + ResourceTypeEnum.PSERVER, self._eventconverter(event, True)) + return alarmModel.FaultGenericModel( + ResourceTypeEnum.PSERVER, self._alarmconverter(alarm)) + + def getEventList(self, **filters) -> List[alarmModel.FaultGenericModel]: + events = self.fmclient.event_log.list(alarms=True, expand=True) + logger.debug('event 1:' + str(events[0].to_dict())) + # [print('alarm:' + str(event.to_dict())) for event in events if event] + return [alarmModel.FaultGenericModel( + ResourceTypeEnum.PSERVER, self._eventconverter(event)) + for event in events if event] + + def getEventInfo(self, id) -> alarmModel.FaultGenericModel: + event = self.fmclient.event_log.get(id) + logger.debug('get event id ' + id + ':' + str(event.to_dict())) + # print(event.to_dict()) + return alarmModel.FaultGenericModel( + ResourceTypeEnum.PSERVER, self._eventconverter(event)) + + @ staticmethod + def _alarmconverter(alarm): + # setattr(alarm, 'alarm_def_id', uuid.uuid3( + # uuid.NAMESPACE_URL, alarm.alarm_id)) + setattr(alarm, 'state', alarm.alarm_state) + setattr(alarm, 'event_log_type', alarm.alarm_type) + setattr(alarm, 'event_log_id', alarm.alarm_id) + + setattr(alarm, 'alarm_def_id', uuid.uuid3( + uuid.NAMESPACE_URL, alarm.alarm_id)) + setattr(alarm, 'probable_cause_id', uuid.uuid3( + uuid.NAMESPACE_URL, alarm.probale_cause)) + return alarm + + @ staticmethod + def _eventconverter(event, clear=False): + setattr(event, 'alarm_id', event.event_log_id) + setattr(event, 'alarm_type', event.event_log_type) + if clear: + logger.debug('alarm is clear') + event.state = 'clear' + setattr(event, 'alarm_def_id', uuid.uuid3( + uuid.NAMESPACE_URL, event.alarm_id)) + setattr(event, 'probable_cause_id', uuid.uuid3( + uuid.NAMESPACE_URL, event.probale_cause)) + return event + + @ staticmethod + def _alarmeventhasher(event, state=''): + # The event model and 
the alarm model have different parameter name + # of the state. alarm model is alarm_state, event model is state. + status = event.alarm_state if state == '' else state + return str(hash((event.uuid, event.timestamp, status))) diff --git a/o2ims/adapter/orm.py b/o2ims/adapter/orm.py index cb3a694..4775f29 100644 --- a/o2ims/adapter/orm.py +++ b/o2ims/adapter/orm.py @@ -37,7 +37,9 @@ from sqlalchemy.orm import mapper, relationship from o2ims.domain import ocloud as ocloudModel from o2ims.domain import subscription_obj as subModel from o2ims.domain import configuration_obj as confModel +from o2ims.domain import alarm_obj as alarmModel from o2ims.domain.resource_type import ResourceTypeEnum +# from o2ims.domain.alarm_obj import AlarmLastChangeEnum, PerceivedSeverityEnum from o2common.helper import o2logging logger = o2logging.get_logger(__name__) @@ -163,6 +165,66 @@ configuration = Table( Column("comments", String(255)), ) +alarm_definition = Table( + "alarmDefinition", + metadata, + Column("updatetime", DateTime), + Column("createtime", DateTime), + + Column("alarmDefinitionId", String(255), primary_key=True), + Column("alarmName", String(255), unique=True), + Column("alarmLastChange", String(255)), + Column("alarmDescription", String(255)), + Column("proposeRepairActions", String(255)), + Column("clearingType", String(255)), + Column("managementInterfaceId", String(255)), + Column("pkNotificationField", String(255)) +) + +alarm_event_record = Table( + "alarmEventRecord", + metadata, + Column("updatetime", DateTime), + Column("createtime", DateTime), + Column("hash", String(255)), + + Column("alarmEventRecordId", String(255), primary_key=True), + Column("resourceTypeId", ForeignKey("resourcetype.resourceTypeId")), + Column("resourceId", ForeignKey("resource.resourceId")), + Column("alarmDefinitionId", ForeignKey( + "alarmDefinition.alarmDefinitionId")), + Column("probableCauseId", String(255)), + Column("perceivedSeverity", Integer), + Column("alarmRaisedTime", 
String(255)), + Column("alarmChangedTime", String(255)), + Column("alarmAcknowledgeTime", String(255)), + Column("alarmAcknowledged", String(255)), +) + +alarm_probable_cause = Table( + "probableCause", + metadata, + Column("updatetime", DateTime), + Column("createtime", DateTime), + Column("hash", String(255)), + + Column("probableCauseId", String(255), primary_key=True), + Column("name", String(255)), + Column("description", String(255)), +) + +alarm_subscription = Table( + "alarmSubscription", + metadata, + Column("updatetime", DateTime), + Column("createtime", DateTime), + + Column("alarmSubscriptionId", String(255), primary_key=True), + Column("callback", String(255)), + Column("consumerSubscriptionId", String(255)), + Column("filter", String(255)), +) + @retry((exc.IntegrityError), tries=3, delay=2) def wait_for_metadata_ready(engine): @@ -174,6 +236,7 @@ def wait_for_metadata_ready(engine): def start_o2ims_mappers(engine=None): logger.info("Starting O2 IMS mappers") + # IMS Infrastructure Inventory Mappering dm_mapper = mapper(ocloudModel.DeploymentManager, deploymentmanager) resourcepool_mapper = mapper(ocloudModel.ResourcePool, resourcepool) resourcetype_mapper = mapper(ocloudModel.ResourceType, resourcetype) @@ -196,5 +259,11 @@ def start_o2ims_mappers(engine=None): mapper(subModel.Subscription, subscription) mapper(confModel.Configuration, configuration) + # IMS Infrastruture Monitoring Mappering + mapper(alarmModel.AlarmEventRecord, alarm_event_record) + mapper(alarmModel.AlarmDefinition, alarm_definition) + mapper(alarmModel.ProbableCause, alarm_probable_cause) + mapper(alarmModel.AlarmSubscription, alarm_subscription) + if engine is not None: wait_for_metadata_ready(engine) diff --git a/o2ims/domain/alarm_obj.py b/o2ims/domain/alarm_obj.py new file mode 100644 index 0000000..fa7cba2 --- /dev/null +++ b/o2ims/domain/alarm_obj.py @@ -0,0 +1,188 @@ +# Copyright (C) 2022 Wind River Systems, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations +from enum import Enum +import json +import datetime + +from o2common.domain.base import AgRoot, Serializer + + +class FaultGenericModel(AgRoot): + def __init__(self, type: str, + api_response: dict = None, content_hash=None) -> None: + super().__init__() + if api_response: + self.id = str(api_response.uuid) + self.name = self.id + self.type = type + self.status = api_response.state + # TODO: time less than second + self.timestamp = datetime.datetime.strptime( + api_response.timestamp.split('.')[0], "%Y-%m-%dT%H:%M:%S") \ + if api_response.timestamp else None + + # if hasattr(api_response, 'alarm_id'): + # self.alarm_id = api_response.alarm_id + # elif hasattr(api_response, 'event_log_id'): + # self.alarm_id = api_response.event_log_id + + self.hash = content_hash if content_hash \ + else str(hash((self.id, self.timestamp, self.status))) + self.content = json.dumps(api_response.to_dict()) + + def is_outdated(self, newmodel) -> bool: + # return self.updatetime < newmodel.updatetime + # logger.warning("hash1: " + self.hash + " vs hash2: " + newmodel.hash) + return self.hash != newmodel.hash + + def update_by(self, newmodel) -> None: + if self.id != newmodel.id: + pass + # raise MismatchedModel("Mismatched model") + self.name = newmodel.name + self.createtime = newmodel.createtime + self.updatetime = newmodel.updatetime + self.content = newmodel.content + + +class 
PerceivedSeverityEnum(str, Enum): + CRITICAL = 0 + MAJOR = 1 + MINOR = 2 + WARNING = 3 + INDETERMINATE = 4 + CLEARED = 5 + + +class AlarmEventRecord(AgRoot, Serializer): + def __init__(self, id: str, res_type_id: str, res_id: str, + alarm_def_id: str, probable_cause_id: str, + raised_time: str, + perc_severity: PerceivedSeverityEnum = + PerceivedSeverityEnum.WARNING + ) -> None: + super().__init__() + self.alarmEventRecordId = id + self.resourceTypeId = res_type_id + self.resourceId = res_id + self.alarmDefinitionId = alarm_def_id + self.probableCauseId = probable_cause_id + self.perceivedSeverity = perc_severity + self.alarmRaisedTime = raised_time + self.alarmChangedTime = '' + self.alarmAcknowledgeTime = '' + self.alarmAcknowledged = False + self.extensions = [] + + +class ProbableCause(AgRoot, Serializer): + def __init__(self, id: str, name: str, desc: str = '') -> None: + super().__init__() + self.probableCauseId = id + self.name = name + self.description = desc + + +class AlarmLastChangeEnum(str, Enum): + ADDED = 'ADDED' + DELETED = 'DELETED' + MODIFYED = 'MODIFYED' + + +class ClearingTypeEnum(str, Enum): + AUTOMATIC = 'AUTOMATIC' + MANUAL = 'MANUAL' + + +class AlarmDefinition(AgRoot, Serializer): + def __init__(self, id: str, name: str, last_change: AlarmLastChangeEnum, + desc: str, prop_action: str, clearing_type: ClearingTypeEnum, + pk_noti_field: str) -> None: + super().__init__() + self.alarmDefinitionId = id + self.alarmName = name + self.alarmLastChange = last_change + self.alarmDescription = desc + self.proposedRepairActions = prop_action + self.clearingType = clearing_type + self.managementInterfaceId = "O2IMS" + self.pkNotificationField = pk_noti_field + self.alarmAdditionalFields = "" + + +class AlarmDictionary(AgRoot, Serializer): + def __init__(self, id: str) -> None: + super().__init__() + self.id = id + self.alarmDictionaryVersion = "" + self.alarmDictionarySchemaVersion = "" + self.entityType = "" + self.vendor = "" + 
self.managementInterfaceId = "O2IMS" + self.pkNotificationField = "" + self.alarmDefinition = "" + + +class AlarmNotificationEventEnum(str, Enum): + NEW = 0 + CHANGE = 1 + CLEAR = 2 + ACKNOWLEDGE = 3 + + +class AlarmEvent2SMO(Serializer): + def __init__(self, eventtype: AlarmNotificationEventEnum, + id: str, ref: str, updatetime: str) -> None: + self.notificationEventType = eventtype + self.objectRef = ref + self.id = id + self.updatetime = updatetime + + +class AlarmSubscription(AgRoot, Serializer): + def __init__(self, id: str, callback: str, consumersubid: str = '', + filter: str = '') -> None: + super().__init__() + self.alarmSubscriptionId = id + self.version_number = 0 + self.callback = callback + self.consumerSubscriptionId = consumersubid + self.filter = filter + + +class AlarmEventNotification(AgRoot, Serializer): + def __init__(self, alarm: AlarmEventRecord, to_smo: AlarmEvent2SMO, + consumersubid: str) -> None: + super().__init__() + self.globalCloudId = '' + self.consumerSubscriptionId = consumersubid + self._convert_params(alarm, to_smo) + + def _convert_params(self, alarm: AlarmEventRecord, to_smo: AlarmEvent2SMO): + self.notificationEventType = to_smo.notificationEventType + self.objectRef = to_smo.objectRef + + self.alarmEventRecordId = alarm.alarmEventRecordId + self.resourceTypeId = alarm.resourceTypeId + self.resourceId = alarm.resourceId + self.alarmDefinitionId = alarm.alarmDefinitionId + self.probableCauseId = alarm.probableCauseId + self.perceivedSeverity = alarm.perceivedSeverity + self.alarmRaisedTime = alarm.alarmRaisedTime + self.alarmChangedTime = alarm.alarmChangedTime + self.alarmAcknowledgeTime = alarm.alarmAcknowledgeTime + self.alarmAcknowledged = alarm.alarmAcknowledged + self.extensions = [] diff --git a/o2ims/domain/alarm_repo.py b/o2ims/domain/alarm_repo.py new file mode 100644 index 0000000..d3e7f52 --- /dev/null +++ b/o2ims/domain/alarm_repo.py @@ -0,0 +1,221 @@ +# Copyright (C) 2022 Wind River Systems, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc +from typing import List, Set +from o2ims.domain import alarm_obj as obj + + +class AlarmEventRecordRepository(abc.ABC): + def __init__(self): + self.seen = set() # type: Set[obj.AlarmEventRecord] + + def add(self, alarm_event_record: obj.AlarmEventRecord): + self._add(alarm_event_record) + self.seen.add(alarm_event_record) + + def get(self, alarm_event_record_id) -> obj.AlarmEventRecord: + alarm_event_record = self._get(alarm_event_record_id) + if alarm_event_record: + self.seen.add(alarm_event_record) + return alarm_event_record + + def list(self) -> List[obj.AlarmEventRecord]: + return self._list() + + def update(self, alarm_event_record: obj.AlarmEventRecord): + self._update(alarm_event_record) + + def delete(self, alarm_event_record_id): + self._delete(alarm_event_record_id) + + @abc.abstractmethod + def _add(self, alarm_event_record: obj.AlarmEventRecord): + raise NotImplementedError + + @abc.abstractmethod + def _get(self, alarm_event_record_id) -> obj.AlarmEventRecord: + raise NotImplementedError + + @abc.abstractmethod + def _list(self) -> List[obj.AlarmEventRecord]: + raise NotImplementedError + + @abc.abstractmethod + def _update(self, alarm_event_record: obj.AlarmEventRecord): + raise NotImplementedError + + @abc.abstractmethod + def _delete(self, alarm_event_record_id): + raise NotImplementedError + + +class AlarmDefinitionRepository(abc.ABC): + def __init__(self): + self.seen = set() # type: 
Set[obj.AlarmDefinition] + + def add(self, definition: obj.AlarmDefinition): + self._add(definition) + self.seen.add(definition) + + def get(self, definition_id) -> obj.AlarmDefinition: + definition = self._get(definition_id) + if definition: + self.seen.add(definition) + return definition + + def list(self) -> List[obj.AlarmDefinition]: + return self._list() + + def update(self, definition: obj.AlarmDefinition): + self._update(definition) + + def delete(self, definition_id): + self._delete(definition_id) + + @abc.abstractmethod + def _add(self, definition: obj.AlarmDefinition): + raise NotImplementedError + + @abc.abstractmethod + def _get(self, definition_id) -> obj.AlarmDefinition: + raise NotImplementedError + + @abc.abstractmethod + def _update(self, definition: obj.AlarmDefinition): + raise NotImplementedError + + @abc.abstractmethod + def _delete(self, definition_id): + raise NotImplementedError + + +class AlarmDictionaryRepository(abc.ABC): + def __init__(self): + self.seen = set() # type: Set[obj.AlarmDictionary] + + def add(self, dictionary: obj.AlarmDictionary): + self._add(dictionary) + self.seen.add(dictionary) + + def get(self, dictionary_id) -> obj.AlarmDictionary: + dictionary = self._get(dictionary_id) + if dictionary: + self.seen.add(dictionary) + return dictionary + + def list(self) -> List[obj.AlarmDictionary]: + return self._list() + + def update(self, dictionary: obj.AlarmDictionary): + self._update(dictionary) + + def delete(self, dictionary_id): + self._delete(dictionary_id) + + @abc.abstractmethod + def _add(self, dictionary: obj.AlarmDictionary): + raise NotImplementedError + + @abc.abstractmethod + def _get(self, dictionary_id) -> obj.AlarmDictionary: + raise NotImplementedError + + @abc.abstractmethod + def _update(self, dictionary: obj.AlarmDictionary): + raise NotImplementedError + + @abc.abstractmethod + def _delete(self, dictionary_id): + raise NotImplementedError + + +class AlarmSubscriptionRepository(abc.ABC): + def __init__(self): 
+ self.seen = set() # type: Set[obj.AlarmSubscription] + + def add(self, subscription: obj.AlarmSubscription): + self._add(subscription) + self.seen.add(subscription) + + def get(self, subscription_id) -> obj.AlarmSubscription: + subscription = self._get(subscription_id) + if subscription: + self.seen.add(subscription) + return subscription + + def list(self) -> List[obj.AlarmSubscription]: + return self._list() + + def update(self, subscription: obj.AlarmSubscription): + self._update(subscription) + + def delete(self, subscription_id): + self._delete(subscription_id) + + @abc.abstractmethod + def _add(self, subscription: obj.AlarmSubscription): + raise NotImplementedError + + @abc.abstractmethod + def _get(self, subscription_id) -> obj.AlarmSubscription: + raise NotImplementedError + + @abc.abstractmethod + def _update(self, subscription: obj.AlarmSubscription): + raise NotImplementedError + + @abc.abstractmethod + def _delete(self, subscription_id): + raise NotImplementedError + + +class AlarmProbableCauseRepository(abc.ABC): + def __init__(self): + self.seen = set() # type: Set[obj.ProbableCause] + + def add(self, probable_cause: obj.ProbableCause): + self._add(probable_cause) + self.seen.add(probable_cause) + + def get(self, probable_cause_id) -> obj.ProbableCause: + probable_cause = self._get(probable_cause_id) + if probable_cause: + self.seen.add(probable_cause) + return probable_cause + + def list(self) -> List[obj.ProbableCause]: + return self._list() + + def update(self, probable_cause: obj.ProbableCause): + self._update(probable_cause) + + def delete(self, probable_cause_id): + self._delete(probable_cause_id) + + @abc.abstractmethod + def _add(self, probable_cause: obj.ProbableCause): + raise NotImplementedError + + @abc.abstractmethod + def _get(self, probable_cause_id) -> obj.ProbableCause: + raise NotImplementedError + + @abc.abstractmethod + def _update(self, probable_cause: obj.ProbableCause): + raise NotImplementedError + + @abc.abstractmethod + def 
_delete(self, probable_cause_id): + raise NotImplementedError diff --git a/o2ims/domain/commands.py b/o2ims/domain/commands.py index bcd6e86..4ab1b25 100644 --- a/o2ims/domain/commands.py +++ b/o2ims/domain/commands.py @@ -14,12 +14,13 @@ # pylint: disable=too-few-public-methods # from datetime import date -# from typing import Optional from dataclasses import dataclass -# from datetime import datetime -# from o2ims.domain.resource_type import ResourceTypeEnum +# from typing import List + from o2ims.domain.stx_object import StxGenericModel +from o2ims.domain.alarm_obj import AlarmEvent2SMO from o2ims.domain.subscription_obj import Message2SMO, RegistrationMessage +# from o2ims.domain.resource_type import ResourceTypeEnum from o2common.domain.commands import Command @@ -33,6 +34,11 @@ class PubMessage2SMO(Command): data: Message2SMO +@dataclass +class PubAlarm2SMO(Command): + data: AlarmEvent2SMO + + @dataclass class Register2SMO(Command): data: RegistrationMessage @@ -91,3 +97,8 @@ class UpdatePserverIf(UpdateResource): @dataclass class UpdatePserverIfPort(UpdateResource): pass + + +@dataclass +class UpdateAlarm(UpdateStxObject): + pass diff --git a/o2ims/domain/events.py b/o2ims/domain/events.py index 4858040..eb0250f 100644 --- a/o2ims/domain/events.py +++ b/o2ims/domain/events.py @@ -18,6 +18,7 @@ from datetime import datetime from o2common.domain.events import Event from o2ims.domain.subscription_obj import NotificationEventEnum +from o2ims.domain.alarm_obj import AlarmNotificationEventEnum @dataclass @@ -52,3 +53,10 @@ class ResourceChanged(Event): class ConfigurationChanged(Event): id: str updatetime: datetime.now() + + +@dataclass +class AlarmEventChanged(Event): + id: str + notificationEventType: AlarmNotificationEventEnum + updatetime: datetime.now() diff --git a/o2ims/service/auditor/alarm_handler.py b/o2ims/service/auditor/alarm_handler.py new file mode 100644 index 0000000..6288531 --- /dev/null +++ b/o2ims/service/auditor/alarm_handler.py @@ -0,0 
+1,230 @@ +# Copyright (C) 2022 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# pylint: disable=unused-argument +from __future__ import annotations +import json + +# from o2common.config import config +# from o2common.service.messagebus import MessageBus +from o2common.service.unit_of_work import AbstractUnitOfWork +from o2ims.domain import events, commands, alarm_obj +from o2ims.domain.alarm_obj import AlarmEventRecord, FaultGenericModel,\ + AlarmNotificationEventEnum + +from o2common.helper import o2logging +logger = o2logging.get_logger(__name__) + + +def update_alarm( + cmd: commands.UpdateAlarm, + uow: AbstractUnitOfWork +): + fmobj = cmd.data + logger.info("add alarm event record:" + fmobj.name + + " update_at: " + str(fmobj.updatetime) + + " id: " + str(fmobj.id) + + " hash: " + str(fmobj.hash)) + with uow: + logger.debug('+++test alarm dict:' + + str(len(uow.alarm_dictionaries.list()))) + alarm_event_record = uow.alarm_event_records.get(fmobj.id) + if not alarm_event_record: + logger.info("add alarm event record:" + fmobj.name + + " update_at: " + str(fmobj.updatetime) + + " id: " + str(fmobj.id) + + " hash: " + str(fmobj.hash)) + localmodel = create_by(fmobj) + content = json.loads(fmobj.content) + entity_type_id = content['entity_type_id'] + entity_instance_id = content['entity_instance_id'] + logger.info('alarm entity instance id: ' + entity_instance_id) + if 'host' == entity_type_id: + # TODO: handle different resource type + 
hostname = entity_instance_id.split('.')[0].split('=')[1] + logger.debug('hostname: ' + hostname) + respools = uow.resource_pools.list() + respoolids = [respool.resourcePoolId for respool in + respools if respool.oCloudId == + respool.resourcePoolId] + restype = uow.resource_types.get_by_name('pserver') + localmodel.resourceTypeId = restype.resourceTypeId + hosts = uow.resources.list(respoolids[0], **{ + 'resourceTypeId': restype.resourceTypeId + }) + for host in hosts: + if host.name == hostname: + localmodel.resourceId = host.resourceId + uow.alarm_event_records.add(localmodel) + logger.info("Add the alarm event record: " + fmobj.id + + ", name: " + fmobj.name) + # localmodel.resourceTypeId = check_restype_id(uow, fmobj) + # logger.debug("resource type ID: " + localmodel.resourceTypeId) + # localmodel.resourceId = check_res_id(uow, fmobj) + # logger.debug("resource ID: " + localmodel.resourceId) + # uow.alarm_event_records.add(localmodel) + + else: + localmodel = alarm_event_record + if is_outdated(localmodel, fmobj): + logger.info("update alarm event record:" + fmobj.name + + " update_at: " + str(fmobj.updatetime) + + " id: " + str(fmobj.id) + + " hash: " + str(fmobj.hash)) + update_by(localmodel, fmobj) + uow.alarm_event_records.update(localmodel) + + logger.info("Update the alarm event record: " + fmobj.id + + ", name: " + fmobj.name) + uow.commit() + + +def is_outdated(alarm_event_record: AlarmEventRecord, + fmobj: FaultGenericModel): + return True if alarm_event_record.hash != fmobj.hash else False + + +def create_by(fmobj: FaultGenericModel) -> AlarmEventRecord: + content = json.loads(fmobj.content) + # globalcloudId = fmobj.id # to be updated + alarm_definition_id = fmobj.alarm_def_id + alarm_event_record = AlarmEventRecord( + fmobj.id, "", "", + alarm_definition_id, "", + fmobj.timestamp) + + def severity_switch(val): + if val == 'critical': + return alarm_obj.PerceivedSeverityEnum.CRITICAL + elif val == 'major': + return 
alarm_obj.PerceivedSeverityEnum.MAJOR + elif val == 'minor': + return alarm_obj.PerceivedSeverityEnum.MINOR + else: + return alarm_obj.PerceivedSeverityEnum.WARNING + alarm_event_record.perceivedSeverity = severity_switch(content['severity']) + alarm_event_record.probableCauseId = content['probable_cause_id'] + alarm_event_record.hash = fmobj.hash + # logger.info('severity: ' + content['severity']) + # logger.info('perceived severity: ' + # + alarm_event_record.perceivedSeverity) + alarm_event_record.events.append(events.AlarmEventChanged( + id=fmobj.id, + notificationEventType=AlarmNotificationEventEnum.NEW, + updatetime=fmobj.updatetime + )) + + return alarm_event_record + + +def update_by(target: AlarmEventRecord, fmobj: FaultGenericModel + ) -> None: + # content = json.loads(fmobj.content) + target.hash = fmobj.hash + if fmobj.status == 'clear': + target.perceivedSeverity = alarm_obj.PerceivedSeverityEnum.CLEARED + target.events.append(events.AlarmEventChanged( + id=fmobj.id, + notificationEventType=AlarmNotificationEventEnum.CLEAR, + updatetime=fmobj.updatetime + )) + + +def check_restype_id(uow: AbstractUnitOfWork, fmobj: FaultGenericModel) -> str: + content = json.loads(fmobj.content) + entity_type_id = content['entity_type_id'] + # Entity_Instance_ID: .lvmthinpool=/ + # Entity_Instance_ID: ["image=, instance=", + # Entity_Instance_ID: [host=.command=provision, + # Entity_Instance_ID: [host=.event=discovered, + # Entity_Instance_ID: [host=.state=disabled, + # Entity_Instance_ID: [subcloud=.resource=] + # Entity_Instance_ID: cinder_io_monitor + # Entity_Instance_ID: cluster= + # Entity_Instance_ID: cluster=.peergroup= + # Entity_Instance_ID: fs_name= + # Entity_Instance_ID: host= + # Entity_Instance_ID: host=.network= + # Entity_Instance_ID: host=.services=compute + # Entity_Instance_ID: host= + # Entity_Instance_ID: host=,agent=, + # bgp-peer= + # Entity_Instance_ID: host=.agent= + # Entity_Instance_ID: host=.interface= + # Entity_Instance_ID: 
host=.interface= + # Entity_Instance_ID: host=.ml2driver= + # Entity_Instance_ID: host=.network= + # Entity_Instance_ID: host=.openflow-controller= + # Entity_Instance_ID: host=.openflow-network= + # Entity_Instance_ID: host=.port= + # Entity_Instance_ID: host=.port= + # Entity_Instance_ID: host=.process= + # Entity_Instance_ID: host=.processor= + # Entity_Instance_ID: host=.sdn-controller= + # Entity_Instance_ID: host=.sensor= + # Entity_Instance_ID: host=.service= + # Entity_Instance_ID: host=.service=networking.providernet= + # + # Entity_Instance_ID: host=controller + # Entity_Instance_ID: itenant=.instance= + # Entity_Instance_ID: k8s_application= + # Entity_Instance_ID: kubernetes=PV-migration-failed + # Entity_Instance_ID: orchestration=fw-update + # Entity_Instance_ID: orchestration=kube-rootca-update + # Entity_Instance_ID: orchestration=kube-upgrade + # Entity_Instance_ID: orchestration=sw-patch + # Entity_Instance_ID: orchestration=sw-upgrade + # Entity_Instance_ID: resource=,name= + # Entity_Instance_ID: server-group + # Entity_Instance_ID: service=networking.providernet= + # Entity_Instance_ID: service_domain=.service_group= + # Entity_Instance_ID: service_domain=.service_group=. 
+ # host= + # Entity_Instance_ID: service_domain=.service_group= + # + # Entity_Instance_ID: service_domain=.service_group= + # .host= + # Entity_Instance_ID: storage_backend= + # Entity_Instance_ID: subcloud= + # Entity_Instance_ID: subsystem=vim + # Entity_Instance_ID: tenant=.instance= + if 'host' == entity_type_id: + with uow: + restype = uow.resource_types.get_by_name('pserver') + return restype.resourceTypeId + else: + return "" + + +def check_res_id(uow: AbstractUnitOfWork, fmobj: FaultGenericModel) -> str: + content = json.loads(fmobj.content) + entity_type_id = content['entity_type_id'] + entity_instance_id = content['entity_instance_id'] + if 'host' == entity_type_id: + logger.info('host: ' + entity_instance_id) + hostname = entity_instance_id.split('.')[0].split('=')[1] + with uow: + respools = uow.resource_pools.list() + respoolids = [respool.resourcePoolId for respool in respools + if respool.oCloudId == respool.resourcePoolId] + restype = uow.resource_types.get_by_name('pserver') + hosts = uow.resources.list(respoolids[0], **{ + 'resourceTypeId': restype.resourceTypeId + }) + for host in hosts: + if host.name == hostname: + return host.resourceId + else: + return "" diff --git a/o2ims/service/command/notify_alarm_handler.py b/o2ims/service/command/notify_alarm_handler.py new file mode 100644 index 0000000..b2ca61c --- /dev/null +++ b/o2ims/service/command/notify_alarm_handler.py @@ -0,0 +1,67 @@ +# Copyright (C) 2021 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import json +# import redis +# import requests +import http.client +from urllib.parse import urlparse + +# from o2common.config import config +from o2common.service.unit_of_work import AbstractUnitOfWork +from o2ims.domain import commands +from o2ims.domain.alarm_obj import AlarmSubscription, AlarmEvent2SMO + +from o2common.helper import o2logging +logger = o2logging.get_logger(__name__) + + +def notify_alarm_to_smo( + cmd: commands.PubAlarm2SMO, + uow: AbstractUnitOfWork, +): + logger.info('In notify_alarm_to_smo') + data = cmd.data + with uow: + subs = uow.alarm_subscriptions.list() + for sub in subs: + sub_data = sub.serialize() + logger.debug('Alarm Subscription: {}'.format( + sub_data['alarmSubscriptionId'])) + + callback_smo(sub, data) + + +def callback_smo(sub: AlarmSubscription, msg: AlarmEvent2SMO): + sub_data = sub.serialize() + callback_data = json.dumps({ + 'consumerSubscriptionId': sub_data['consumerSubscriptionId'], + 'notificationEventType': msg.notificationEventType, + 'objectRef': msg.objectRef, + 'updateTime': msg.updatetime + }) + logger.info('URL: {}, data: {}'.format( + sub_data['callback'], callback_data)) + o = urlparse(sub_data['callback']) + conn = http.client.HTTPConnection(o.netloc) + headers = {'Content-type': 'application/json'} + conn.request('POST', o.path, callback_data, headers) + resp = conn.getresponse() + data = resp.read().decode('utf-8') + # json_data = json.loads(data) + if resp.status == 202 or resp.status == 200: + logger.info('Notify to SMO successed, response code {} {}, data {}'. 
+ format(resp.status, resp.reason, data)) + return + logger.error('Response code is: {}'.format(resp.status)) diff --git a/o2ims/service/event/alarm_event.py b/o2ims/service/event/alarm_event.py new file mode 100644 index 0000000..a58c52b --- /dev/null +++ b/o2ims/service/event/alarm_event.py @@ -0,0 +1,30 @@ +# Copyright (C) 2021 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable + +from o2ims.domain import events + +from o2common.helper import o2logging +logger = o2logging.get_logger(__name__) + + +def notify_alarm_event_change( + event: events.AlarmEventChanged, + publish: Callable, +): + logger.info('In notify_alarm_event_change') + publish("AlarmEventChanged", event) + logger.debug("published Alarm Event Changed: {}".format( + event.id)) diff --git a/o2ims/service/watcher/alarm_watcher.py b/o2ims/service/watcher/alarm_watcher.py new file mode 100644 index 0000000..3581ef2 --- /dev/null +++ b/o2ims/service/watcher/alarm_watcher.py @@ -0,0 +1,92 @@ +# Copyright (C) 2021 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# from o2ims.domain.resource_type import ResourceTypeEnum +from o2common.service.client.base_client import BaseClient +# from o2ims.domain.stx_object import StxGenericModel +# from o2common.service.unit_of_work import AbstractUnitOfWork +from o2common.service.watcher.base import BaseWatcher +from o2common.service.messagebus import MessageBus +from o2ims.domain import commands + +from o2common.helper import o2logging +logger = o2logging.get_logger(__name__) + + +class AlarmWatcher(BaseWatcher): + def __init__(self, fault_client: BaseClient, + bus: MessageBus) -> None: + super().__init__(fault_client, bus) + + def _targetname(self): + return "alarm" + + def _probe(self, parent: object = None, tags: object = None): + newmodels = self._client.list() + # if len(newmodels) == 0: + # return [] + + # uow = self._bus.uow + # exist_alarms = {} + # with uow: + # rs = uow.session.execute( + # ''' + # SELECT "alarmEventRecordId" + # FROM "alarmEventRecord" + # WHERE "perceivedSeverity" != :perceived_severity_enum + # ''', + # dict(perceived_severity_enum=alarm_obj.PerceivedSeverityEnum. 
+ # CLEARED) + # ) + # for row in rs: + # id = row[0] + # # logger.debug('Exist alarm: ' + id) + # exist_alarms[id] = False + + # ret = [] + # for m in newmodels: + # try: + # if exist_alarms[m.id]: + # ret.append(commands.UpdateAlarm(m)) + # exist_alarms[m.id] = True + # except KeyError: + # logger.debug('alarm new: ' + m.id) + # ret.append(commands.UpdateAlarm(m)) + + # for alarm in exist_alarms: + # logger.debug('exist alarm: ' + alarm) + # if exist_alarms[alarm]: + # # exist alarm is active + # continue + # event = self._client.get(alarm) + # ret.append(commands.UpdateAlarm(event)) + + # return ret + + return [commands.UpdateAlarm(m) for m in newmodels] \ + if len(newmodels) > 0 else [] + + +# class EventWatcher(BaseWatcher): +# def __init__(self, fault_client: BaseClient, +# bus: MessageBus) -> None: +# super().__init__(fault_client, bus) + +# def _targetname(self): +# return "event" + +# def _probe(self, parent: object = None, tags: object = None): +# newmodels = self._client.list() +# return [commands.UpdateAlarm(m) for m in newmodels] \ +# if len(newmodels) > 0 else [] diff --git a/o2ims/views/__init__.py b/o2ims/views/__init__.py index b465764..f67a23f 100644 --- a/o2ims/views/__init__.py +++ b/o2ims/views/__init__.py @@ -14,7 +14,7 @@ from o2common.config import config -from . import ocloud_route, provision_route +from . import ocloud_route, provision_route, alarm_route from . import api_ns from o2common.helper import o2logging @@ -24,11 +24,15 @@ logger = o2logging.get_logger(__name__) def configure_namespace(app): apiims = config.get_o2ims_api_base() apiprovision = config.get_provision_api_base() + apimonitoring = config.get_o2ims_monitoring_api_base() logger.info( - "Expose the O2 IMS API:{}\nExpose Provision API: {}". - format(apiims, apiprovision)) + "Expose the O2 IMS API:{}\nExpose Provision API: {} \ + \nExpose Monitoring API: {}". 
+ format(apiims, apiprovision, apimonitoring)) ocloud_route.configure_api_route() provision_route.configure_api_route() + alarm_route.configure_api_route() app.add_namespace(api_ns.api_ims_inventory_v1, path=apiims) app.add_namespace(api_ns.api_provision_v1, path=apiprovision) + app.add_namespace(api_ns.api_monitoring_v1, path=apimonitoring) diff --git a/o2ims/views/alarm_dto.py b/o2ims/views/alarm_dto.py new file mode 100644 index 0000000..54bfe7d --- /dev/null +++ b/o2ims/views/alarm_dto.py @@ -0,0 +1,69 @@ +# Copyright (C) 2021 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from flask_restx import fields + +from o2ims.views.api_ns import api_monitoring_v1 + + +class AlarmDTO: + + alarm_event_record_get = api_monitoring_v1.model( + "AlarmGetDto", + { + 'alarmEventRecordId': fields.String( + required=True, + description='Alarm Event Record ID'), + 'resourceTypeId': fields.String, + 'resourceId': fields.String, + 'alarmDefinitionId': fields.String, + 'alarmRaisedTime': fields.String, + 'perceivedSeverity': fields.String, + } + ) + + +class SubscriptionDTO: + + subscription_get = api_monitoring_v1.model( + "AlarmSubscriptionGetDto", + { + 'alarmSubscriptionId': fields.String( + required=True, + description='Alarm Subscription ID'), + 'callback': fields.String, + 'consumerSubscriptionId': fields.String, + 'filter': fields.String, + } + ) + + subscription = api_monitoring_v1.model( + "AlarmSubscriptionCreateDto", + { + 'callback': fields.String( + required=True, + description='Alarm Subscription callback address'), + 'consumerSubscriptionId': fields.String, + 'filter': fields.String, + } + ) + + subscription_post_resp = api_monitoring_v1.model( + "AlarmSubscriptionCreatedRespDto", + { + 'alarmSubscriptionId': fields.String( + required=True, + description='Alarm Subscription ID'), + } + ) diff --git a/o2ims/views/alarm_route.py b/o2ims/views/alarm_route.py new file mode 100644 index 0000000..91ca8f8 --- /dev/null +++ b/o2ims/views/alarm_route.py @@ -0,0 +1,103 @@ +# Copyright (C) 2021 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from flask_restx import Resource + +from o2common.service.messagebus import MessageBus +from o2ims.views import alarm_view +from o2ims.views.api_ns import api_monitoring_v1 +from o2ims.views.alarm_dto import AlarmDTO, SubscriptionDTO + +from o2common.helper import o2logging +logger = o2logging.get_logger(__name__) + + +def configure_api_route(): + # Set global bus for resource + global bus + bus = MessageBus.get_instance() + + +# ---------- Alarm Event Record ---------- # +@api_monitoring_v1.route("/alarms") +class AlarmListRouter(Resource): + + model = AlarmDTO.alarm_event_record_get + + @api_monitoring_v1.marshal_list_with(model) + def get(self): + return alarm_view.alarm_event_records(bus.uow) + + +@api_monitoring_v1.route("/alarms/") +@api_monitoring_v1.param('alarmEventRecordId', 'ID of the alarm event record') +@api_monitoring_v1.response(404, 'Alarm Event Record not found') +class AlarmGetRouter(Resource): + + model = AlarmDTO.alarm_event_record_get + + @api_monitoring_v1.doc('Get resource type') + @api_monitoring_v1.marshal_with(model) + def get(self, alarmEventRecordId): + result = alarm_view.alarm_event_record_one(alarmEventRecordId, bus.uow) + if result is not None: + return result + api_monitoring_v1.abort( + 404, "Resource type {} doesn't exist".format(alarmEventRecordId)) + + +# ---------- Alarm Subscriptions ---------- # +@api_monitoring_v1.route("/alarmSubscriptions") +class SubscriptionsListRouter(Resource): + + model = SubscriptionDTO.subscription_get + expect = SubscriptionDTO.subscription + post_resp = SubscriptionDTO.subscription_post_resp + + @api_monitoring_v1.doc('List alarm subscriptions') + @api_monitoring_v1.marshal_list_with(model) + def get(self): + return alarm_view.subscriptions(bus.uow) + + @api_monitoring_v1.doc('Create a alarm subscription') + @api_monitoring_v1.expect(expect) + @api_monitoring_v1.marshal_with(post_resp, 
code=201) + def post(self): + data = api_monitoring_v1.payload + result = alarm_view.subscription_create(data, bus.uow) + return result, 201 + + +@api_monitoring_v1.route("/alarmSubscriptions/") +@api_monitoring_v1.param('alarmSubscriptionID', 'ID of the Alarm Subscription') +@api_monitoring_v1.response(404, 'Alarm Subscription not found') +class SubscriptionGetDelRouter(Resource): + + model = SubscriptionDTO.subscription_get + + @api_monitoring_v1.doc('Get Alarm Subscription by ID') + @api_monitoring_v1.marshal_with(model) + def get(self, alarmSubscriptionID): + result = alarm_view.subscription_one( + alarmSubscriptionID, bus.uow) + if result is not None: + return result + api_monitoring_v1.abort(404, "Subscription {} doesn't exist".format( + alarmSubscriptionID)) + + @api_monitoring_v1.doc('Delete subscription by ID') + @api_monitoring_v1.response(204, 'Subscription deleted') + def delete(self, alarmSubscriptionID): + result = alarm_view.subscription_delete(alarmSubscriptionID, bus.uow) + return result, 204 diff --git a/o2ims/views/alarm_view.py b/o2ims/views/alarm_view.py new file mode 100644 index 0000000..258e323 --- /dev/null +++ b/o2ims/views/alarm_view.py @@ -0,0 +1,71 @@ +# Copyright (C) 2021 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import uuid as uuid + +from o2common.service import unit_of_work +from o2ims.views.alarm_dto import SubscriptionDTO +from o2ims.domain.alarm_obj import AlarmSubscription + +from o2common.helper import o2logging +# from o2common.config import config +logger = o2logging.get_logger(__name__) + + +def alarm_event_records(uow: unit_of_work.AbstractUnitOfWork): + with uow: + li = uow.alarm_event_records.list() + return [r.serialize() for r in li] + + +def alarm_event_record_one(alarmEventRecordId: str, + uow: unit_of_work.AbstractUnitOfWork): + with uow: + first = uow.alarm_event_records.get(alarmEventRecordId) + return first.serialize() if first is not None else None + + +def subscriptions(uow: unit_of_work.AbstractUnitOfWork): + with uow: + li = uow.alarm_subscriptions.list() + return [r.serialize() for r in li] + + +def subscription_one(subscriptionId: str, + uow: unit_of_work.AbstractUnitOfWork): + with uow: + first = uow.alarm_subscriptions.get(subscriptionId) + return first.serialize() if first is not None else None + + +def subscription_create(subscriptionDto: SubscriptionDTO.subscription, + uow: unit_of_work.AbstractUnitOfWork): + + sub_uuid = str(uuid.uuid4()) + subscription = AlarmSubscription( + sub_uuid, subscriptionDto['callback'], + subscriptionDto['consumerSubscriptionId'], + subscriptionDto['filter']) + with uow: + uow.alarm_subscriptions.add(subscription) + uow.commit() + return {"alarmSubscriptionId": sub_uuid} + + +def subscription_delete(subscriptionId: str, + uow: unit_of_work.AbstractUnitOfWork): + with uow: + uow.alarm_subscriptions.delete(subscriptionId) + uow.commit() + return True diff --git a/o2ims/views/api_ns.py b/o2ims/views/api_ns.py index a633a94..3fbdc18 100644 --- a/o2ims/views/api_ns.py +++ b/o2ims/views/api_ns.py @@ -8,3 +8,7 @@ api_ims_inventory_v1 = Namespace( api_provision_v1 = Namespace( "PROVISION", description='Provision related operations.') + +api_monitoring_v1 = Namespace( + "O2IMS_InfrastructureMonitoring", + 
description='O2 IMS Monitoring related operations.') diff --git a/requirements-stx.txt b/requirements-stx.txt index 4f5ef1a..31b4dab 100644 --- a/requirements-stx.txt +++ b/requirements-stx.txt @@ -1,3 +1,4 @@ -e git+https://opendev.org/starlingx/distcloud-client.git@master#egg=distributedcloud-client&subdirectory=distributedcloud-client --e git+https://opendev.org/starlingx/config.git@master#egg=cgtsclient&subdirectory=sysinv/cgts-client/cgts-client# +-e git+https://opendev.org/starlingx/config.git@master#egg=cgtsclient&subdirectory=sysinv/cgts-client/cgts-client # -e git+https://github.com/cloudify-incubator/cloudify-helm-plugin.git@master#egg=helmsdk&subdirectory=helm_sdk +-e git+https://opendev.org/starlingx/fault.git@master#egg=fmclient&subdirectory=python-fmclient/fmclient diff --git a/requirements-test.txt b/requirements-test.txt index c5ae7f0..4abb891 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -10,6 +10,7 @@ pytest-icdiff mock tenacity +pyOpenSSL # -e git+https://opendev.org/starlingx/distcloud-client.git@master#egg=distributedcloud-client&subdirectory=distributedcloud-client # -e git+https://opendev.org/starlingx/config.git@master#egg=cgtsclient&subdirectory=sysinv/cgts-client/cgts-client \ No newline at end of file diff --git a/tests/conftest.py b/tests/conftest.py index 09667d5..d09ba1c 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -24,6 +24,9 @@ from o2ims.views import configure_namespace from o2app.bootstrap import bootstrap +#import os +#os.environ['ALARM_YAML'] = 'configs/alarm.yaml' + @pytest.fixture def mock_uow(): diff --git a/tests/integration-ocloud/test_clientdriver_stx_fault.py b/tests/integration-ocloud/test_clientdriver_stx_fault.py new file mode 100644 index 0000000..3c8699b --- /dev/null +++ b/tests/integration-ocloud/test_clientdriver_stx_fault.py @@ -0,0 +1,89 @@ +# Copyright (C) 2022 Wind River Systems, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# import sys +# import logging +import pytest + +from o2common.config import config +from o2ims.adapter.clients.fault_client import StxFaultClientImp +# from o2ims.adapter.clients.ocloud_client import StxClientImp +from cgtsclient.client import get_client as get_stx_client +from dcmanagerclient.api.client import client as get_dc_client +from fmclient.client import get_client as get_fm_client + + +@pytest.fixture +def real_stx_aio_client(): + os_client_args = config.get_stx_access_info() + config_client = get_stx_client(**os_client_args) + yield config_client + + +@pytest.fixture +def real_stx_dc_client(): + os_client_args = config.get_dc_access_info() + config_client = get_dc_client(**os_client_args) + yield config_client + + +@pytest.fixture +def real_stx_fm_client(): + os_client_args = config.get_fm_access_info() + config_client = get_fm_client(1, **os_client_args) + yield config_client + +# pytestmark = pytest.mark.usefixtures("mappers") + + +def test_get_alarmlist(real_stx_fm_client): + fmClientImp = StxFaultClientImp(real_stx_fm_client) + assert fmClientImp is not None + alarms = fmClientImp.getAlarmList() + assert alarms is not None + assert len(alarms) > 0 + + +def test_get_alarminfo(real_stx_fm_client): + fmClientImp = StxFaultClientImp(real_stx_fm_client) + assert fmClientImp is not None + alarms = fmClientImp.getAlarmList() + assert alarms is not None + assert len(alarms) > 0 + alarm1 = alarms[0] + alarm2 = 
fmClientImp.getAlarmInfo(alarm1.id) + assert alarm1 != alarm2 + assert alarm1.id == alarm2.id + # fmClientImp.getAlarmInfo('f87478e9-4cec-44dc-8f13-9304445d4070') + # assert fmClientImp is None + + +def test_get_eventlist(real_stx_fm_client): + fmClientImp = StxFaultClientImp(real_stx_fm_client) + assert fmClientImp is not None + events = fmClientImp.getEventList() + assert events is not None + assert len(events) > 0 + + +def test_get_eventinfo(real_stx_fm_client): + fmClientImp = StxFaultClientImp(real_stx_fm_client) + assert fmClientImp is not None + events = fmClientImp.getEventList() + assert events is not None + assert len(events) > 0 + event1 = events[0] + event2 = fmClientImp.getEventInfo(event1.id) + assert event1 != event2 + assert event1.id == event2.id diff --git a/tests/unit/test_alarm.py b/tests/unit/test_alarm.py new file mode 100644 index 0000000..5bcde82 --- /dev/null +++ b/tests/unit/test_alarm.py @@ -0,0 +1,322 @@ +# Copyright (C) 2021 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import uuid +import time +import json +from datetime import datetime +from unittest.mock import MagicMock +from typing import Callable + +from o2common.service.watcher import worker +from o2common.service.unit_of_work import AbstractUnitOfWork +from o2common.service.client.base_client import BaseClient +from o2common.service.watcher.base import BaseWatcher, WatcherTree +from o2common.service import messagebus +from o2common.config import config + +from o2ims.domain.resource_type import ResourceTypeEnum +from o2ims.domain import alarm_obj +from o2ims.domain import commands +from o2ims.views import alarm_view +from o2ims.service.watcher.alarm_watcher import AlarmWatcher + +from o2app.service import handlers +from o2app import bootstrap + + +def test_new_alarm_event_record(): + alarm_event_record_id1 = str(uuid.uuid4()) + alarm_event_record = alarm_obj.AlarmEventRecord( + alarm_event_record_id1, '', + '', '', '', alarm_obj.PerceivedSeverityEnum.CRITICAL) + assert alarm_event_record_id1 is not None and \ + alarm_event_record.alarmEventRecordId == alarm_event_record_id1 + + +def test_view_alarm_event_records(mock_uow): + session, uow = mock_uow + + alarm_event_record_id1 = str(uuid.uuid4()) + alarm_event_record1 = MagicMock() + alarm_event_record1.serialize.return_value = { + "alarmEventRecordId": alarm_event_record_id1} + session.return_value.query.return_value = [alarm_event_record1] + + alarm_event_record_list = alarm_view.alarm_event_records(uow) + assert str(alarm_event_record_list[0].get( + "alarmEventRecordId")) == alarm_event_record_id1 + + +def test_view_alarm_event_record_one(mock_uow): + session, uow = mock_uow + + alarm_event_record_id1 = str(uuid.uuid4()) + session.return_value.query.return_value.filter_by.return_value.first.\ + return_value.serialize.return_value = None + + # Query return None + alarm_event_record1 = alarm_view.alarm_event_record_one( + alarm_event_record_id1, uow) + assert alarm_event_record1 is None + + 
session.return_value.query.return_value.filter_by.return_value.first.\ + return_value.serialize.return_value = { + "alarmEventRecordId": alarm_event_record_id1} + + alarm_event_record1 = alarm_view.alarm_event_record_one( + alarm_event_record_id1, uow) + assert str(alarm_event_record1.get( + "alarmEventRecordId")) == alarm_event_record_id1 + + +def test_alarm_dictionary(mock_uow): + session, uow = mock_uow + alarm_dict1 = alarm_obj.AlarmDictionary('test1') + alarm_dict1.entityType = 'test1' + with uow: + uow.alarm_dictionaries.add(alarm_dict1) + + alarm_dict2 = uow.alarm_dictionaries.get('test1') + assert alarm_dict1 == alarm_dict2 + + dict_list = uow.alarm_dictionaries.list() + assert len(dict_list) > 0 + + +def test_flask_get_list(mock_flask_uow): + session, app = mock_flask_uow + session.query.return_value = [] + apibase = config.get_o2ims_monitoring_api_base() + + with app.test_client() as client: + # Get list and return empty list + ########################## + resp = client.get(apibase+"/alarms") + assert resp.get_data() == b'[]\n' + + resp = client.get(apibase+"/alarmSubscriptions") + assert resp.get_data() == b'[]\n' + + +def test_flask_get_one(mock_flask_uow): + session, app = mock_flask_uow + + session.return_value.query.return_value.filter_by.return_value.\ + first.return_value = None + apibase = config.get_o2ims_monitoring_api_base() + + with app.test_client() as client: + # Get one and return 404 + ########################### + alarm_id1 = str(uuid.uuid4()) + resp = client.get(apibase+"/alarms/"+alarm_id1) + assert resp.status_code == 404 + + sub_id1 = str(uuid.uuid4()) + resp = client.get(apibase+"/alarmSubscriptions/"+sub_id1) + assert resp.status_code == 404 + + +def test_flask_post(mock_flask_uow): + session, app = mock_flask_uow + apibase = config.get_o2ims_monitoring_api_base() + + with app.test_client() as client: + session.return_value.execute.return_value = [] + + sub_callback = 'http://subscription/callback/url' + resp = 
client.post(apibase+'/alarmSubscriptions', json={ + 'callback': sub_callback, + 'consumerSubscriptionId': 'consumerSubId1', + 'filter': 'empty' + }) + assert resp.status_code == 201 + assert 'alarmSubscriptionId' in resp.get_json() + + +def test_flask_delete(mock_flask_uow): + session, app = mock_flask_uow + apibase = config.get_o2ims_monitoring_api_base() + + with app.test_client() as client: + session.return_value.execute.return_value.first.return_value = {} + + subscription_id1 = str(uuid.uuid4()) + resp = client.delete(apibase+"/alarmSubscriptions/"+subscription_id1) + assert resp.status_code == 204 + + +def test_flask_not_allowed(mock_flask_uow): + _, app = mock_flask_uow + apibase = config.get_o2ims_monitoring_api_base() + + with app.test_client() as client: + # Testing resource type not support method + ########################## + uri = apibase + "/alarms" + resp = client.post(uri) + assert resp.status == '405 METHOD NOT ALLOWED' + resp = client.put(uri) + assert resp.status == '405 METHOD NOT ALLOWED' + resp = client.patch(uri) + assert resp.status == '405 METHOD NOT ALLOWED' + resp = client.delete(uri) + assert resp.status == '405 METHOD NOT ALLOWED' + + +class FakeAlarmClient(BaseClient): + def __init__(self): + super().__init__() + fakeAlarm = alarm_obj.FaultGenericModel(ResourceTypeEnum.OCLOUD) + fakeAlarm.id = str(uuid.uuid4()) + fakeAlarm.name = 'alarm' + fakeAlarm.content = json.dumps({}) + fakeAlarm.createtime = datetime.now() + fakeAlarm.updatetime = datetime.now() + fakeAlarm.hash = str(hash((fakeAlarm.id, fakeAlarm.updatetime))) + self.fakeAlarm = fakeAlarm + + def _get(self, id) -> alarm_obj.FaultGenericModel: + return self.fakeAlarm + + def _list(self): + return [self.fakeAlarm] + + def _set_stx_client(self): + pass + + +# class FakeStxObjRepo(StxObjectRepository): +# def __init__(self): +# super().__init__() +# self.alarms = [] + +# def _add(self, alarm: alarm_obj.AlarmEventRecord): +# self.alarms.append(alarm) + +# def _get(self, alarmid) -> 
alarm_obj.AlarmEventRecord: +# filtered = [a for a in self.alarms if a.id == alarmid] +# return filtered.pop() + +# def _list(self) -> List[alarm_obj.AlarmEventRecord]: +# return [x for x in self.oclouds] + +# def _update(self, alarm: alarm_obj.AlarmEventRecord): +# filtered = [a for a in self.alarms if a.id == alarm.id] +# assert len(filtered) == 1 +# ocloud1 = filtered.pop() +# ocloud1.update_by(alarm) + + +class FakeUnitOfWork(AbstractUnitOfWork): + def __init__(self, session_factory=None): + self.session_factory = session_factory + + def __enter__(self): + self.session = self.session_factory + # self.stxobjects = FakeStxObjRepo() + return super().__enter__() + + def __exit__(self, *args): + super().__exit__(*args) + # self.session.close() + + def _commit(self): + pass + # self.session.commit() + + def rollback(self): + pass + # self.session.rollback() + + def collect_new_events(self): + yield + # return super().collect_new_events() + + +def create_alarm_fake_bus(uow): + def update_alarm( + cmd: commands.UpdateAlarm, + uow: AbstractUnitOfWork, + publish: Callable): + return + + handlers.EVENT_HANDLERS = {} + handlers.COMMAND_HANDLERS = { + commands.UpdateAlarm: update_alarm, + } + bus = bootstrap.bootstrap(False, uow) + return bus + + +def test_probe_new_alarm(): + session = MagicMock() + session.return_value.execute.return_value = [] + fakeuow = FakeUnitOfWork(session) + bus = create_alarm_fake_bus(fakeuow) + fakeClient = FakeAlarmClient() + alarmwatcher = AlarmWatcher(fakeClient, bus) + cmds = alarmwatcher.probe() + assert cmds is not None + assert len(cmds) == 1 + assert cmds[0].data.name == "alarm" + # assert len(fakeuow.stxobjects.oclouds) == 1 + # assert fakeuow.stxobjects.oclouds[0].name == "stx1" + + +def test_watchers_worker(): + testedworker = worker.PollWorker() + + class FakeAlarmWatcher(BaseWatcher): + def __init__(self, client: BaseClient, + bus: messagebus) -> None: + super().__init__(client, None) + self.fakeOcloudWatcherCounter = 0 + 
self._client = client + self._bus = bus + + def _targetname(self): + return "fakealarmwatcher" + + def _probe(self, parent: object = None, tags=None): + # import pdb; pdb.set_trace() + self.fakeOcloudWatcherCounter += 1 + # hacking to stop the blocking sched task + if self.fakeOcloudWatcherCounter > 2: + testedworker.stop() + return [] + + # fakeRepo = FakeOcloudRepo() + fakeuow = FakeUnitOfWork() + bus = create_alarm_fake_bus(fakeuow) + + fakeClient = FakeAlarmClient() + fakewatcher = FakeAlarmWatcher(fakeClient, bus) + + root = WatcherTree(fakewatcher) + + testedworker.set_interval(1) + testedworker.add_watcher(root) + assert fakewatcher.fakeOcloudWatcherCounter == 0 + + count1 = fakewatcher.fakeOcloudWatcherCounter + testedworker.start() + time.sleep(20) + assert fakewatcher.fakeOcloudWatcherCounter > count1 + + # assumed hacking: probe has stopped the sched task + count3 = fakewatcher.fakeOcloudWatcherCounter + time.sleep(3) + assert fakewatcher.fakeOcloudWatcherCounter == count3 diff --git a/tests/unit/test_ocloud.py b/tests/unit/test_ocloud.py index 3359230..a29c2a6 100644 --- a/tests/unit/test_ocloud.py +++ b/tests/unit/test_ocloud.py @@ -14,7 +14,7 @@ import uuid from unittest.mock import MagicMock -from o2dms.domain import dms +# from o2dms.domain import dms from o2ims.domain import ocloud, subscription_obj, configuration_obj from o2ims.domain import resource_type as rt diff --git a/tests/unit/test_provision.py b/tests/unit/test_provision.py index 99c4fde..96d53f7 100644 --- a/tests/unit/test_provision.py +++ b/tests/unit/test_provision.py @@ -94,19 +94,19 @@ def test_flask_get_one(mock_flask_uow): assert resp.status_code == 404 -def test_flask_post(mock_flask_uow): - session, app = mock_flask_uow - apibase = config.get_provision_api_base() - - with app.test_client() as client: - session.return_value.execute.return_value = [] - - conf_callback = 'http://registration/callback/url' - resp = client.post(apibase+'/smo-endpoint', json={ - 'endpoint': 
conf_callback - }) - assert resp.status_code == 201 - assert 'id' in resp.get_json() +# def test_flask_post(mock_flask_uow): +# session, app = mock_flask_uow +# apibase = config.get_provision_api_base() + +# with app.test_client() as client: +# session.return_value.execute.return_value = [] + +# conf_callback = 'http://registration/callback/url' +# resp = client.post(apibase+'/smo-endpoint', json={ +# 'endpoint': conf_callback +# }) +# assert resp.status_code == 201 +# assert 'id' in resp.get_json() def test_flask_delete(mock_flask_uow): -- 2.16.6 From 7c167626f2692556b1fe073f87150f54a8c9910a Mon Sep 17 00:00:00 2001 From: "Zhang Rong(Jon)" Date: Mon, 17 Oct 2022 23:14:32 +0800 Subject: [PATCH 04/16] Update O2app start with global ocloud ID Issue-ID: INF-316 Signed-off-by: Zhang Rong(Jon) Change-Id: If7e9d078494bd95e62a36dc9698a010348c3bcf9 --- configs/o2app.conf | 11 +++ o2common/config/__init__.py | 7 ++ o2common/config/base.py | 139 ++++++++++++++++++++++++++++++++ o2common/config/config.py | 5 ++ o2ims/service/auditor/ocloud_handler.py | 9 ++- tests/unit/test_watcher.py | 4 +- tox.ini | 2 + 7 files changed, 173 insertions(+), 4 deletions(-) create mode 100644 configs/o2app.conf create mode 100644 o2common/config/base.py diff --git a/configs/o2app.conf b/configs/o2app.conf new file mode 100644 index 0000000..2fab3ba --- /dev/null +++ b/configs/o2app.conf @@ -0,0 +1,11 @@ +[DEFAULT] + +ocloud_global_id = 4e24b97c-8c49-4c4f-b53e-3de5235a4e37 +smo_url = http://127.0.0.1:8090/register + +[API] +test = "hello" + +[WATCHER] + +[PUBSUB] diff --git a/o2common/config/__init__.py b/o2common/config/__init__.py index 813897e..e3004d4 100644 --- a/o2common/config/__init__.py +++ b/o2common/config/__init__.py @@ -11,3 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ +from o2common.config.base import Config +from o2common.config.config import get_config_path + + +conf = Config() +conf.load(get_config_path()) diff --git a/o2common/config/base.py b/o2common/config/base.py new file mode 100644 index 0000000..af49f30 --- /dev/null +++ b/o2common/config/base.py @@ -0,0 +1,139 @@ +# Copyright (C) 2022 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import configparser + + +class Error(Exception): + """Base class for cfg exceptions.""" + + def __init__(self, msg=None): + self.msg = msg + + def __str__(self): + return self.msg + + +class NoSuchOptError(Error, AttributeError): + """Raised if an opt which doesn't exist is referenced.""" + + def __init__(self, opt_name, group=None): + self.opt_name = opt_name + self.group = group + + def __str__(self): + group_name = 'DEFAULT' if self.group is None else self.group.name + return "no such option %s in group [%s]" % (self.opt_name, group_name) + + +class NoSuchConfigFile(Error): + """Raised if the config file does not exist.""" + + def __init__(self, file_path): + self.file_path = file_path + + def __str__(self): + return "no such file %s exist" % self.file_path + + +class Section: + def __init__(self, section: str) -> None: + self.group_name = section + self._options = {} + + def _set(self, name, value): + opt = getattr(self, name) + if opt is None: + setattr(self, name, value) + if name not in self._options: + self._options[name] = value + + def 
_get(self, name): + name = name.lower() + if name in self._options: + return self._options[name] + + def __getattr__(self, name): + try: + return self._get(name) + except ValueError: + raise + except Exception: + raise NoSuchOptError(name, self.group_name) + + +class Config: + def __init__(self) -> None: + self.__cache = {'b': 456} + self._sections = {} + + def _set(self, section, name='', value=''): + group = getattr(self, section) + if group is None: + group = Section(section) + setattr(self, section, group) + if section not in self._sections: + self._sections[section] = group + if name != '': + setattr(group, name, value) + return group + + def _get(self, name): + if name in self._sections: + return self._sections(name) + + if name in self.__cache: + return self.__cache(name) + + def __getattr__(self, name): + try: + return self._get(name) + except ValueError: + raise + except Exception: + raise NoSuchOptError(name) + + def load(self, file_path): + if not os.path.exists(file_path): + raise NoSuchConfigFile(file_path) + conf = configparser.ConfigParser() + conf.read(file_path) + default_group = self._set('DEFAULT') + for option in conf['DEFAULT']: + print(option) + default_group._set(option, conf['DEFAULT'][option]) + for section in conf.sections(): + group = self._set(section) + for option in conf[section]: + group._set(option, conf[section][option]) + + +if __name__ == "__main__": + conf = Config() + # conf._set('default', 'a', 123) + + # print(conf.default.a) + # print(conf.b) + + # conf = configparser.ConfigParser() + # conf.read('configs/o2app.conf') + # print(conf) + # print(conf['DEFAULT'].__dict__) + # print(conf['DEFAULT']['test']) + conf.load('configs/o2app.conf') + print(conf.API.test) + print(conf.DEFAULT.test) + print(conf.PUBSUB.ooo) + print(conf.DEFAULT.oCloudGlobalID) diff --git a/o2common/config/config.py b/o2common/config/config.py index e42c886..d3a076d 100644 --- a/o2common/config/config.py +++ b/o2common/config/config.py @@ -24,6 +24,11 @@ 
_DEFAULT_DCMANAGER_URL = "http://192.168.204.1:8119/v1.0" _DEFAULT_STX_URL = "http://192.168.204.1:5000/v3" +def get_config_path(): + path = os.environ.get("O2APP_CONFIG", "/configs/o2app.conf") + return path + + def get_postgres_uri(): host = os.environ.get("DB_HOST", "localhost") port = 54321 if host == "localhost" else 5432 diff --git a/o2ims/service/auditor/ocloud_handler.py b/o2ims/service/auditor/ocloud_handler.py index 4cc8ec7..d1e2fa7 100644 --- a/o2ims/service/auditor/ocloud_handler.py +++ b/o2ims/service/auditor/ocloud_handler.py @@ -20,7 +20,7 @@ from __future__ import annotations # from typing import List, Dict, Callable, Type # TYPE_CHECKING -from o2common.config import config +from o2common.config import config, conf # from o2common.service.messagebus import MessageBus from o2common.service.unit_of_work import AbstractUnitOfWork from o2ims.domain import events, commands @@ -82,13 +82,18 @@ def is_outdated(ocloud: Ocloud, stxobj: StxGenericModel): def create_by(stxobj: StxGenericModel) -> Ocloud: imsendpoint = config.get_api_url() + config.get_o2ims_api_base() + '/' - globalcloudId = stxobj.id # to be updated + globalcloudId = conf.DEFAULT.ocloud_global_id description = "An ocloud" ocloud = Ocloud(stxobj.id, stxobj.name, imsendpoint, globalcloudId, description, 1) ocloud.createtime = stxobj.createtime ocloud.updatetime = stxobj.updatetime ocloud.hash = stxobj.hash + ocloud.events.append(events.OcloudChanged( + id=stxobj.id, + notificationEventType=NotificationEventEnum.CREATE, + updatetime=stxobj.updatetime + )) return ocloud diff --git a/tests/unit/test_watcher.py b/tests/unit/test_watcher.py index 5b1f5b4..4bb707f 100644 --- a/tests/unit/test_watcher.py +++ b/tests/unit/test_watcher.py @@ -136,12 +136,12 @@ def create_fake_bus(uow): publish: Callable): return - fakeuow = FakeUnitOfWork() + # fakeuow = FakeUnitOfWork() handlers.EVENT_HANDLERS = {} handlers.COMMAND_HANDLERS = { commands.UpdateOCloud: update_ocloud, } - bus = bootstrap.bootstrap(False, 
fakeuow) + bus = bootstrap.bootstrap(False, uow) return bus diff --git a/tox.ini b/tox.ini index 03f379c..14651d8 100644 --- a/tox.ini +++ b/tox.ini @@ -36,6 +36,8 @@ commands = flake8 o2common [testenv:code] +setenv = + O2APP_CONFIG=configs/o2app.conf commands = pytest tests/unit -- 2.16.6 From 9ee90ffa2414326c26fd10edc59bf315204254e2 Mon Sep 17 00:00:00 2001 From: "Zhang Rong(Jon)" Date: Tue, 18 Oct 2022 17:13:47 +0800 Subject: [PATCH 05/16] Update SMO register process; remove provision code Issue-ID: INF-304 Signed-off-by: Zhang Rong(Jon) Change-Id: I48e75c9c4409b880d1df2c1cbbe92741698e4d9c --- configs/o2app.conf | 2 +- o2app/adapter/unit_of_work.py | 5 - o2app/entrypoints/redis_eventconsumer.py | 21 ++-- o2app/service/handlers.py | 6 +- o2common/config/base.py | 1 - o2common/config/config.py | 4 - o2common/domain/base.py | 14 +-- o2ims/adapter/ocloud_repository.py | 26 +---- o2ims/adapter/orm.py | 17 +-- o2ims/domain/configuration_obj.py | 53 ---------- o2ims/domain/configuration_repo.py | 57 ---------- o2ims/domain/events.py | 6 -- o2ims/domain/ocloud.py | 2 +- o2ims/domain/subscription_obj.py | 4 +- o2ims/service/command/registration_handler.py | 59 ++++------- o2ims/service/event/configuration_event.py | 30 ------ o2ims/views/__init__.py | 9 +- o2ims/views/provision_dto.py | 49 --------- o2ims/views/provision_route.py | 74 ------------- o2ims/views/provision_view.py | 71 ------------- tests/conftest.py | 31 ++++-- tests/unit/test_ocloud.py | 11 +- tests/unit/test_provision.py | 147 -------------------------- 23 files changed, 69 insertions(+), 630 deletions(-) delete mode 100644 o2ims/domain/configuration_obj.py delete mode 100644 o2ims/domain/configuration_repo.py delete mode 100644 o2ims/service/event/configuration_event.py delete mode 100644 o2ims/views/provision_dto.py delete mode 100644 o2ims/views/provision_route.py delete mode 100644 o2ims/views/provision_view.py delete mode 100644 tests/unit/test_provision.py diff --git a/configs/o2app.conf 
b/configs/o2app.conf index 2fab3ba..7a97437 100644 --- a/configs/o2app.conf +++ b/configs/o2app.conf @@ -1,7 +1,7 @@ [DEFAULT] ocloud_global_id = 4e24b97c-8c49-4c4f-b53e-3de5235a4e37 -smo_url = http://127.0.0.1:8090/register +smo_register_url = http://127.0.0.1:8090/register [API] test = "hello" diff --git a/o2app/adapter/unit_of_work.py b/o2app/adapter/unit_of_work.py index f046db5..7e16308 100644 --- a/o2app/adapter/unit_of_work.py +++ b/o2app/adapter/unit_of_work.py @@ -57,8 +57,6 @@ class SqlAlchemyUnitOfWork(AbstractUnitOfWork): .ResourceSqlAlchemyRepository(self.session) self.subscriptions = ocloud_repository\ .SubscriptionSqlAlchemyRepository(self.session) - self.configurations = ocloud_repository\ - .ConfigurationSqlAlchemyRepository(self.session) self.deployment_managers = ocloud_repository\ .DeploymentManagerSqlAlchemyRepository(self.session) self.nfdeployment_descs = dms_repository\ @@ -111,9 +109,6 @@ class SqlAlchemyUnitOfWork(AbstractUnitOfWork): for entry in self.subscriptions.seen: while hasattr(entry, 'events') and len(entry.events) > 0: yield entry.events.pop(0) - for entry in self.configurations.seen: - while hasattr(entry, 'events') and len(entry.events) > 0: - yield entry.events.pop(0) for entry in self.nfdeployment_descs.seen: while hasattr(entry, 'events') and len(entry.events) > 0: yield entry.events.pop(0) diff --git a/o2app/entrypoints/redis_eventconsumer.py b/o2app/entrypoints/redis_eventconsumer.py index 04ef31c..98e198b 100644 --- a/o2app/entrypoints/redis_eventconsumer.py +++ b/o2app/entrypoints/redis_eventconsumer.py @@ -13,18 +13,17 @@ # limitations under the License. 
# import json + import redis import json from o2app import bootstrap from o2common.config import config -# from o2common.domain import commands from o2dms.domain import commands from o2ims.domain import commands as imscmd +from o2ims.domain.subscription_obj import Message2SMO, RegistrationMessage +from o2ims.domain.alarm_obj import AlarmEvent2SMO from o2common.helper import o2logging -from o2ims.domain.subscription_obj import Message2SMO, NotificationEventEnum,\ - RegistrationMessage -from o2ims.domain.alarm_obj import AlarmEvent2SMO logger = o2logging.get_logger(__name__) r = redis.Redis(**config.get_redis_host_and_port()) @@ -39,7 +38,6 @@ def main(): pubsub = r.pubsub(ignore_subscribe_messages=True) pubsub.subscribe("NfDeploymentStateChanged") pubsub.subscribe('ResourceChanged') - pubsub.subscribe('ConfigurationChanged') pubsub.subscribe('OcloudChanged') pubsub.subscribe('AlarmEventChanged') @@ -75,19 +73,14 @@ def handle_changed(m, bus): eventtype=data['notificationEventType'], updatetime=data['updatetime'])) bus.handle(cmd) - elif channel == 'ConfigurationChanged': - datastr = m['data'] - data = json.loads(datastr) - logger.info('ConfigurationChanged with cmd:{}'.format(data)) - cmd = imscmd.Register2SMO(data=RegistrationMessage(id=data['id'])) - bus.handle(cmd) elif channel == 'OcloudChanged': datastr = m['data'] data = json.loads(datastr) logger.info('OcloudChanged with cmd:{}'.format(data)) - if data['notificationEventType'] == NotificationEventEnum.CREATE: - cmd = imscmd.Register2SMO(data=RegistrationMessage(is_all=True)) - bus.handle(cmd) + cmd = imscmd.Register2SMO(data=RegistrationMessage( + data['notificationEventType'], + id=data['id'])) + bus.handle(cmd) elif channel == 'AlarmEventChanged': datastr = m['data'] data = json.loads(datastr) diff --git a/o2app/service/handlers.py b/o2app/service/handlers.py index d630720..cc69fc5 100644 --- a/o2app/service/handlers.py +++ b/o2app/service/handlers.py @@ -17,7 +17,7 @@ from __future__ import annotations 
from o2dms.service import nfdeployment_handler # from dataclasses import asdict -from typing import List, Dict, Callable, Type +from typing import Dict, Callable, Type # TYPE_CHECKING from o2ims.domain import commands, events @@ -30,7 +30,7 @@ from o2ims.service.auditor import ocloud_handler, dms_handler, \ from o2ims.service.command import notify_handler, registration_handler,\ notify_alarm_handler from o2ims.service.event import ocloud_event, resource_event, \ - resource_pool_event, configuration_event, alarm_event + resource_pool_event, alarm_event # if TYPE_CHECKING: # from . import unit_of_work @@ -56,8 +56,6 @@ EVENT_HANDLERS = { events.ResourceChanged: [resource_event.notify_resource_change], events.ResourcePoolChanged: [resource_pool_event.\ notify_resourcepool_change], - events.ConfigurationChanged: [configuration_event.\ - notify_configuration_change], events.AlarmEventChanged: [alarm_event.\ notify_alarm_event_change], } # type: Dict[Type[events.Event], Callable] diff --git a/o2common/config/base.py b/o2common/config/base.py index af49f30..e88eca4 100644 --- a/o2common/config/base.py +++ b/o2common/config/base.py @@ -112,7 +112,6 @@ class Config: conf.read(file_path) default_group = self._set('DEFAULT') for option in conf['DEFAULT']: - print(option) default_group._set(option, conf['DEFAULT'][option]) for section in conf.sections(): group = self._set(section) diff --git a/o2common/config/config.py b/o2common/config/config.py index d3a076d..cf1c08c 100644 --- a/o2common/config/config.py +++ b/o2common/config/config.py @@ -62,10 +62,6 @@ def get_o2ims_monitoring_api_base(): return get_root_api_base() + 'o2ims-infrastructureMonitoring/v1' -def get_provision_api_base(): - return get_root_api_base() + 'provision/v1' - - def get_o2dms_api_base(): return get_root_api_base() + "o2dms/v1" diff --git a/o2common/domain/base.py b/o2common/domain/base.py index e56672b..f419f23 100644 --- a/o2common/domain/base.py +++ b/o2common/domain/base.py @@ -39,13 +39,13 @@ class 
Serializer(object): def serialize(self): try: - # d = {c: getattr(self, c) for c in inspect(self).attrs.keys()} - # if 'createtime' in d: - # d['createtime'] = d['createtime'].isoformat() - # if 'updatetime' in d: - # d['updatetime'] = d['updatetime'].isoformat() - # return d - return {c: getattr(self, c) for c in inspect(self).attrs.keys()} + d = {c: getattr(self, c) for c in inspect(self).attrs.keys()} + if 'createtime' in d: + d['createtime'] = d['createtime'].isoformat() + if 'updatetime' in d: + d['updatetime'] = d['updatetime'].isoformat() + return d + # return {c: getattr(self, c) for c in inspect(self).attrs.keys()} except NoInspectionAvailable: return self.__dict__ diff --git a/o2ims/adapter/ocloud_repository.py b/o2ims/adapter/ocloud_repository.py index ff1cd27..d48aa06 100644 --- a/o2ims/adapter/ocloud_repository.py +++ b/o2ims/adapter/ocloud_repository.py @@ -14,11 +14,10 @@ from typing import List -from o2ims.domain import ocloud, subscription_obj, configuration_obj +from o2ims.domain import ocloud, subscription_obj from o2ims.domain.ocloud_repo import OcloudRepository, ResourceTypeRepository,\ ResourcePoolRepository, ResourceRepository, DeploymentManagerRepository from o2ims.domain.subscription_repo import SubscriptionRepository -from o2ims.domain.configuration_repo import ConfigurationRepository from o2common.helper import o2logging logger = o2logging.get_logger(__name__) @@ -164,26 +163,3 @@ class SubscriptionSqlAlchemyRepository(SubscriptionRepository): def _delete(self, subscription_id): self.session.query(subscription_obj.Subscription).filter_by( subscriptionId=subscription_id).delete() - - -class ConfigurationSqlAlchemyRepository(ConfigurationRepository): - def __init__(self, session): - super().__init__() - self.session = session - - def _add(self, configuration: configuration_obj.Configuration): - self.session.add(configuration) - - def _get(self, configuration_id) -> configuration_obj.Configuration: - return 
self.session.query(configuration_obj.Configuration).filter_by( - configurationId=configuration_id).first() - - def _list(self) -> List[configuration_obj.Configuration]: - return self.session.query(configuration_obj.Configuration) - - def _update(self, configuration: configuration_obj.Configuration): - self.session.add(configuration) - - def _delete(self, configuration_id): - self.session.query(configuration_obj.Configuration).filter_by( - configurationId=configuration_id).delete() diff --git a/o2ims/adapter/orm.py b/o2ims/adapter/orm.py index 4775f29..4e3e39f 100644 --- a/o2ims/adapter/orm.py +++ b/o2ims/adapter/orm.py @@ -36,7 +36,6 @@ from sqlalchemy.orm import mapper, relationship from o2ims.domain import ocloud as ocloudModel from o2ims.domain import subscription_obj as subModel -from o2ims.domain import configuration_obj as confModel from o2ims.domain import alarm_obj as alarmModel from o2ims.domain.resource_type import ResourceTypeEnum # from o2ims.domain.alarm_obj import AlarmLastChangeEnum, PerceivedSeverityEnum @@ -58,7 +57,7 @@ ocloud = Table( Column("globalcloudId", String(255)), Column("name", String(255)), Column("description", String(255)), - Column("infrastructureManagementServiceEndpoint", String(255)) + Column("serviceUri", String(255)) # Column("extensions", String(1024)) ) @@ -152,19 +151,6 @@ subscription = Table( Column("filter", String(255)), ) -configuration = Table( - "configuration", - metadata, - Column("updatetime", DateTime), - Column("createtime", DateTime), - - Column("configurationId", String(255), primary_key=True), - Column("conftype", String(255)), - Column("callback", String(255)), - Column("status", String(255)), - Column("comments", String(255)), -) - alarm_definition = Table( "alarmDefinition", metadata, @@ -257,7 +243,6 @@ def start_o2ims_mappers(engine=None): } ) mapper(subModel.Subscription, subscription) - mapper(confModel.Configuration, configuration) # IMS Infrastruture Monitoring Mappering 
mapper(alarmModel.AlarmEventRecord, alarm_event_record) diff --git a/o2ims/domain/configuration_obj.py b/o2ims/domain/configuration_obj.py deleted file mode 100644 index 3261b51..0000000 --- a/o2ims/domain/configuration_obj.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (C) 2021 Wind River Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations -from enum import Enum -# from dataclasses import dataclass - -from o2common.domain.base import AgRoot, Serializer - - -class RegistrationStatusEnum(str, Enum): - CREATED = 'CREATED' - NOTIFIED = 'NOTIFIED' - FAILED = 'FAILED' - - -class ConfigurationTypeEnum(str, Enum): - SMO = 'SMO' - - -class Configuration(AgRoot, Serializer): - def __init__(self, id: str, url: str, - conf_type: ConfigurationTypeEnum, - status: RegistrationStatusEnum = - RegistrationStatusEnum.CREATED, - comments: str = '') -> None: - super().__init__() - self.configurationId = id - self.conftype = conf_type - self.callback = url - self.status = status - self.comments = comments - - def serialize_smo(self): - if self.conftype != ConfigurationTypeEnum.SMO: - return - - d = Serializer.serialize(self) - - d['endpoint'] = d['callback'] - d['id'] = d['configurationId'] - return d diff --git a/o2ims/domain/configuration_repo.py b/o2ims/domain/configuration_repo.py deleted file mode 100644 index 9ff95fa..0000000 --- a/o2ims/domain/configuration_repo.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (C) 2021 Wind River 
Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc -from typing import List, Set -from o2ims.domain import configuration_obj as obj - - -class ConfigurationRepository(abc.ABC): - def __init__(self): - self.seen = set() # type: Set[obj.Configuration] - - def add(self, configuration: obj.Configuration): - self._add(configuration) - self.seen.add(configuration) - - def get(self, configuration_id) -> obj.Configuration: - configuration = self._get(configuration_id) - if configuration: - self.seen.add(configuration) - return configuration - - def list(self) -> List[obj.Configuration]: - return self._list() - - def update(self, configuration: obj.Configuration): - self._update(configuration) - - def delete(self, configuration_id): - self._delete(configuration_id) - - @abc.abstractmethod - def _add(self, configuration: obj.Configuration): - raise NotImplementedError - - @abc.abstractmethod - def _get(self, configuration_id) -> obj.Configuration: - raise NotImplementedError - - @abc.abstractmethod - def _update(self, configuration: obj.Configuration): - raise NotImplementedError - - @abc.abstractmethod - def _delete(self, configuration_id): - raise NotImplementedError diff --git a/o2ims/domain/events.py b/o2ims/domain/events.py index eb0250f..19adab1 100644 --- a/o2ims/domain/events.py +++ b/o2ims/domain/events.py @@ -49,12 +49,6 @@ class ResourceChanged(Event): updatetime: datetime.now() -@dataclass -class ConfigurationChanged(Event): - id: str - 
updatetime: datetime.now() - - @dataclass class AlarmEventChanged(Event): id: str diff --git a/o2ims/domain/ocloud.py b/o2ims/domain/ocloud.py index 380d919..ce55aee 100644 --- a/o2ims/domain/ocloud.py +++ b/o2ims/domain/ocloud.py @@ -144,7 +144,7 @@ class Ocloud(AgRoot, Serializer): self.version_number = version_number self.name = name self.description = description - self.infrastructureManagementServiceEndpoint = imsendpoint + self.serviceUri = imsendpoint self.resourcePools = [] self.deploymentManagers = [] self.resourceTypes = [] diff --git a/o2ims/domain/subscription_obj.py b/o2ims/domain/subscription_obj.py index 596a616..49447d0 100644 --- a/o2ims/domain/subscription_obj.py +++ b/o2ims/domain/subscription_obj.py @@ -46,8 +46,8 @@ class Message2SMO(Serializer): class RegistrationMessage(Serializer): - def __init__(self, is_all: bool = None, id: str = '') -> None: - self.all = is_all if is_all is not None else False + def __init__(self, eventtype: NotificationEventEnum, id: str = '') -> None: + self.notificationEventType = eventtype self.id = id diff --git a/o2ims/service/command/registration_handler.py b/o2ims/service/command/registration_handler.py index d08bf73..40cd390 100644 --- a/o2ims/service/command/registration_handler.py +++ b/o2ims/service/command/registration_handler.py @@ -21,10 +21,10 @@ from urllib.parse import urlparse from retry import retry from o2common.service.unit_of_work import AbstractUnitOfWork -from o2common.config import config +from o2common.config import config, conf + from o2ims.domain import commands -from o2ims.domain.configuration_obj import ConfigurationTypeEnum, \ - RegistrationStatusEnum +from o2ims.domain.subscription_obj import NotificationEventEnum from o2common.helper import o2logging logger = o2logging.get_logger(__name__) @@ -36,39 +36,22 @@ def registry_to_smo( ): logger.info('In registry_to_smo') data = cmd.data - logger.info('The Register2SMO all is {}'.format(data.all)) - if data.all: - confs = 
uow.configrations.list() - for conf in confs: - if conf.conftype != ConfigurationTypeEnum.SMO: - continue - reg_data = conf.serialize() - logger.debug('Configuration: {}'.format( - reg_data['configurationId'])) - - register_smo(uow, reg_data) - else: - with uow: - conf = uow.configurations.get(data.id) - if conf is None: - return - logger.debug('Configuration: {}'.format(conf.configurationId)) - conf_data = conf.serialize() - register_smo(uow, conf_data) + logger.info('The Register2SMO notificationEventType is {}'.format( + data.notificationEventType)) + with uow: + ocloud = uow.oclouds.get(data.id) + if ocloud is None: + return + logger.debug('O-Cloud Global UUID: {}'.format(ocloud.globalcloudId)) + ocloud_dict = ocloud.serialize() + if data.notificationEventType == NotificationEventEnum.CREATE: + register_smo(uow, ocloud_dict) -def register_smo(uow, reg_data): - call_res = call_smo(reg_data) +def register_smo(uow, ocloud_data): + call_res = call_smo(ocloud_data) logger.debug('Call SMO response is {}'.format(call_res)) - if call_res: - reg = uow.configurations.get(reg_data['configurationId']) - if reg is None: - return - reg.status = RegistrationStatusEnum.NOTIFIED - logger.debug('Updating Configurations: {}'.format( - reg.configurationId)) - uow.configurations.update(reg) - uow.commit() + # TODO: record the result for the smo register # def retry(fun, max_tries=2): @@ -86,13 +69,15 @@ def register_smo(uow, reg_data): @retry((ConnectionRefusedError), tries=2, delay=2) def call_smo(reg_data: dict): callback_data = json.dumps({ - 'consumerSubscriptionId': reg_data['configurationId'], - 'imsUrl': config.get_api_url() + 'consumerSubscriptionId': reg_data['globalcloudId'], + 'notificationEventType': 'CREATE', + 'objectRef': config.get_api_url(), + 'postObjectState': reg_data }) logger.info('URL: {}, data: {}'.format( - reg_data['callback'], callback_data)) + conf.DEFAULT.smo_register_url, callback_data)) - o = urlparse(reg_data['callback']) + o = 
urlparse(conf.DEFAULT.smo_register_url) conn = http.client.HTTPConnection(o.netloc) headers = {'Content-type': 'application/json'} conn.request('POST', o.path, callback_data, headers) diff --git a/o2ims/service/event/configuration_event.py b/o2ims/service/event/configuration_event.py deleted file mode 100644 index c1e8c1c..0000000 --- a/o2ims/service/event/configuration_event.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (C) 2021 Wind River Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Callable - -from o2ims.domain import events - -from o2common.helper import o2logging -logger = o2logging.get_logger(__name__) - - -def notify_configuration_change( - event: events.ConfigurationChanged, - publish: Callable, -): - logger.info('In notify_registration_change') - publish("ConfigurationChanged", event) - logger.debug("published Registration Changed: {}".format( - event.id)) diff --git a/o2ims/views/__init__.py b/o2ims/views/__init__.py index f67a23f..108c124 100644 --- a/o2ims/views/__init__.py +++ b/o2ims/views/__init__.py @@ -14,7 +14,7 @@ from o2common.config import config -from . import ocloud_route, provision_route, alarm_route +from . import ocloud_route, alarm_route from . 
import api_ns from o2common.helper import o2logging @@ -23,16 +23,13 @@ logger = o2logging.get_logger(__name__) def configure_namespace(app): apiims = config.get_o2ims_api_base() - apiprovision = config.get_provision_api_base() apimonitoring = config.get_o2ims_monitoring_api_base() logger.info( - "Expose the O2 IMS API:{}\nExpose Provision API: {} \ + "Expose the O2 IMS API:{}\n \ \nExpose Monitoring API: {}". - format(apiims, apiprovision, apimonitoring)) + format(apiims, apimonitoring)) ocloud_route.configure_api_route() - provision_route.configure_api_route() alarm_route.configure_api_route() app.add_namespace(api_ns.api_ims_inventory_v1, path=apiims) - app.add_namespace(api_ns.api_provision_v1, path=apiprovision) app.add_namespace(api_ns.api_monitoring_v1, path=apimonitoring) diff --git a/o2ims/views/provision_dto.py b/o2ims/views/provision_dto.py deleted file mode 100644 index fc71b7b..0000000 --- a/o2ims/views/provision_dto.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (C) 2021 Wind River Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from flask_restx import fields - -from o2ims.views.api_ns import api_provision_v1 - - -class SmoEndpointDTO: - - endpoint_get = api_provision_v1.model( - "SmoEndpointGetDto", - { - 'id': fields.String(required=True, - description='SMO Endpoint Configuration ID'), - 'endpoint': fields.String, - 'status': fields.String, - 'comments': fields.String, - } - ) - - endpoint = api_provision_v1.model( - "SmoEndpointCreateDto", - { - 'endpoint': fields.String( - required=True, - description='Configuration SMO callback address', - example='http://mock_smo:80/registration') - } - ) - - endpoint_post_resp = api_provision_v1.model( - "SmoEndpointCreatedRespDto", - { - 'id': fields.String(required=True, - description='SMO Endpoint Configuration ID'), - } - ) diff --git a/o2ims/views/provision_route.py b/o2ims/views/provision_route.py deleted file mode 100644 index 7c91e7e..0000000 --- a/o2ims/views/provision_route.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (C) 2021 Wind River Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from flask_restx import Resource - -from o2common.service.messagebus import MessageBus -from o2ims.views import provision_view -from o2ims.views.api_ns import api_provision_v1 -from o2ims.views.provision_dto import SmoEndpointDTO - - -def configure_api_route(): - # Set global bus for resource - global bus - bus = MessageBus.get_instance() - - -# ---------- SMO endpoint ---------- # -@api_provision_v1.route("/smo-endpoint") -class SmoEndpointListRouter(Resource): - - model = SmoEndpointDTO.endpoint_get - expect = SmoEndpointDTO.endpoint - post_resp = SmoEndpointDTO.endpoint_post_resp - - @api_provision_v1.doc('List SMO endpoints') - @api_provision_v1.marshal_list_with(model) - def get(self): - return provision_view.configurations(bus.uow) - - @api_provision_v1.doc('Create a SMO endpoint') - @api_provision_v1.expect(expect) - @api_provision_v1.marshal_with(post_resp, code=201) - def post(self): - data = api_provision_v1.payload - result = provision_view.configuration_create(data, bus) - return result, 201 - - -@api_provision_v1.route("/smo-endpoint/") -@api_provision_v1.param('configurationID', - 'ID of the SMO endpoint configuration') -@api_provision_v1.response(404, 'SMO Endpoint configuration not found') -class SmoEndpointGetDelRouter(Resource): - - model = SmoEndpointDTO.endpoint_get - - @api_provision_v1.doc('Get configuration by ID') - @api_provision_v1.marshal_with(model) - def get(self, configurationID): - result = provision_view.configuration_one( - configurationID, bus.uow) - if result is not None: - return result - api_provision_v1.abort(404, - "SMO Endpoint configuration {} doesn't exist". 
- format(configurationID)) - - @api_provision_v1.doc('Delete configuration by ID') - @api_provision_v1.response(204, 'Configuration deleted') - def delete(self, configurationID): - result = provision_view.configuration_delete(configurationID, bus.uow) - return result, 204 diff --git a/o2ims/views/provision_view.py b/o2ims/views/provision_view.py deleted file mode 100644 index 903a4bb..0000000 --- a/o2ims/views/provision_view.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (C) 2021 Wind River Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import logging -import uuid -from datetime import datetime - -from o2common.service import unit_of_work, messagebus -from o2ims.domain import events -from o2ims.views.provision_dto import SmoEndpointDTO -from o2ims.domain.configuration_obj import Configuration, ConfigurationTypeEnum - - -def configurations(uow: unit_of_work.AbstractUnitOfWork): - with uow: - li = uow.configurations.list() - return [r.serialize_smo() for r in li] - - -def configuration_one(configurationId: str, - uow: unit_of_work.AbstractUnitOfWork): - with uow: - first = uow.configurations.get(configurationId) - return first.serialize_smo() if first is not None else None - - -def configuration_create(configurationDto: SmoEndpointDTO.endpoint, - bus: messagebus.MessageBus): - - conf_uuid = str(uuid.uuid4()) - configuration = Configuration( - conf_uuid, configurationDto['endpoint'], ConfigurationTypeEnum.SMO) - with bus.uow as uow: - uow.configurations.add(configuration) - logging.debug('before event length {}'.format( - len(configuration.events))) - configuration.events.append(events.ConfigurationChanged( - conf_uuid, - datetime.now())) - logging.debug('after event length {}'.format( - len(configuration.events))) - uow.commit() - _handle_events(bus) - return {"id": conf_uuid} - - -def configuration_delete(configurationId: str, - uow: unit_of_work.AbstractUnitOfWork): - with uow: - uow.configurations.delete(configurationId) - uow.commit() - return True - - -def _handle_events(bus: messagebus.MessageBus): - # handle events - events = bus.uow.collect_new_events() - for event in events: - bus.handle(event) - return True diff --git a/tests/conftest.py b/tests/conftest.py index d09ba1c..68da7c4 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,3 +1,17 @@ +# Copyright (C) 2022 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # pylint: disable=redefined-outer-name import shutil import subprocess @@ -14,18 +28,15 @@ from sqlalchemy.orm import sessionmaker, clear_mappers from tenacity import retry, stop_after_delay from unittest.mock import MagicMock -from o2common.config import config - -from o2ims.adapter.orm import metadata, start_o2ims_mappers -# from o2ims.adapter.clients.orm_stx import start_o2ims_stx_mappers - -from o2app.adapter import unit_of_work -from o2ims.views import configure_namespace - from o2app.bootstrap import bootstrap +from o2ims.views import configure_namespace +from o2app.adapter import unit_of_work +from o2ims.adapter.orm import metadata, start_o2ims_mappers +from o2common.config import config -#import os -#os.environ['ALARM_YAML'] = 'configs/alarm.yaml' +# import os +# os.environ['O2APP_CONFIG'] = 'configs/o2app.conf' +# os.environ['ALARM_YAML'] = 'configs/alarm.yaml' @pytest.fixture diff --git a/tests/unit/test_ocloud.py b/tests/unit/test_ocloud.py index a29c2a6..56d4e57 100644 --- a/tests/unit/test_ocloud.py +++ b/tests/unit/test_ocloud.py @@ -16,7 +16,7 @@ import uuid from unittest.mock import MagicMock # from o2dms.domain import dms -from o2ims.domain import ocloud, subscription_obj, configuration_obj +from o2ims.domain import ocloud, subscription_obj from o2ims.domain import resource_type as rt from o2ims.views import ocloud_view from o2common.config import config @@ -95,15 +95,6 @@ def test_new_subscription(): subscription1.subscriptionId == subscription_id1 -def test_new_configuration(): - configuration_id1 = str(uuid.uuid4()) - configuration1 = 
configuration_obj.Configuration( - configuration_id1, "https://callback/uri/write/here", - "SMO") - assert configuration_id1 is not None and\ - configuration1.configurationId == configuration_id1 - - def test_view_olcouds(mock_uow): session, uow = mock_uow diff --git a/tests/unit/test_provision.py b/tests/unit/test_provision.py deleted file mode 100644 index 96d53f7..0000000 --- a/tests/unit/test_provision.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright (C) 2021 Wind River Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import uuid -from unittest.mock import MagicMock - -from o2ims.domain import configuration_obj -from o2ims.views import provision_view -from o2common.config import config - - -def test_new_smo_endpoint(): - configuration_id1 = str(uuid.uuid4()) - configuration1 = configuration_obj.Configuration( - configuration_id1, "https://callback/uri/write/here", - "SMO") - assert configuration_id1 is not None and\ - configuration1.configurationId == configuration_id1 - - -def test_view_smo_endpoint(mock_uow): - session, uow = mock_uow - - configuration_id1 = str(uuid.uuid4()) - conf1 = MagicMock() - conf1.serialize_smo.return_value = { - "id": configuration_id1, - } - session.return_value.query.return_value = [conf1] - - configuration_list = provision_view.configurations(uow) - assert str(configuration_list[0].get( - "id")) == configuration_id1 - - -def test_view_smo_endpoint_one(mock_uow): - session, uow = mock_uow - - configuration_id1 = str(uuid.uuid4()) - session.return_value.query.return_value.filter_by.return_value.first.\ - return_value.serialize_smo.return_value = None - - # Query return None - configuration_res = provision_view.configuration_one( - configuration_id1, uow) - assert configuration_res is None - - session.return_value.query.return_value.filter_by.return_value.first.\ - return_value.serialize_smo.return_value = { - "id": configuration_id1, - } - - configuration_res = provision_view.configuration_one( - configuration_id1, uow) - assert str(configuration_res.get( - "id")) == configuration_id1 - - -def test_flask_get_list(mock_flask_uow): - session, app = mock_flask_uow - session.query.return_value = [] - apibase = config.get_provision_api_base() - - with app.test_client() as client: - # Get list and return empty list - ########################## - resp = client.get(apibase+"/smo-endpoint") - assert resp.get_data() == b'[]\n' - - -def test_flask_get_one(mock_flask_uow): - session, app = mock_flask_uow - - 
session.return_value.query.return_value.filter_by.return_value.\ - first.return_value = None - apibase = config.get_provision_api_base() - - with app.test_client() as client: - # Get one and return 404 - ########################### - configuration_id1 = str(uuid.uuid4()) - resp = client.get(apibase+"/smo-endpoint/"+configuration_id1) - assert resp.status_code == 404 - - -# def test_flask_post(mock_flask_uow): -# session, app = mock_flask_uow -# apibase = config.get_provision_api_base() - -# with app.test_client() as client: -# session.return_value.execute.return_value = [] - -# conf_callback = 'http://registration/callback/url' -# resp = client.post(apibase+'/smo-endpoint', json={ -# 'endpoint': conf_callback -# }) -# assert resp.status_code == 201 -# assert 'id' in resp.get_json() - - -def test_flask_delete(mock_flask_uow): - session, app = mock_flask_uow - apibase = config.get_provision_api_base() - - with app.test_client() as client: - session.return_value.execute.return_value.first.return_value = {} - - configuration_id1 = str(uuid.uuid4()) - resp = client.delete(apibase+"/smo-endpoint/"+configuration_id1) - assert resp.status_code == 204 - - -def test_flask_not_allowed(mock_flask_uow): - _, app = mock_flask_uow - apibase = config.get_provision_api_base() - - with app.test_client() as client: - - # Testing SMO endpoint not support method - ########################## - uri = apibase + "/smo-endpoint" - resp = client.put(uri) - assert resp.status == '405 METHOD NOT ALLOWED' - resp = client.patch(uri) - assert resp.status == '405 METHOD NOT ALLOWED' - resp = client.delete(uri) - assert resp.status == '405 METHOD NOT ALLOWED' - - configuration_id1 = str(uuid.uuid4()) - uri = apibase + "/smo-endpoint/" + configuration_id1 - resp = client.post(uri) - assert resp.status == '405 METHOD NOT ALLOWED' - resp = client.put(uri) - assert resp.status == '405 METHOD NOT ALLOWED' - resp = client.patch(uri) - assert resp.status == '405 METHOD NOT ALLOWED' -- 2.16.6 From 
5601b5899b0fd15748ae0474de9f5f6dda72864c Mon Sep 17 00:00:00 2001 From: dliu5 Date: Sun, 9 Oct 2022 19:02:52 +0800 Subject: [PATCH 06/16] Add the authentication middleware for service. Issue-ID: INF-299 Signed-off-by: dliu5 Change-Id: I60fe9351532986f4c275bd7e4d1513393a373e08 --- charts/resources/scripts/init/o2api_start.sh | 2 + charts/templates/deployment.yaml | 1 + charts/templates/serviceaccount.yaml | 20 ++++ charts/values.yaml | 2 +- docs/installation-guide.rst | 47 ++++++++- o2app/entrypoints/flask_application.py | 25 ++++- o2common/authmw/__init__.py | 13 +++ o2common/authmw/authmiddleware.py | 83 +++++++++++++++ o2common/authmw/authprov.py | 144 +++++++++++++++++++++++++++ o2common/config/config.py | 43 +++++++- tests/unit/test_watcher.py | 4 +- 11 files changed, 377 insertions(+), 7 deletions(-) create mode 100644 charts/templates/serviceaccount.yaml create mode 100644 o2common/authmw/__init__.py create mode 100644 o2common/authmw/authmiddleware.py create mode 100644 o2common/authmw/authprov.py diff --git a/charts/resources/scripts/init/o2api_start.sh b/charts/resources/scripts/init/o2api_start.sh index 46ea5f5..65f3cbd 100644 --- a/charts/resources/scripts/init/o2api_start.sh +++ b/charts/resources/scripts/init/o2api_start.sh @@ -23,12 +23,14 @@ git clone "https://gerrit.o-ran-sc.org/r/pti/o2" pip install -e /root/o2 + cat <>/etc/hosts 127.0.0.1 api 127.0.0.1 postgres 127.0.0.1 redis EOF + flask run --host=0.0.0.0 --port=80 sleep infinity diff --git a/charts/templates/deployment.yaml b/charts/templates/deployment.yaml index 9da4fba..5d82063 100644 --- a/charts/templates/deployment.yaml +++ b/charts/templates/deployment.yaml @@ -30,6 +30,7 @@ spec: labels: app: o2api spec: + serviceAccountName: {{ .Values.o2ims.serviceaccountname }} imagePullSecrets: - name: {{ .Values.o2ims.imagePullSecrets }} {{- if .Values.o2ims.affinity }} diff --git a/charts/templates/serviceaccount.yaml b/charts/templates/serviceaccount.yaml new file mode 100644 index 
0000000..1cae523 --- /dev/null +++ b/charts/templates/serviceaccount.yaml @@ -0,0 +1,20 @@ +# Service Account for o2ims +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.o2ims.serviceaccountname }} + namespace: orano2 +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Values.o2ims.serviceaccountname }} +subjects: +- kind: ServiceAccount + namespace: orano2 + name: {{ .Values.o2ims.serviceaccountname }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin diff --git a/charts/values.yaml b/charts/values.yaml index c343088..0771680 100644 --- a/charts/values.yaml +++ b/charts/values.yaml @@ -30,7 +30,7 @@ global: namespace: orano2 o2ims: - imagePullSecrets: admin-orano2-registry-secret + serviceaccountname: admin image: repository: registry.local:9001/admin/o2imsdms tag: 0.1.1 diff --git a/docs/installation-guide.rst b/docs/installation-guide.rst index 68f7ed0..9b63c7c 100644 --- a/docs/installation-guide.rst +++ b/docs/installation-guide.rst @@ -139,6 +139,48 @@ The following instruction should be done outside of INF platform controller host # export API_HOST_EXTERNAL_FLOATING=$(echo ${OS_AUTH_URL} | sed -e s,`echo ${OS_AUTH_URL} | grep :// | sed -e's,^\(.*//\).*,\1,g'`,,g | cut -d/ -f1 | sed -e 's,:.*,,g') export API_HOST_EXTERNAL_FLOATING= + # please specify the smo service account yaml file + export SMO_SERVICEACCOUNT= + # service account and binding for smo yaml file + + cat <smo-serviceaccount.yaml + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + namespace: default + name: pod-reader + rules: + - apiGroups: [""] # "" indicates the core API group + resources: ["pods"] + verbs: ["get", "watch", "list"] + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: ${SMO_SERVICEACCOUNT} + namespace: default + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: read-pods + namespace: default + roleRef: + 
apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-reader + subjects: + - kind: ServiceAccount + name: ${SMO_SERVICEACCOUNT} + namespace: default + + EOF + + kubectl apply -f smo-serviceaccount.yaml + + #export the smo account token data + export SMO_TOKEN_DATA=$(kubectl -n default describe secret $(kubectl -n default get secret | grep ${SMO_SERVICEACCOUNT} | awk '{print $1}') | grep "token:" | awk '{print $2}') + cat <o2service-override.yaml o2ims: imagePullSecrets: admin-orano2-registry-secret @@ -154,6 +196,7 @@ The following instruction should be done outside of INF platform controller host OS_PASSWORD: "${OS_PASSWORD}" K8S_KUBECONFIG: "/opt/k8s_kube.conf" API_HOST_EXTERNAL_FLOATING: "${API_HOST_EXTERNAL_FLOATING}" + EOF @@ -164,8 +207,8 @@ The following instruction should be done outside of INF platform controller host helm install o2service o2/charts/ -f o2service-override.yaml helm list |grep o2service - kubectl -n ${NAMESPACE} get pods |grep o2service - kubectl -n ${NAMESPACE} get services |grep o2service + kubectl -n ${NAMESPACE} get pods |grep o2api + kubectl -n ${NAMESPACE} get services |grep o2api 2.4 Verify INF O2 service diff --git a/o2app/entrypoints/flask_application.py b/o2app/entrypoints/flask_application.py index fff1201..f74dca2 100644 --- a/o2app/entrypoints/flask_application.py +++ b/o2app/entrypoints/flask_application.py @@ -1,4 +1,4 @@ -# Copyright (C) 2021 Wind River Systems, Inc. +# Copyright (C) 2021-2022 Wind River Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -21,9 +21,32 @@ from o2ims.views import configure_namespace as ims_route_configure_namespace from o2dms.api import configure_namespace as dms_route_configure_namespace from o2ims.adapter.clients.alarm_dict_client import load_alarm_definition +from o2common.authmw import authmiddleware +from o2common.authmw import authprov +from o2common.config.config import get_review_url +from o2common.helper import o2logging # apibase = config.get_o2ims_api_base() +auth = True app = Flask(__name__) +logger = o2logging.get_logger(__name__) + + +def _get_k8s_url(): + try: + token_review_url = get_review_url() + return token_review_url + except Exception: + raise Exception('Get k8s token review url failed') + + +if auth: + # perform service account identity&privilege check. + _get_k8s_url() + ad = authprov.auth_definer('ad') + ad.sanity_check() + app.wsgi_app = authmiddleware.authmiddleware(app.wsgi_app) + app.config.SWAGGER_UI_DOC_EXPANSION = 'list' api = Api(app, version='1.0.0', title='INF O2 Services API', diff --git a/o2common/authmw/__init__.py b/o2common/authmw/__init__.py new file mode 100644 index 0000000..0e1e364 --- /dev/null +++ b/o2common/authmw/__init__.py @@ -0,0 +1,13 @@ +# Copyright (C) 2022 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/o2common/authmw/authmiddleware.py b/o2common/authmw/authmiddleware.py new file mode 100644 index 0000000..c70adfc --- /dev/null +++ b/o2common/authmw/authmiddleware.py @@ -0,0 +1,83 @@ +# Copyright (C) 2022 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from werkzeug.wrappers import Request, Response +from o2common.helper import o2logging +from o2common.authmw.authprov import auth_definer + +logger = o2logging.get_logger(__name__) + + +class AuthRequiredExp(Exception): + def __init__(self, value): + self.value = value + + def dictize(self): + return { + 'WWW-Authenticate': '{}'.format(self.value)} + + +class AuthFailureExp(Exception): + def __init__(self, value): + self.value = value + + def dictize(self): + return { + 'WWW-Authenticate': '{}'.format(self.value)} + + +def _response_wrapper(environ, start_response, header): + res = Response(headers=header, + mimetype='text/plain', status=401) + return res(environ, start_response) + + +class authmiddleware(): + + ''' + Auth WSGI middleware + ''' + + def __init__(self, app): + self.app = app + + def __call__(self, environ, start_response): + logger.info(__name__ + 'authentication middleware') + req = Request(environ, populate_request=True, shallow=True) + try: + auth_header = req.headers['Authorization'] + + if auth_header: + auth_token = auth_header.split(" ")[1] + + ad = auth_definer('oauth') + # invoke underlying auth mdw to make k8s/keystone api + ret = 
ad.authenticate(auth_token) + if ret is True: + logger.info( + "auth success with oauth token: " + auth_token) + return self.app(environ, start_response) + else: + raise AuthFailureExp( + 'Bearer realm="Authentication Failed"') + else: + raise AuthRequiredExp('Bearer realm="Authentication Required"') + except AuthRequiredExp as ex: + return _response_wrapper(environ, start_response, ex.dictize()) + except AuthFailureExp as ex: + return _response_wrapper(environ, start_response, ex.dictize()) + except Exception: + hint = 'Bearer realm="Authentication Required"' + return _response_wrapper(environ, start_response, + AuthRequiredExp(hint).dictize()) diff --git a/o2common/authmw/authprov.py b/o2common/authmw/authprov.py new file mode 100644 index 0000000..17c5349 --- /dev/null +++ b/o2common/authmw/authprov.py @@ -0,0 +1,144 @@ +# Copyright (C) 2022 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import ssl +from o2common.helper import o2logging +import urllib.request +import urllib.parse +import json + +from o2common.config.config import get_auth_provider, get_review_url +from o2common.config.config import get_reviewer_token + +ssl._create_default_https_context = ssl._create_unverified_context +logger = o2logging.get_logger(__name__) + +# read the conf from config file +auth_prv_conf = get_auth_provider() + +try: + token_review_url = get_review_url() +except Exception: + raise Exception('Get k8s token review url failed') + + +class K8SAuthenticaException(Exception): + def __init__(self, value): + self.value = value + + +class K8SAuthorizationException(Exception): + def __init__(self, value): + self.value = value + + +class auth_definer(): + + def __init__(self, name): + super().__init__() + self.name = name + if auth_prv_conf == 'k8s': + self.obj = k8s_auth_provider('k8s') + else: + self.obj = keystone_auth_provider('keystone') + + def tokenissue(self): + return self.obj.tokenissue() + + def sanity_check(self): + return self.obj.sanity_check() + + # call k8s api + def authenticate(self, token): + return self.obj.authenticate(token) + + def __repr__(self) -> str: + return "" % self.name + + +class k8s_auth_provider(auth_definer): + + def __init__(self, name): + self.name = name + + def tokenissue(self, **args2): + pass + + def sanity_check(self): + try: + self.authenticate('faketoken') + except Exception as ex: + logger.critical( + 'Failed to bootstrap oauth middleware with exp: ' + str(ex)) + raise Exception(str(ex)) + + def authenticate(self, token): + reviewer_token = get_reviewer_token() + tokenreview = { + "kind": "TokenReview", + "apiVersion": "authentication.k8s.io/v1", + "metadata": { + "creationTimestamp": None + }, + "spec": { + "token": ""+token + }, + "status": { + "user": {} + } + } + datas = json.dumps(tokenreview) + binary_data = datas.encode('utf-8') + # 'post' method + header = {'Authorization': 'Bearer '+reviewer_token, + 
'Content-Type': 'application/json'} + try: + req = urllib.request.Request( + token_review_url, data=binary_data, headers=header) + response = urllib.request.urlopen(req) + data = json.load(response) + if data['status']['authenticated'] is True: + logger.info("Authenticated.") + return True + except Exception as ex: + strex = str(ex) + logger.warning( + "Invoke K8s API Service Exception happened:" + strex) + if '403' in strex: + raise K8SAuthorizationException( + 'No privilege to perform oauth token check.') + elif '401' in strex: + raise K8SAuthenticaException( + 'Self Authentication failure.') + return False + + def tokenrevoke(self, **args2): + return True + + +class keystone_auth_provider(auth_definer): + def __init__(self, name): + self.name = name + + def tokenissue(self, *args1, **args2): + pass + + def authenticate(self, *args1, **args2): + return False + + def sanity_check(self): + pass + + def tokenrevoke(self, *args1, **args2): + return False diff --git a/o2common/config/config.py b/o2common/config/config.py index cf1c08c..b1d2cae 100644 --- a/o2common/config/config.py +++ b/o2common/config/config.py @@ -1,4 +1,4 @@ -# Copyright (C) 2021 Wind River Systems, Inc. +# Copyright (C) 2021-2022 Wind River Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -270,3 +270,44 @@ def get_events_yaml_filename(): if events_yaml_name is not None and os.path.isfile(events_yaml_name): return events_yaml_name return "/configs/events.yaml" + +# get k8s host from env: + + +def get_k8s_host(): + k8s_host = os.environ.get("KUBERNETES_SERVICE_HOST") + if k8s_host is None: + raise Exception('Get k8s host failed.') + return k8s_host + +# get k8s host port from env: + + +def get_k8s_port(): + k8s_port = os.environ.get("KUBERNETES_SERVICE_PORT_HTTPS", '443') + return k8s_port + +# token review url + + +def get_review_url(): + try: + api = '/apis/authentication.k8s.io/v1/tokenreviews' + return "{0}{1}:{2}{3}".format( + 'https://', get_k8s_host(), get_k8s_port(), api) + except Exception: + raise Exception('Get k8s review url failed') + +# get reviewer token + + +def get_reviewer_token(): + # token path default is below. + token_path = '/var/run/secrets/kubernetes.io/serviceaccount/token' + with open(token_path, 'r') as f: + ctt = f.read() + return ctt + + +def get_auth_provider(): + return 'k8s' diff --git a/tests/unit/test_watcher.py b/tests/unit/test_watcher.py index 4bb707f..5e1960f 100644 --- a/tests/unit/test_watcher.py +++ b/tests/unit/test_watcher.py @@ -195,10 +195,10 @@ def test_watchers_worker(): count1 = fakewatcher.fakeOcloudWatcherCounter testedworker.start() - time.sleep(20) + time.sleep(1) assert fakewatcher.fakeOcloudWatcherCounter > count1 # assumed hacking: probe has stopped the sched task count3 = fakewatcher.fakeOcloudWatcherCounter - time.sleep(3) + time.sleep(1) assert fakewatcher.fakeOcloudWatcherCounter == count3 -- 2.16.6 From f7ef52a5b4ead0472b1b5828471b28c88d2a0aea Mon Sep 17 00:00:00 2001 From: "Zhang Rong(Jon)" Date: Mon, 4 Jul 2022 00:39:41 +0800 Subject: [PATCH 07/16] Pagination in request and response; Fix alarm client issue 1. Add two pagination files as common for pagination. 2. Give a layer of pagination in response. 3. Add 'first', 'prev', 'next', 'last' in the Link Header. 
Issue-ID: INF-288 Signed-off-by: Zhang Rong(Jon) Change-Id: I3047fb7a4a2b4d6480f706ee7773cbe0b69d405d --- o2common/domain/base.py | 8 ++-- o2common/views/pagination_route.py | 61 ++++++++++++++++++++++++++ o2common/views/pagination_view.py | 52 ++++++++++++++++++++++ o2ims/adapter/alarm_repository.py | 26 ++++++++--- o2ims/adapter/clients/fault_client.py | 30 ++++++------- o2ims/adapter/ocloud_repository.py | 67 ++++++++++++++++++++++------ o2ims/domain/alarm_obj.py | 15 ++++++- o2ims/domain/alarm_repo.py | 24 ++++++++--- o2ims/domain/commands.py | 9 +++- o2ims/domain/ocloud.py | 1 - o2ims/domain/ocloud_repo.py | 45 +++++++++++++++---- o2ims/domain/subscription_repo.py | 14 ++++-- o2ims/service/auditor/alarm_handler.py | 2 +- o2ims/views/alarm_route.py | 28 ++++++++++-- o2ims/views/alarm_view.py | 17 +++++--- o2ims/views/ocloud_dto.py | 41 +++++++++++++++++- o2ims/views/ocloud_route.py | 79 +++++++++++++++++++++++++++++++--- o2ims/views/ocloud_view.py | 49 +++++++++++++-------- requirements-test.txt | 1 + tests/conftest.py | 8 ++++ tests/unit/test_alarm.py | 64 ++++++++++++++++++++++++--- tests/unit/test_ocloud.py | 75 +++++++++++++++++++++++--------- 22 files changed, 596 insertions(+), 120 deletions(-) create mode 100644 o2common/views/pagination_route.py create mode 100644 o2common/views/pagination_view.py diff --git a/o2common/domain/base.py b/o2common/domain/base.py index f419f23..128f950 100644 --- a/o2common/domain/base.py +++ b/o2common/domain/base.py @@ -40,10 +40,10 @@ class Serializer(object): def serialize(self): try: d = {c: getattr(self, c) for c in inspect(self).attrs.keys()} - if 'createtime' in d: - d['createtime'] = d['createtime'].isoformat() - if 'updatetime' in d: - d['updatetime'] = d['updatetime'].isoformat() + # if 'createtime' in d: + # d['createtime'] = d['createtime'].isoformat() + # if 'updatetime' in d: + # d['updatetime'] = d['updatetime'].isoformat() return d # return {c: getattr(self, c) for c in inspect(self).attrs.keys()} except 
NoInspectionAvailable: diff --git a/o2common/views/pagination_route.py b/o2common/views/pagination_route.py new file mode 100644 index 0000000..e7b738f --- /dev/null +++ b/o2common/views/pagination_route.py @@ -0,0 +1,61 @@ +# Copyright (C) 2021-2022 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from urllib.parse import urlparse +from flask import abort + +from o2common.helper import o2logging +logger = o2logging.get_logger(__name__) + +PAGE_PARAM = 'nextpage_opaque_marker' + + +def link_header(full_path: str, ret): + base_url = urlparse(full_path) + count = ret.pop('count') + page_total = ret.pop('page_total') + page_current = ret.pop('page_current') + + if page_current > page_total: + abort(400, "Page size {} bad request.".format(page_current)) + + if 0 == count: + return [], {'X-Total-Count': count} + + query = "&".join(["{}".format(q) for q in base_url.query.split( + '&') if q.split('=')[0] != PAGE_PARAM]) + if query != '': + query = query + '&' + logger.warning(query) + + link_list = [] + if (page_current > 1): + parsed = base_url._replace(query=query + PAGE_PARAM + '=1') + link_list.append('<' + parsed.geturl() + '>; rel="first"') + if (page_current > 1): + parsed = base_url._replace( + query=query + PAGE_PARAM + '=' + str(page_current - 1)) + link_list.append('<' + parsed.geturl() + '>; rel="prev"') + if (page_current < page_total): + parsed = base_url._replace( + query=query + PAGE_PARAM + '=' + str(page_current + 1)) + 
link_list.append('<' + parsed.geturl() + '>; rel="next"') + if (page_current < page_total): + parsed = base_url._replace( + query=query + PAGE_PARAM + '=' + str(page_total)) + link_list.append('<' + parsed.geturl() + '>; rel="last"') + if 0 == len(link_list): + return ret.pop('results'), {'X-Total-Count': count} + link = ','.join(link_list) + return ret.pop('results'), {'X-Total-Count': count, 'Link': link} diff --git a/o2common/views/pagination_view.py b/o2common/views/pagination_view.py new file mode 100644 index 0000000..6546ebe --- /dev/null +++ b/o2common/views/pagination_view.py @@ -0,0 +1,52 @@ +# Copyright (C) 2021-2022 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import math +from typing import List, Tuple + +from o2common.domain.base import Serializer + +from o2common.helper import o2logging +logger = o2logging.get_logger(__name__) + + +class Pagination: + def __init__(self, **kwargs) -> None: + # filter key should be the same with database name + self.filter_kwargs = {} + self.limit = int(kwargs['per_page']) if 'per_page' in kwargs else 30 + self.page = int(kwargs['page']) if 'page' in kwargs else 1 + if self.page < 1: + self.page = 1 + self.start = (self.page - 1) * self.limit + self.filter_kwargs['limit'] = self.limit + self.filter_kwargs['start'] = self.start + + def get_filter(self): + return self.filter_kwargs + + def get_result(self, ret: Tuple[int, List[Serializer]]): + count = ret[0] + logger.info('List count: {}'.format(count)) + ret_list = ret[1] + page_total = int(math.ceil(count/self.limit) + ) if count > self.limit else 1 + result = { + "count": count, + "page_total": page_total, + "page_current": self.page, + "per_page": self.limit, + "results": [r.serialize() for r in ret_list] + } + return result diff --git a/o2ims/adapter/alarm_repository.py b/o2ims/adapter/alarm_repository.py index ef20e6a..ef7ae26 100644 --- a/o2ims/adapter/alarm_repository.py +++ b/o2ims/adapter/alarm_repository.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import List +from typing import List, Tuple from o2ims.domain import alarm_obj from o2ims.domain.alarm_repo import AlarmDefinitionRepository, \ @@ -34,8 +34,16 @@ class AlarmEventRecordSqlAlchemyRepository(AlarmEventRecordRepository): return self.session.query(alarm_obj.AlarmEventRecord).filter_by( alarmEventRecordId=alarm_event_record_id).first() - def _list(self) -> List[alarm_obj.AlarmEventRecord]: - return self.session.query(alarm_obj.AlarmEventRecord) + def _list(self, **kwargs) -> Tuple[int, List[alarm_obj.AlarmEventRecord]]: + size = kwargs.pop('limit') if 'limit' in kwargs else None + offset = kwargs.pop('start') if 'start' in kwargs else 0 + + result = self.session.query(alarm_obj.AlarmEventRecord).filter_by( + **kwargs).order_by('alarmEventRecordId') + count = result.count() + if size is not None and size != -1: + return (count, result.limit(size).offset(offset)) + return (count, result) def _update(self, alarm_event_record: alarm_obj.AlarmEventRecord): self.session.add(alarm_event_record) @@ -80,8 +88,16 @@ class AlarmSubscriptionSqlAlchemyRepository(AlarmSubscriptionRepository): return self.session.query(alarm_obj.AlarmSubscription).filter_by( alarmSubscriptionId=subscription_id).first() - def _list(self) -> List[alarm_obj.AlarmSubscription]: - return self.session.query(alarm_obj.AlarmSubscription) + def _list(self, **kwargs) -> Tuple[int, List[alarm_obj.AlarmSubscription]]: + size = kwargs.pop('limit') if 'limit' in kwargs else None + offset = kwargs.pop('start') if 'start' in kwargs else 0 + + result = self.session.query(alarm_obj.AlarmSubscription).filter_by( + **kwargs).order_by('alarmSubscriptionId') + count = result.count() + if size is not None and size != -1: + return (count, result.limit(size).offset(offset)) + return (count, result) def _update(self, subscription: alarm_obj.AlarmSubscription): self.session.add(subscription) diff --git a/o2ims/adapter/clients/fault_client.py b/o2ims/adapter/clients/fault_client.py index 
b37a4d2..45a27ff 100644 --- a/o2ims/adapter/clients/fault_client.py +++ b/o2ims/adapter/clients/fault_client.py @@ -27,7 +27,6 @@ from fmclient.common.exceptions import HTTPNotFound from o2common.service.client.base_client import BaseClient from o2common.config import config from o2ims.domain import alarm_obj as alarmModel -from o2ims.domain.resource_type import ResourceTypeEnum from o2app.adapter import unit_of_work from o2common.helper import o2logging @@ -126,7 +125,7 @@ class StxFaultClientImp(object): logger.debug('alarm 1:' + str(alarms[0].to_dict())) # [print('alarm:' + str(alarm.to_dict())) for alarm in alarms if alarm] return [alarmModel.FaultGenericModel( - ResourceTypeEnum.PSERVER, self._alarmconverter(alarm)) + alarmModel.EventTypeEnum.ALARM, self._alarmconverter(alarm)) for alarm in alarms if alarm] def getAlarmInfo(self, id) -> alarmModel.FaultGenericModel: @@ -137,16 +136,17 @@ class StxFaultClientImp(object): except HTTPNotFound: event = self.fmclient.event_log.get(id) return alarmModel.FaultGenericModel( - ResourceTypeEnum.PSERVER, self._eventconverter(event, True)) + alarmModel.EventTypeEnum.ALARM, self._eventconverter(event, + True)) return alarmModel.FaultGenericModel( - ResourceTypeEnum.PSERVER, self._alarmconverter(alarm)) + alarmModel.EventTypeEnum.ALARM, self._alarmconverter(alarm)) def getEventList(self, **filters) -> List[alarmModel.FaultGenericModel]: events = self.fmclient.event_log.list(alarms=True, expand=True) logger.debug('event 1:' + str(events[0].to_dict())) # [print('alarm:' + str(event.to_dict())) for event in events if event] return [alarmModel.FaultGenericModel( - ResourceTypeEnum.PSERVER, self._eventconverter(event)) + alarmModel.EventTypeEnum.EVENT, self._eventconverter(event)) for event in events if event] def getEventInfo(self, id) -> alarmModel.FaultGenericModel: @@ -154,20 +154,18 @@ class StxFaultClientImp(object): logger.debug('get event id ' + id + ':' + str(event.to_dict())) # print(event.to_dict()) return 
alarmModel.FaultGenericModel( - ResourceTypeEnum.PSERVER, self._eventconverter(event)) + alarmModel.EventTypeEnum.EVENT, self._eventconverter(event)) @ staticmethod def _alarmconverter(alarm): # setattr(alarm, 'alarm_def_id', uuid.uuid3( # uuid.NAMESPACE_URL, alarm.alarm_id)) setattr(alarm, 'state', alarm.alarm_state) - setattr(alarm, 'event_log_type', alarm.alarm_type) - setattr(alarm, 'event_log_id', alarm.alarm_id) - setattr(alarm, 'alarm_def_id', uuid.uuid3( - uuid.NAMESPACE_URL, alarm.alarm_id)) - setattr(alarm, 'probable_cause_id', uuid.uuid3( - uuid.NAMESPACE_URL, alarm.probale_cause)) + setattr(alarm, 'alarm_def_id', str(uuid.uuid3( + uuid.NAMESPACE_URL, alarm.alarm_id))) + setattr(alarm, 'probable_cause_id', str(uuid.uuid3( + uuid.NAMESPACE_URL, alarm.probable_cause))) return alarm @ staticmethod @@ -177,10 +175,10 @@ class StxFaultClientImp(object): if clear: logger.debug('alarm is clear') event.state = 'clear' - setattr(event, 'alarm_def_id', uuid.uuid3( - uuid.NAMESPACE_URL, event.alarm_id)) - setattr(event, 'probable_cause_id', uuid.uuid3( - uuid.NAMESPACE_URL, event.probale_cause)) + setattr(event, 'alarm_def_id', str(uuid.uuid3( + uuid.NAMESPACE_URL, event.alarm_id))) + setattr(event, 'probable_cause_id', str(uuid.uuid3( + uuid.NAMESPACE_URL, event.probable_cause))) return event @ staticmethod diff --git a/o2ims/adapter/ocloud_repository.py b/o2ims/adapter/ocloud_repository.py index d48aa06..b26e98a 100644 --- a/o2ims/adapter/ocloud_repository.py +++ b/o2ims/adapter/ocloud_repository.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import List +from typing import List, Tuple from o2ims.domain import ocloud, subscription_obj from o2ims.domain.ocloud_repo import OcloudRepository, ResourceTypeRepository,\ @@ -58,8 +58,16 @@ class ResouceTypeSqlAlchemyRepository(ResourceTypeRepository): return self.session.query(ocloud.ResourceType).filter_by( name=resource_type_name).first() - def _list(self) -> List[ocloud.ResourceType]: - return self.session.query(ocloud.ResourceType) + def _list(self, **kwargs) -> Tuple[int, List[ocloud.ResourceType]]: + size = kwargs.pop('limit') if 'limit' in kwargs else None + offset = kwargs.pop('start') if 'start' in kwargs else 0 + + result = self.session.query(ocloud.ResourceType).filter_by( + **kwargs).order_by('resourceTypeId') + count = result.count() + if size is not None and size != -1: + return (count, result.limit(size).offset(offset)) + return (count, result) def _update(self, resourceType: ocloud.ResourceType): self.session.add(resourceType) @@ -77,8 +85,16 @@ class ResourcePoolSqlAlchemyRepository(ResourcePoolRepository): return self.session.query(ocloud.ResourcePool).filter_by( resourcePoolId=resource_pool_id).first() - def _list(self) -> List[ocloud.ResourcePool]: - return self.session.query(ocloud.ResourcePool) + def _list(self, **kwargs) -> Tuple[int, List[ocloud.ResourcePool]]: + size = kwargs.pop('limit') if 'limit' in kwargs else None + offset = kwargs.pop('start') if 'start' in kwargs else 0 + + result = self.session.query(ocloud.ResourcePool).filter_by( + **kwargs).order_by('resourcePoolId') + count = result.count() + if size is not None and size != -1: + return (count, result.limit(size).offset(offset)) + return (count, result) def _update(self, resourcePool: ocloud.ResourcePool): self.session.add(resourcePool) @@ -115,9 +131,19 @@ class ResourceSqlAlchemyRepository(ResourceRepository): return res return recursive(resource_id) - def _list(self, resourcepool_id, **kwargs) -> List[ocloud.Resource]: - return 
self.session.query(ocloud.Resource).filter_by( - resourcePoolId=resourcepool_id, **kwargs) + def _list(self, resourcepool_id, **kwargs) -> \ + Tuple[int, List[ocloud.Resource]]: + if 'sort' in kwargs: + kwargs.pop('sort') + size = kwargs.pop('limit') if 'limit' in kwargs else None + offset = kwargs.pop('start') if 'start' in kwargs else 0 + + result = self.session.query(ocloud.Resource).filter_by( + resourcePoolId=resourcepool_id, **kwargs).order_by('resourceId') + count = result.count() + if size is not None and size != -1: + return (count, result.limit(size).offset(offset)) + return (count, result) def _update(self, resource: ocloud.Resource): self.session.add(resource) @@ -135,8 +161,16 @@ class DeploymentManagerSqlAlchemyRepository(DeploymentManagerRepository): return self.session.query(ocloud.DeploymentManager).filter_by( deploymentManagerId=deployment_manager_id).first() - def _list(self) -> List[ocloud.DeploymentManager]: - return self.session.query(ocloud.DeploymentManager) + def _list(self, **kwargs) -> Tuple[int, List[ocloud.DeploymentManager]]: + size = kwargs.pop('limit') if 'limit' in kwargs else None + offset = kwargs.pop('start') if 'start' in kwargs else 0 + + result = self.session.query(ocloud.DeploymentManager).filter_by( + **kwargs).order_by('deploymentManagerId') + count = result.count() + if size is not None and size != -1: + return (count, result.limit(size).offset(offset)) + return (count, result) def _update(self, deployment_manager: ocloud.DeploymentManager): self.session.add(deployment_manager) @@ -154,8 +188,17 @@ class SubscriptionSqlAlchemyRepository(SubscriptionRepository): return self.session.query(subscription_obj.Subscription).filter_by( subscriptionId=subscription_id).first() - def _list(self) -> List[subscription_obj.Subscription]: - return self.session.query(subscription_obj.Subscription) + def _list(self, **kwargs) -> \ + Tuple[int, List[subscription_obj.Subscription]]: + size = kwargs.pop('limit') if 'limit' in kwargs else None 
+ offset = kwargs.pop('start') if 'start' in kwargs else 0 + + result = self.session.query(subscription_obj.Subscription).filter_by( + **kwargs).order_by('subscriptionId') + count = result.count() + if size is not None and size != -1: + return (count, result.limit(size).offset(offset)) + return (count, result) def _update(self, subscription: subscription_obj.Subscription): self.session.add(subscription) diff --git a/o2ims/domain/alarm_obj.py b/o2ims/domain/alarm_obj.py index fa7cba2..9b3ff4f 100644 --- a/o2ims/domain/alarm_obj.py +++ b/o2ims/domain/alarm_obj.py @@ -19,6 +19,9 @@ import datetime from o2common.domain.base import AgRoot, Serializer +from o2common.helper import o2logging +logger = o2logging.get_logger(__name__) + class FaultGenericModel(AgRoot): def __init__(self, type: str, @@ -27,7 +30,10 @@ class FaultGenericModel(AgRoot): if api_response: self.id = str(api_response.uuid) self.name = self.id - self.type = type + self.alarm_type = api_response.alarm_type + self.alarm_def_name = api_response.alarm_id + self.alarm_def_id = api_response.alarm_def_id + self.probable_cause_id = api_response.probable_cause_id self.status = api_response.state # TODO: time less than second self.timestamp = datetime.datetime.strptime( @@ -42,6 +48,8 @@ class FaultGenericModel(AgRoot): self.hash = content_hash if content_hash \ else str(hash((self.id, self.timestamp, self.status))) self.content = json.dumps(api_response.to_dict()) + if EventTypeEnum.ALARM == type: + pass def is_outdated(self, newmodel) -> bool: # return self.updatetime < newmodel.updatetime @@ -58,6 +66,11 @@ class FaultGenericModel(AgRoot): self.content = newmodel.content +class EventTypeEnum(Enum): + ALARM = 'alarm' + EVENT = 'event' + + class PerceivedSeverityEnum(str, Enum): CRITICAL = 0 MAJOR = 1 diff --git a/o2ims/domain/alarm_repo.py b/o2ims/domain/alarm_repo.py index d3e7f52..d712f1c 100644 --- a/o2ims/domain/alarm_repo.py +++ b/o2ims/domain/alarm_repo.py @@ -13,7 +13,7 @@ # limitations under the 
License. import abc -from typing import List, Set +from typing import List, Set, Tuple from o2ims.domain import alarm_obj as obj @@ -31,8 +31,12 @@ class AlarmEventRecordRepository(abc.ABC): self.seen.add(alarm_event_record) return alarm_event_record - def list(self) -> List[obj.AlarmEventRecord]: - return self._list() + def list(self, **kwargs) -> List[obj.AlarmEventRecord]: + return self._list(**kwargs)[1] + + def list_with_count(self, **kwargs) -> \ + Tuple[int, List[obj.AlarmEventRecord]]: + return self._list(**kwargs) def update(self, alarm_event_record: obj.AlarmEventRecord): self._update(alarm_event_record) @@ -49,7 +53,7 @@ class AlarmEventRecordRepository(abc.ABC): raise NotImplementedError @abc.abstractmethod - def _list(self) -> List[obj.AlarmEventRecord]: + def _list(self, **kwargs) -> Tuple[int, List[obj.AlarmEventRecord]]: raise NotImplementedError @abc.abstractmethod @@ -155,8 +159,12 @@ class AlarmSubscriptionRepository(abc.ABC): self.seen.add(subscription) return subscription - def list(self) -> List[obj.AlarmSubscription]: - return self._list() + def list(self, **kwargs) -> List[obj.AlarmSubscription]: + return self._list(**kwargs)[1] + + def list_with_count(self, **kwargs) -> \ + Tuple[int, List[obj.AlarmSubscription]]: + return self._list(**kwargs) def update(self, subscription: obj.AlarmSubscription): self._update(subscription) @@ -172,6 +180,10 @@ class AlarmSubscriptionRepository(abc.ABC): def _get(self, subscription_id) -> obj.AlarmSubscription: raise NotImplementedError + @abc.abstractmethod + def _list(self, **kwargs) -> Tuple[int, List[obj.AlarmSubscription]]: + raise NotImplementedError + @abc.abstractmethod def _update(self, subscription: obj.AlarmSubscription): raise NotImplementedError diff --git a/o2ims/domain/commands.py b/o2ims/domain/commands.py index 4ab1b25..25c9629 100644 --- a/o2ims/domain/commands.py +++ b/o2ims/domain/commands.py @@ -18,7 +18,7 @@ from dataclasses import dataclass # from typing import List from 
o2ims.domain.stx_object import StxGenericModel -from o2ims.domain.alarm_obj import AlarmEvent2SMO +from o2ims.domain.alarm_obj import AlarmEvent2SMO, FaultGenericModel from o2ims.domain.subscription_obj import Message2SMO, RegistrationMessage # from o2ims.domain.resource_type import ResourceTypeEnum from o2common.domain.commands import Command @@ -29,6 +29,11 @@ class UpdateStxObject(Command): data: StxGenericModel +@dataclass +class UpdateFaultObject(Command): + data: FaultGenericModel + + @dataclass class PubMessage2SMO(Command): data: Message2SMO @@ -100,5 +105,5 @@ class UpdatePserverIfPort(UpdateResource): @dataclass -class UpdateAlarm(UpdateStxObject): +class UpdateAlarm(UpdateFaultObject): pass diff --git a/o2ims/domain/ocloud.py b/o2ims/domain/ocloud.py index ce55aee..047fb8b 100644 --- a/o2ims/domain/ocloud.py +++ b/o2ims/domain/ocloud.py @@ -47,7 +47,6 @@ class DeploymentManager(AgRoot, Serializer): self.extensions = [] def serialize(self): - print(self.__dict__) d = Serializer.serialize(self) if 'profile' in d and d['profile'] != '': diff --git a/o2ims/domain/ocloud_repo.py b/o2ims/domain/ocloud_repo.py index 811e85c..9f86c9d 100644 --- a/o2ims/domain/ocloud_repo.py +++ b/o2ims/domain/ocloud_repo.py @@ -13,7 +13,7 @@ # limitations under the License. 
import abc -from typing import List, Set +from typing import List, Set, Tuple from o2ims.domain import ocloud @@ -74,8 +74,12 @@ class ResourceTypeRepository(abc.ABC): self.seen.add(resource_type) return resource_type - def list(self) -> List[ocloud.ResourceType]: - return self._list() + def list(self, **kwargs) -> List[ocloud.ResourceType]: + return self._list(**kwargs)[1] + + def list_with_count(self, **kwargs) -> \ + Tuple[int, List[ocloud.ResourceType]]: + return self._list(**kwargs) def update(self, resource_type: ocloud.ResourceType): self._update(resource_type) @@ -93,6 +97,10 @@ class ResourceTypeRepository(abc.ABC): def _get_by_name(self, resource_type_name) -> ocloud.ResourceType: raise NotImplementedError + @abc.abstractmethod + def _list(self, **kwargs) -> Tuple[int, List[ocloud.ResourceType]]: + raise NotImplementedError + @abc.abstractmethod def _update(self, resource_type: ocloud.ResourceType): raise NotImplementedError @@ -112,8 +120,12 @@ class ResourcePoolRepository(abc.ABC): self.seen.add(resource_pool) return resource_pool - def list(self) -> List[ocloud.ResourcePool]: - return self._list() + def list(self, **kwargs) -> List[ocloud.ResourcePool]: + return self._list(**kwargs)[1] + + def list_with_count(self, **kwargs) -> \ + Tuple[int, List[ocloud.ResourcePool]]: + return self._list(**kwargs) def update(self, resource_pool: ocloud.ResourcePool): self._update(resource_pool) @@ -127,6 +139,10 @@ class ResourcePoolRepository(abc.ABC): def _get(self, resource_pool_id) -> ocloud.ResourcePool: raise NotImplementedError + @abc.abstractmethod + def _list(self, **kwargs) -> Tuple[int, List[ocloud.ResourcePool]]: + raise NotImplementedError + @abc.abstractmethod def _update(self, resource_pool: ocloud.ResourcePool): raise NotImplementedError @@ -147,6 +163,10 @@ class ResourceRepository(abc.ABC): return resource def list(self, resourcepool_id, **kwargs) -> List[ocloud.Resource]: + return self._list(resourcepool_id, **kwargs)[1] + + def 
list_with_count(self, resourcepool_id, **kwargs) -> \ + Tuple[int, List[ocloud.Resource]]: return self._list(resourcepool_id, **kwargs) def update(self, resource: ocloud.Resource): @@ -162,7 +182,8 @@ class ResourceRepository(abc.ABC): raise NotImplementedError @abc.abstractmethod - def _list(self, resourcepool_id, **kwargs) -> ocloud.Resource: + def _list(self, resourcepool_id, **kwargs) -> \ + Tuple[int, List[ocloud.Resource]]: raise NotImplementedError @abc.abstractmethod @@ -184,8 +205,12 @@ class DeploymentManagerRepository(abc.ABC): self.seen.add(deployment_manager) return deployment_manager - def list(self) -> List[ocloud.DeploymentManager]: - return self._list() + def list(self, **kwargs) -> List[ocloud.DeploymentManager]: + return self._list(**kwargs)[1] + + def list_with_count(self, **kwargs) -> \ + Tuple[int, List[ocloud.DeploymentManager]]: + return self._list(**kwargs) def update(self, deployment_manager: ocloud.DeploymentManager): self._update(deployment_manager) @@ -198,6 +223,10 @@ class DeploymentManagerRepository(abc.ABC): def _get(self, deployment_manager_id) -> ocloud.DeploymentManager: raise NotImplementedError + @abc.abstractmethod + def _list(self, **kwargs) -> Tuple[int, List[ocloud.DeploymentManager]]: + raise NotImplementedError + @abc.abstractmethod def _update(self, deployment_manager: ocloud.DeploymentManager): raise NotImplementedError diff --git a/o2ims/domain/subscription_repo.py b/o2ims/domain/subscription_repo.py index d12c00d..44d7b0e 100644 --- a/o2ims/domain/subscription_repo.py +++ b/o2ims/domain/subscription_repo.py @@ -13,7 +13,7 @@ # limitations under the License. 
import abc -from typing import List, Set +from typing import List, Set, Tuple from o2ims.domain import subscription_obj as subobj @@ -31,8 +31,12 @@ class SubscriptionRepository(abc.ABC): self.seen.add(subscription) return subscription - def list(self) -> List[subobj.Subscription]: - return self._list() + def list(self, **kwargs) -> List[subobj.Subscription]: + return self._list(**kwargs)[1] + + def list_with_count(self, **kwargs) -> \ + Tuple[int, List[subobj.Subscription]]: + return self._list(**kwargs) def update(self, subscription: subobj.Subscription): self._update(subscription) @@ -52,6 +56,10 @@ class SubscriptionRepository(abc.ABC): def _update(self, subscription: subobj.Subscription): raise NotImplementedError + @abc.abstractmethod + def _list(self, **kwargs) -> Tuple[int, List[subobj.Subscription]]: + raise NotImplementedError + @abc.abstractmethod def _delete(self, subscription_id): raise NotImplementedError diff --git a/o2ims/service/auditor/alarm_handler.py b/o2ims/service/auditor/alarm_handler.py index 6288531..8917264 100644 --- a/o2ims/service/auditor/alarm_handler.py +++ b/o2ims/service/auditor/alarm_handler.py @@ -114,7 +114,7 @@ def create_by(fmobj: FaultGenericModel) -> AlarmEventRecord: else: return alarm_obj.PerceivedSeverityEnum.WARNING alarm_event_record.perceivedSeverity = severity_switch(content['severity']) - alarm_event_record.probableCauseId = content['probable_cause_id'] + alarm_event_record.probableCauseId = fmobj.probable_cause_id alarm_event_record.hash = fmobj.hash # logger.info('severity: ' + content['severity']) # logger.info('perceived severity: ' diff --git a/o2ims/views/alarm_route.py b/o2ims/views/alarm_route.py index 91ca8f8..6b32eee 100644 --- a/o2ims/views/alarm_route.py +++ b/o2ims/views/alarm_route.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from flask_restx import Resource +from flask import request +from flask_restx import Resource, reqparse from o2common.service.messagebus import MessageBus +from o2common.views.pagination_route import link_header, PAGE_PARAM from o2ims.views import alarm_view from o2ims.views.api_ns import api_monitoring_v1 from o2ims.views.alarm_dto import AlarmDTO, SubscriptionDTO @@ -31,13 +33,25 @@ def configure_api_route(): # ---------- Alarm Event Record ---------- # @api_monitoring_v1.route("/alarms") +@api_monitoring_v1.param(PAGE_PARAM, + 'Page number of the results to fetch.' + + ' Default: 1', + _in='query', default=1) class AlarmListRouter(Resource): model = AlarmDTO.alarm_event_record_get @api_monitoring_v1.marshal_list_with(model) def get(self): - return alarm_view.alarm_event_records(bus.uow) + parser = reqparse.RequestParser() + parser.add_argument(PAGE_PARAM, location='args') + args = parser.parse_args() + kwargs = {} + if args.nextpage_opaque_marker is not None: + kwargs['page'] = args.nextpage_opaque_marker + + ret = alarm_view.alarm_event_records(bus.uow, **kwargs) + return link_header(request.full_path, ret) @api_monitoring_v1.route("/alarms/") @@ -68,7 +82,15 @@ class SubscriptionsListRouter(Resource): @api_monitoring_v1.doc('List alarm subscriptions') @api_monitoring_v1.marshal_list_with(model) def get(self): - return alarm_view.subscriptions(bus.uow) + parser = reqparse.RequestParser() + parser.add_argument(PAGE_PARAM, location='args') + args = parser.parse_args() + kwargs = {} + if args.nextpage_opaque_marker is not None: + kwargs['page'] = args.nextpage_opaque_marker + + ret = alarm_view.subscriptions(bus.uow, **kwargs) + return link_header(request.full_path, ret) @api_monitoring_v1.doc('Create a alarm subscription') @api_monitoring_v1.expect(expect) diff --git a/o2ims/views/alarm_view.py b/o2ims/views/alarm_view.py index 258e323..a308429 100644 --- a/o2ims/views/alarm_view.py +++ b/o2ims/views/alarm_view.py @@ -15,6 +15,7 @@ import uuid as uuid from 
o2common.service import unit_of_work +from o2common.views.pagination_view import Pagination from o2ims.views.alarm_dto import SubscriptionDTO from o2ims.domain.alarm_obj import AlarmSubscription @@ -23,10 +24,12 @@ from o2common.helper import o2logging logger = o2logging.get_logger(__name__) -def alarm_event_records(uow: unit_of_work.AbstractUnitOfWork): +def alarm_event_records(uow: unit_of_work.AbstractUnitOfWork, **kwargs): + pagination = Pagination(**kwargs) + filter_kwargs = pagination.get_filter() with uow: - li = uow.alarm_event_records.list() - return [r.serialize() for r in li] + li = uow.alarm_event_records.list_with_count(**filter_kwargs) + return pagination.get_result(li) def alarm_event_record_one(alarmEventRecordId: str, @@ -36,10 +39,12 @@ def alarm_event_record_one(alarmEventRecordId: str, return first.serialize() if first is not None else None -def subscriptions(uow: unit_of_work.AbstractUnitOfWork): +def subscriptions(uow: unit_of_work.AbstractUnitOfWork, **kwargs): + pagination = Pagination(**kwargs) + filter_kwargs = pagination.get_filter() with uow: - li = uow.alarm_subscriptions.list() - return [r.serialize() for r in li] + li = uow.alarm_subscriptions.list_with_count(**filter_kwargs) + return pagination.get_result(li) def subscription_one(subscriptionId: str, diff --git a/o2ims/views/ocloud_dto.py b/o2ims/views/ocloud_dto.py index 651de97..c8e5202 100644 --- a/o2ims/views/ocloud_dto.py +++ b/o2ims/views/ocloud_dto.py @@ -62,7 +62,6 @@ class ResourcePoolDTO: class ResourceDTO: - resource_list = api_ims_inventory_v1.model( "ResourceListDto", { @@ -76,6 +75,46 @@ class ResourceDTO: } ) + list_result = api_ims_inventory_v1.model( + "ResourceListPagenationDto", + { + 'count': fields.Integer(), + 'page_num': fields.Integer(), + 'results': fields.List(fields.Nested(resource_list)) + } + ) + + # def get_paginated_list(results, url, start, limit): + # start = int(start) + # limit = int(limit) + # count = len(results) + # if count < start or limit < 0: 
+ # api_ims_inventory_v1.abort(404) + # # make response + # obj = {} + # obj['start'] = start + # obj['limit'] = limit + # obj['count'] = count + # # make URLs + # # make previous url + # if start == 1: + # obj['previous'] = '' + # else: + # start_copy = max(1, start - limit) + # limit_copy = start - 1 + # obj['previous'] = url + \ + # '?start=%d&limit=%d' % (start_copy, limit_copy) + # # make next url + # if start + limit > count: + # obj['next'] = '' + # else: + # start_copy = start + limit + # obj['next'] = url + '?start=%d&limit=%d' % (start_copy, limit) + # # finally extract result according to bounds + # # obj['results'] = results[(start - 1):(start - 1 + limit)] + # obj['result'] = fields.List(fields.Nested(ResourceDTO.resource_list)) + # return obj + def recursive_resource_mapping(iteration_number=2): resource_json_mapping = { 'resourceId': fields.String(required=True, diff --git a/o2ims/views/ocloud_route.py b/o2ims/views/ocloud_route.py index 6b6a3fc..5feb6b8 100644 --- a/o2ims/views/ocloud_route.py +++ b/o2ims/views/ocloud_route.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +from flask import request from flask_restx import Resource, reqparse from o2common.service.messagebus import MessageBus +from o2common.views.pagination_route import link_header, PAGE_PARAM from o2ims.views import ocloud_view from o2ims.views.api_ns import api_ims_inventory_v1 from o2ims.views.ocloud_dto import OcloudDTO, ResourceTypeDTO,\ @@ -51,13 +53,25 @@ class OcloudsListRouter(Resource): # ---------- ResourceTypes ---------- # @api_ims_inventory_v1.route("/resourceTypes") +@api_ims_inventory_v1.param(PAGE_PARAM, + 'Page number of the results to fetch.' 
+ + ' Default: 1', + _in='query', default=1) class ResourceTypesListRouter(Resource): model = ResourceTypeDTO.resource_type_get @api_ims_inventory_v1.marshal_list_with(model) def get(self): - return ocloud_view.resource_types(bus.uow) + parser = reqparse.RequestParser() + parser.add_argument(PAGE_PARAM, location='args') + args = parser.parse_args() + kwargs = {} + if args.nextpage_opaque_marker is not None: + kwargs['page'] = args.nextpage_opaque_marker + + ret = ocloud_view.resource_types(bus.uow, **kwargs) + return link_header(request.full_path, ret) @api_ims_inventory_v1.route("/resourceTypes/") @@ -79,13 +93,25 @@ class ResourceTypeGetRouter(Resource): # ---------- ResourcePools ---------- # @api_ims_inventory_v1.route("/resourcePools") +@api_ims_inventory_v1.param(PAGE_PARAM, + 'Page number of the results to fetch.' + + ' Default: 1', + _in='query', default=1) class ResourcePoolsListRouter(Resource): model = ResourcePoolDTO.resource_pool_get @api_ims_inventory_v1.marshal_list_with(model) def get(self): - return ocloud_view.resource_pools(bus.uow) + parser = reqparse.RequestParser() + parser.add_argument(PAGE_PARAM, location='args') + args = parser.parse_args() + kwargs = {} + if args.nextpage_opaque_marker is not None: + kwargs['page'] = args.nextpage_opaque_marker + + ret = ocloud_view.resource_pools(bus.uow, **kwargs) + return link_header(request.full_path, ret) @api_ims_inventory_v1.route("/resourcePools/") @@ -109,9 +135,18 @@ class ResourcePoolGetRouter(Resource): @api_ims_inventory_v1.route("/resourcePools//resources") @api_ims_inventory_v1.param('resourcePoolID', 'ID of the resource pool') @api_ims_inventory_v1.param('resourceTypeName', 'filter resource type', - location='args') + _in='query') @api_ims_inventory_v1.param('parentId', 'filter parentId', - location='args') + _in='query') +# @api_ims_inventory_v1.param('sort', 'sort by column name', +# _in='query') +# @api_ims_inventory_v1.param('per_page', 'The number of results per page ' + +# '(max 100). 
Default: 30', +# _in='query', default=30) +@api_ims_inventory_v1.param(PAGE_PARAM, + 'Page number of the results to fetch.' + + ' Default: 1', + _in='query', default=1) class ResourcesListRouter(Resource): model = ResourceDTO.resource_list @@ -121,6 +156,9 @@ class ResourcesListRouter(Resource): parser = reqparse.RequestParser() parser.add_argument('resourceTypeName', location='args') parser.add_argument('parentId', location='args') + # parser.add_argument('sort', location='args') + # parser.add_argument('per_page', location='args') + parser.add_argument(PAGE_PARAM, location='args') args = parser.parse_args() kwargs = {} if args.resourceTypeName is not None: @@ -129,8 +167,15 @@ class ResourcesListRouter(Resource): kwargs['parentId'] = args.parentId if args.parentId.lower() == 'null': kwargs['parentId'] = None + # if args.per_page is not None: + # kwargs['per_page'] = args.per_page + # base_url = base_url + 'per_page=' + args.per_page + '&' + if args.nextpage_opaque_marker is not None: + kwargs['page'] = args.nextpage_opaque_marker - return ocloud_view.resources(resourcePoolID, bus.uow, **kwargs) + ret = ocloud_view.resources(resourcePoolID, bus.uow, **kwargs) + + return link_header(request.full_path, ret) @api_ims_inventory_v1.route( @@ -156,13 +201,25 @@ class ResourceGetRouter(Resource): # ---------- DeploymentManagers ---------- # @api_ims_inventory_v1.route("/deploymentManagers") +@api_ims_inventory_v1.param(PAGE_PARAM, + 'Page number of the results to fetch.' 
+ + ' Default: 1', + _in='query', default=1) class DeploymentManagersListRouter(Resource): model = DeploymentManagerDTO.deployment_manager_list @api_ims_inventory_v1.marshal_list_with(model) def get(self): - return ocloud_view.deployment_managers(bus.uow) + parser = reqparse.RequestParser() + parser.add_argument(PAGE_PARAM, location='args') + args = parser.parse_args() + kwargs = {} + if args.nextpage_opaque_marker is not None: + kwargs['page'] = args.nextpage_opaque_marker + + ret = ocloud_view.deployment_managers(bus.uow, **kwargs) + return link_header(request.full_path, ret) @api_ims_inventory_v1.route("/deploymentManagers/") @@ -204,7 +261,15 @@ class SubscriptionsListRouter(Resource): @api_ims_inventory_v1.doc('List subscriptions') @api_ims_inventory_v1.marshal_list_with(model) def get(self): - return ocloud_view.subscriptions(bus.uow) + parser = reqparse.RequestParser() + parser.add_argument(PAGE_PARAM, location='args') + args = parser.parse_args() + kwargs = {} + if args.nextpage_opaque_marker is not None: + kwargs['page'] = args.nextpage_opaque_marker + + ret = ocloud_view.subscriptions(bus.uow, **kwargs) + return link_header(request.full_path, ret) @api_ims_inventory_v1.doc('Create a subscription') @api_ims_inventory_v1.expect(expect) diff --git a/o2ims/views/ocloud_view.py b/o2ims/views/ocloud_view.py index 56f970d..ed55dda 100644 --- a/o2ims/views/ocloud_view.py +++ b/o2ims/views/ocloud_view.py @@ -20,12 +20,13 @@ from datetime import datetime import shutil from o2common.service import unit_of_work +from o2common.config import config +from o2common.views.pagination_view import Pagination from o2ims.domain import ocloud from o2ims.views.ocloud_dto import SubscriptionDTO from o2ims.domain.subscription_obj import Subscription from o2common.helper import o2logging -from o2common.config import config logger = o2logging.get_logger(__name__) @@ -41,10 +42,12 @@ def ocloud_one(ocloudid: str, uow: unit_of_work.AbstractUnitOfWork): return first.serialize() if 
first is not None else None -def resource_types(uow: unit_of_work.AbstractUnitOfWork): +def resource_types(uow: unit_of_work.AbstractUnitOfWork, **kwargs): + pagination = Pagination(**kwargs) + filter_kwargs = pagination.get_filter() with uow: - li = uow.resource_types.list() - return [r.serialize() for r in li] + li = uow.resource_types.list_with_count(**filter_kwargs) + return pagination.get_result(li) def resource_type_one(resourceTypeId: str, @@ -54,10 +57,12 @@ def resource_type_one(resourceTypeId: str, return first.serialize() if first is not None else None -def resource_pools(uow: unit_of_work.AbstractUnitOfWork): +def resource_pools(uow: unit_of_work.AbstractUnitOfWork, **kwargs): + pagination = Pagination(**kwargs) + filter_kwargs = pagination.get_filter() with uow: - li = uow.resource_pools.list() - return [r.serialize() for r in li] + li = uow.resource_pools.list_with_count(**filter_kwargs) + return pagination.get_result(li) def resource_pool_one(resourcePoolId: str, @@ -69,8 +74,9 @@ def resource_pool_one(resourcePoolId: str, def resources(resourcePoolId: str, uow: unit_of_work.AbstractUnitOfWork, **kwargs): - - filter_kwargs = {} # filter key should be the same with database name + pagination = Pagination(**kwargs) + # filter key should be the same with database name + filter_kwargs = pagination.get_filter() if 'resourceTypeName' in kwargs: resource_type_name = kwargs['resourceTypeName'] with uow: @@ -83,14 +89,15 @@ def resources(resourcePoolId: str, uow: unit_of_work.AbstractUnitOfWork, restype_id = '' if res_type is None else res_type.resourceTypeId filter_kwargs['resourceTypeId'] = restype_id - # li = uow.resources.list(resourcePoolId) - # return [r.serialize() for r in li if r.resourceTypeId == restype_id] if 'parentId' in kwargs: filter_kwargs['parentId'] = kwargs['parentId'] + if 'sort' in kwargs: + filter_kwargs['sort'] = kwargs['sort'] with uow: - li = uow.resources.list(resourcePoolId, **filter_kwargs) - return [r.serialize() for r in li] + 
ret = uow.resources.list_with_count(resourcePoolId, **filter_kwargs) + + return pagination.get_result(ret) def resource_one(resourceId: str, uow: unit_of_work.AbstractUnitOfWork): @@ -99,10 +106,12 @@ def resource_one(resourceId: str, uow: unit_of_work.AbstractUnitOfWork): return first.serialize() if first is not None else None -def deployment_managers(uow: unit_of_work.AbstractUnitOfWork): +def deployment_managers(uow: unit_of_work.AbstractUnitOfWork, **kwargs): + pagination = Pagination(**kwargs) + filter_kwargs = pagination.get_filter() with uow: - li = uow.deployment_managers.list() - return [r.serialize() for r in li] + li = uow.deployment_managers.list_with_count(**filter_kwargs) + return pagination.get_result(li) def deployment_manager_one(deploymentManagerId: str, @@ -177,10 +186,12 @@ def _gen_kube_config(dmId: str, kubeconfig: dict) -> dict: return '/configs/'+kube_config_name -def subscriptions(uow: unit_of_work.AbstractUnitOfWork): +def subscriptions(uow: unit_of_work.AbstractUnitOfWork, **kwargs): + pagination = Pagination(**kwargs) + filter_kwargs = pagination.get_filter() with uow: - li = uow.subscriptions.list() - return [r.serialize() for r in li] + li = uow.subscriptions.list_with_count(**filter_kwargs) + return pagination.get_result(li) def subscription_one(subscriptionId: str, diff --git a/requirements-test.txt b/requirements-test.txt index 4abb891..d60633c 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -8,6 +8,7 @@ pytest pytest-cov pytest-icdiff mock +mock-alchemy>=0.1.0,<0.2.0 tenacity pyOpenSSL diff --git a/tests/conftest.py b/tests/conftest.py index 68da7c4..b653ae8 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -27,6 +27,7 @@ from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker, clear_mappers from tenacity import retry, stop_after_delay from unittest.mock import MagicMock +from mock_alchemy.mocking import UnifiedAlchemyMagicMock from o2app.bootstrap import bootstrap from o2ims.views 
import configure_namespace @@ -46,6 +47,13 @@ def mock_uow(): return session, uow +@pytest.fixture +def mock_alchemy_uow(): + session = UnifiedAlchemyMagicMock() + uow = unit_of_work.SqlAlchemyUnitOfWork(session_factory=session) + return session, uow + + @pytest.fixture def mock_flask_uow(mock_uow): session, uow = mock_uow diff --git a/tests/unit/test_alarm.py b/tests/unit/test_alarm.py index 5bcde82..0bc8ee2 100644 --- a/tests/unit/test_alarm.py +++ b/tests/unit/test_alarm.py @@ -52,11 +52,17 @@ def test_view_alarm_event_records(mock_uow): alarm_event_record1 = MagicMock() alarm_event_record1.serialize.return_value = { "alarmEventRecordId": alarm_event_record_id1} - session.return_value.query.return_value = [alarm_event_record1] - alarm_event_record_list = alarm_view.alarm_event_records(uow) - assert str(alarm_event_record_list[0].get( - "alarmEventRecordId")) == alarm_event_record_id1 + order_by = MagicMock() + order_by.count.return_value = 1 + order_by.limit.return_value.offset.return_value = [alarm_event_record1] + session.return_value.query.return_value.filter_by.return_value.\ + order_by.return_value = order_by + + result = alarm_view.alarm_event_records(uow) + assert result['count'] == 1 + ret_list = result['results'] + assert str(ret_list[0].get("alarmEventRecordId")) == alarm_event_record_id1 def test_view_alarm_event_record_one(mock_uow): @@ -81,6 +87,50 @@ def test_view_alarm_event_record_one(mock_uow): "alarmEventRecordId")) == alarm_event_record_id1 +def test_view_alarm_subscriptions(mock_uow): + session, uow = mock_uow + + subscription_id1 = str(uuid.uuid4()) + sub1 = MagicMock() + sub1.serialize.return_value = { + "alarmSubscriptionId": subscription_id1, + } + + order_by = MagicMock() + order_by.count.return_value = 1 + order_by.limit.return_value.offset.return_value = [sub1] + session.return_value.query.return_value.filter_by.return_value.\ + order_by.return_value = order_by + + result = alarm_view.subscriptions(uow) + assert result['count'] == 1 + 
ret_list = result['results'] + assert str(ret_list[0].get("alarmSubscriptionId")) == subscription_id1 + + +def test_view_alarm_subscription_one(mock_uow): + session, uow = mock_uow + + subscription_id1 = str(uuid.uuid4()) + session.return_value.query.return_value.filter_by.return_value.first.\ + return_value.serialize.return_value = None + + # Query return None + subscription_res = alarm_view.subscription_one( + subscription_id1, uow) + assert subscription_res is None + + session.return_value.query.return_value.filter_by.return_value.first.\ + return_value.serialize.return_value = { + "alarmSubscriptionId": subscription_id1, + } + + subscription_res = alarm_view.subscription_one( + subscription_id1, uow) + assert str(subscription_res.get( + "alarmSubscriptionId")) == subscription_id1 + + def test_alarm_dictionary(mock_uow): session, uow = mock_uow alarm_dict1 = alarm_obj.AlarmDictionary('test1') @@ -97,7 +147,11 @@ def test_alarm_dictionary(mock_uow): def test_flask_get_list(mock_flask_uow): session, app = mock_flask_uow - session.query.return_value = [] + order_by = MagicMock() + order_by.count.return_value = 0 + order_by.limit.return_value.offset.return_value = [] + session.return_value.query.return_value.filter_by.return_value.\ + order_by.return_value = order_by apibase = config.get_o2ims_monitoring_api_base() with app.test_client() as client: diff --git a/tests/unit/test_ocloud.py b/tests/unit/test_ocloud.py index 56d4e57..8be1e42 100644 --- a/tests/unit/test_ocloud.py +++ b/tests/unit/test_ocloud.py @@ -14,7 +14,6 @@ import uuid from unittest.mock import MagicMock -# from o2dms.domain import dms from o2ims.domain import ocloud, subscription_obj from o2ims.domain import resource_type as rt @@ -135,11 +134,17 @@ def test_view_resource_types(mock_uow): restype1 = MagicMock() restype1.serialize.return_value = { "resourceTypeId": resource_type_id1} - session.return_value.query.return_value = [restype1] - resource_type_list = ocloud_view.resource_types(uow) - 
assert str(resource_type_list[0].get( - "resourceTypeId")) == resource_type_id1 + order_by = MagicMock() + order_by.count.return_value = 1 + order_by.limit.return_value.offset.return_value = [restype1] + session.return_value.query.return_value.filter_by.return_value.\ + order_by.return_value = order_by + + result = ocloud_view.resource_types(uow) + assert result['count'] == 1 + ret_list = result['results'] + assert str(ret_list[0].get("resourceTypeId")) == resource_type_id1 def test_view_resource_type_one(mock_uow): @@ -169,11 +174,17 @@ def test_view_resource_pools(mock_uow): respool1 = MagicMock() respool1.serialize.return_value = { "resourcePoolId": resource_pool_id1} - session.return_value.query.return_value = [respool1] - resource_pool_list = ocloud_view.resource_pools(uow) - assert str(resource_pool_list[0].get( - "resourcePoolId")) == resource_pool_id1 + order_by = MagicMock() + order_by.count.return_value = 1 + order_by.limit.return_value.offset.return_value = [respool1] + session.return_value.query.return_value.filter_by.return_value.\ + order_by.return_value = order_by + + result = ocloud_view.resource_pools(uow) + assert result['count'] == 1 + ret_list = result['results'] + assert str(ret_list[0].get("resourcePoolId")) == resource_pool_id1 def test_view_resource_pool_one(mock_uow): @@ -207,9 +218,16 @@ def test_view_resources(mock_uow): "resourceId": resource_id1, "resourcePoolId": resource_pool_id1 } - session.return_value.query.return_value.filter_by.return_value = [res1] - resource_list = ocloud_view.resources(resource_pool_id1, uow) + order_by = MagicMock() + order_by.count.return_value = 1 + order_by.limit.return_value.offset.return_value = [res1] + session.return_value.query.return_value.filter_by.return_value.\ + order_by.return_value = order_by + + result = ocloud_view.resources(resource_pool_id1, uow) + assert result['count'] == 1 + resource_list = result['results'] assert str(resource_list[0].get("resourceId")) == resource_id1 assert 
str(resource_list[0].get("resourcePoolId")) == resource_pool_id1 @@ -244,11 +262,18 @@ def test_view_deployment_managers(mock_uow): dm1.serialize.return_value = { "deploymentManagerId": deployment_manager_id1, } - session.return_value.query.return_value = [dm1] - deployment_manager_list = ocloud_view.deployment_managers(uow) - assert str(deployment_manager_list[0].get( - "deploymentManagerId")) == deployment_manager_id1 + order_by = MagicMock() + order_by.count.return_value = 1 + order_by.limit.return_value.offset.return_value = [dm1] + session.return_value.query.return_value.filter_by.return_value.\ + order_by.return_value = order_by + + result = ocloud_view.deployment_managers(uow) + assert result['count'] == 1 + ret_list = result['results'] + assert str(ret_list[0].get("deploymentManagerId") + ) == deployment_manager_id1 def test_view_deployment_manager_one(mock_uow): @@ -313,11 +338,17 @@ def test_view_subscriptions(mock_uow): sub1.serialize.return_value = { "subscriptionId": subscription_id1, } - session.return_value.query.return_value = [sub1] - subscription_list = ocloud_view.subscriptions(uow) - assert str(subscription_list[0].get( - "subscriptionId")) == subscription_id1 + order_by = MagicMock() + order_by.count.return_value = 1 + order_by.limit.return_value.offset.return_value = [sub1] + session.return_value.query.return_value.filter_by.return_value.\ + order_by.return_value = order_by + + result = ocloud_view.subscriptions(uow) + assert result['count'] == 1 + ret_list = result['results'] + assert str(ret_list[0].get("subscriptionId")) == subscription_id1 def test_view_subscription_one(mock_uow): @@ -345,7 +376,11 @@ def test_view_subscription_one(mock_uow): def test_flask_get_list(mock_flask_uow): session, app = mock_flask_uow - session.query.return_value = [] + order_by = MagicMock() + order_by.count.return_value = 0 + order_by.limit.return_value.offset.return_value = [] + session.return_value.query.return_value.filter_by.return_value.\ + 
order_by.return_value = order_by apibase = config.get_o2ims_api_base() with app.test_client() as client: -- 2.16.6 From 074ae58151b0fe8ed0d1bebf981c0826d3b5e622 Mon Sep 17 00:00:00 2001 From: Bin Yang Date: Fri, 21 Oct 2022 14:35:57 +0800 Subject: [PATCH 08/16] Update helm chart to refer to built app file Issue-ID: INF-317 Signed-off-by: Bin Yang Change-Id: I183ad4460a90a12cd1ae43e8a418563b343d758e --- charts/resources/scripts/init/o2api_start.sh | 8 ++++---- charts/resources/scripts/init/o2pubsub_start.sh | 9 +++++---- charts/resources/scripts/init/o2watcher_start.sh | 9 +++++---- charts/templates/deployment.yaml | 6 +++--- 4 files changed, 17 insertions(+), 15 deletions(-) diff --git a/charts/resources/scripts/init/o2api_start.sh b/charts/resources/scripts/init/o2api_start.sh index 65f3cbd..5c8fcd6 100644 --- a/charts/resources/scripts/init/o2api_start.sh +++ b/charts/resources/scripts/init/o2api_start.sh @@ -15,14 +15,14 @@ #!/bin/bash # pull latest code to debug -cd /root/ -git clone "https://gerrit.o-ran-sc.org/r/pti/o2" +# cd /root/ +# git clone "https://gerrit.o-ran-sc.org/r/pti/o2" # cd o2 # git pull https://gerrit.o-ran-sc.org/r/pti/o2 refs/changes/85/7085/5 # pip install retry -pip install -e /root/o2 - +# pip install -e /root/o2 +pip install -e /src cat <>/etc/hosts 127.0.0.1 api diff --git a/charts/resources/scripts/init/o2pubsub_start.sh b/charts/resources/scripts/init/o2pubsub_start.sh index 5dff1b3..6b54b12 100644 --- a/charts/resources/scripts/init/o2pubsub_start.sh +++ b/charts/resources/scripts/init/o2pubsub_start.sh @@ -15,10 +15,11 @@ #!/bin/bash # pull latest code to debug -cd /root/ -git clone "https://gerrit.o-ran-sc.org/r/pti/o2" -pip install -e /root/o2 +# cd /root/ +# git clone "https://gerrit.o-ran-sc.org/r/pti/o2" +# pip install -e /root/o2 -python /root/o2/o2app/entrypoints/redis_eventconsumer.py +# python /root/o2/o2app/entrypoints/redis_eventconsumer.py +python /src/o2app/entrypoints/redis_eventconsumer.py sleep infinity diff --git 
a/charts/resources/scripts/init/o2watcher_start.sh b/charts/resources/scripts/init/o2watcher_start.sh index 643c27d..d4add91 100644 --- a/charts/resources/scripts/init/o2watcher_start.sh +++ b/charts/resources/scripts/init/o2watcher_start.sh @@ -15,10 +15,11 @@ #!/bin/bash # pull latest code to debug -cd /root/ -git clone "https://gerrit.o-ran-sc.org/r/pti/o2" -pip install -e /root/o2 +# cd /root/ +# git clone "https://gerrit.o-ran-sc.org/r/pti/o2" +# pip install -e /root/o2 -python /root/o2/o2app/entrypoints/resource_watcher.py +# python /root/o2/o2app/entrypoints/resource_watcher.py +python /src/o2app/entrypoints/resource_watcher.py sleep infinity diff --git a/charts/templates/deployment.yaml b/charts/templates/deployment.yaml index 5d82063..a9ea8fd 100644 --- a/charts/templates/deployment.yaml +++ b/charts/templates/deployment.yaml @@ -31,8 +31,8 @@ spec: app: o2api spec: serviceAccountName: {{ .Values.o2ims.serviceaccountname }} - imagePullSecrets: - - name: {{ .Values.o2ims.imagePullSecrets }} + # imagePullSecrets: + # - name: {{ .Values.o2ims.imagePullSecrets }} {{- if .Values.o2ims.affinity }} affinity: {{ toYaml .Values.o2ims.affinity | indent 8 }} @@ -117,7 +117,7 @@ spec: - name: DB_PASSWORD value: o2ims123 - name: FLASK_APP - value: /root/o2/o2app/entrypoints/flask_application.py + value: /src/o2app/entrypoints/flask_application.py - name: FLASK_DEBUG value: {{ .Values.o2ims.logginglevel }} - name: LOGGING_CONFIG_LEVEL -- 2.16.6 From e1224a5b71d160557a3f2a4e810d5f59d81a3b1d Mon Sep 17 00:00:00 2001 From: Bin Yang Date: Fri, 21 Oct 2022 15:19:19 +0800 Subject: [PATCH 09/16] Bump major version to 2 to reflect the apiName changes from o2ims_infrastructureInventory to o2ims-infrastructureInventory, and adding a new apiName for o2ims-infrastructureMonitoring Issue-ID: INF-318 Signed-off-by: Bin Yang Change-Id: I7eb91fb74f31f3eca9e7532d3da4e02845be952d --- stages/container-tag.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git
a/stages/container-tag.yaml b/stages/container-tag.yaml index 73a715b..64ab279 100644 --- a/stages/container-tag.yaml +++ b/stages/container-tag.yaml @@ -2,4 +2,4 @@ # By default this file is in the docker build directory, # but the location can configured in the JJB template. --- -tag: "1.0.0" +tag: "2.0.0" -- 2.16.6 From bd72340e71574c7095e94aa2719df66c3f711545 Mon Sep 17 00:00:00 2001 From: Bin Yang Date: Fri, 21 Oct 2022 15:44:49 +0800 Subject: [PATCH 10/16] Add back secret with built-in user for o2 service Issue-ID: INF-317 Signed-off-by: Bin Yang Change-Id: Ia66f8b419d23600359c22bdc9b23295a9ffc7a3d --- charts/templates/deployment.yaml | 2 ++ charts/values.yaml | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/charts/templates/deployment.yaml b/charts/templates/deployment.yaml index a9ea8fd..82b675d 100644 --- a/charts/templates/deployment.yaml +++ b/charts/templates/deployment.yaml @@ -33,6 +33,8 @@ spec: serviceAccountName: {{ .Values.o2ims.serviceaccountname }} # imagePullSecrets: # - name: {{ .Values.o2ims.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.o2ims.serviceaccountname }}-registry-secret {{- if .Values.o2ims.affinity }} affinity: {{ toYaml .Values.o2ims.affinity | indent 8 }} diff --git a/charts/values.yaml b/charts/values.yaml index 0771680..d1e5b3f 100644 --- a/charts/values.yaml +++ b/charts/values.yaml @@ -30,7 +30,7 @@ global: namespace: orano2 o2ims: - serviceaccountname: admin + serviceaccountname: admin-orano2 image: repository: registry.local:9001/admin/o2imsdms tag: 0.1.1 -- 2.16.6 From 7ef3ae01ee1011d3b39be5330c8e0899dbe95bd0 Mon Sep 17 00:00:00 2001 From: dliu5 Date: Fri, 21 Oct 2022 14:17:20 +0800 Subject: [PATCH 11/16] Add helm chart for ca and config files. 
Change-Id: I21deaced6047cdee2c4a74399c39cd046356a7be Signed-off-by: dliu5 --- charts/templates/application_config.yaml | 28 ++++++++++++++++ charts/templates/ca_config.yaml | 28 ++++++++++++++++ charts/templates/deployment.yaml | 24 ++++++++++++-- charts/templates/serverkey_config.yaml | 28 ++++++++++++++++ charts/values.yaml | 6 ++++ docs/installation-guide.rst | 57 ++++++++++++++++++++++++++++++-- 6 files changed, 166 insertions(+), 5 deletions(-) create mode 100644 charts/templates/application_config.yaml create mode 100644 charts/templates/ca_config.yaml create mode 100644 charts/templates/serverkey_config.yaml diff --git a/charts/templates/application_config.yaml b/charts/templates/application_config.yaml new file mode 100644 index 0000000..1381032 --- /dev/null +++ b/charts/templates/application_config.yaml @@ -0,0 +1,28 @@ +# Copyright (C) 2022 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Chart.Name }}-application-config + namespace: {{ .Values.global.namespace }} + labels: + release: {{ .Release.Name }} + app: {{ include "orano2.name" . }} + chart: {{ .Chart.Name }} +# ... 
+data: + config.json: | +{{ .Values.applicationconfig | indent 4 }} diff --git a/charts/templates/ca_config.yaml b/charts/templates/ca_config.yaml new file mode 100644 index 0000000..b8703d7 --- /dev/null +++ b/charts/templates/ca_config.yaml @@ -0,0 +1,28 @@ +# Copyright (C) 2022 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Chart.Name }}-caconfig + namespace: {{ .Values.global.namespace }} + labels: + release: {{ .Release.Name }} + app: {{ include "orano2.name" . }} + chart: {{ .Chart.Name }} +# ... 
+data: + config.json: | +{{ .Values.caconfig | indent 4 }} diff --git a/charts/templates/deployment.yaml b/charts/templates/deployment.yaml index 5d82063..d229f44 100644 --- a/charts/templates/deployment.yaml +++ b/charts/templates/deployment.yaml @@ -139,6 +139,18 @@ spec: mountPath: /opt - name: configs mountPath: /configs + - name: applicationconfig + mountPath: /configs/o2app.conf + subPath: config.json + readOnly: true + - name: caconfig + mountPath: /configs/ca.cert + subPath: config.json + readOnly: true + - name: serverkeyconfig + mountPath: /configs/server.key + subPath: config.json + readOnly: true - name: helmcli image: "{{ .Values.o2ims.image.repository }}:{{ .Values.o2ims.image.tag }}" ports: @@ -152,12 +164,20 @@ spec: volumeMounts: - name: scripts mountPath: /opt - - name: configs - mountPath: /configs volumes: - name: scripts configMap: name: {{ .Chart.Name }}-scripts-configmap - name: configs emptyDir: {} + - configMap: + name: {{ .Chart.Name }}-application-config + name: applicationconfig + - configMap: + name: {{ .Chart.Name }}-serverkeyconfig + name: serverkeyconfig + - configMap: + name: {{ .Chart.Name }}-caconfig + name: caconfig --- + diff --git a/charts/templates/serverkey_config.yaml b/charts/templates/serverkey_config.yaml new file mode 100644 index 0000000..1949ff5 --- /dev/null +++ b/charts/templates/serverkey_config.yaml @@ -0,0 +1,28 @@ +# Copyright (C) 2022 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Chart.Name }}-serverkeyconfig + namespace: {{ .Values.global.namespace }} + labels: + release: {{ .Release.Name }} + app: {{ include "orano2.name" . }} + chart: {{ .Chart.Name }} +# ... +data: + config.json: | +{{ .Values.serverkeyconfig | indent 4 }} diff --git a/charts/values.yaml b/charts/values.yaml index 0771680..bb31c34 100644 --- a/charts/values.yaml +++ b/charts/values.yaml @@ -22,6 +22,12 @@ replicaCount: 1 nameOverride: "" fullnameOverride: "" +applicationconfig: + +caconfig: + +serverkeyconfig: + resources: cpu: 1 memory: 2Gi diff --git a/docs/installation-guide.rst b/docs/installation-guide.rst index 9b63c7c..7c19274 100644 --- a/docs/installation-guide.rst +++ b/docs/installation-guide.rst @@ -181,6 +181,57 @@ The following instruction should be done outside of INF platform controller host #export the smo account token data export SMO_TOKEN_DATA=$(kubectl -n default describe secret $(kubectl -n default get secret | grep ${SMO_SERVICEACCOUNT} | awk '{print $1}') | grep "token:" | awk '{print $2}') + #prepare the application config file + cat <app.conf + [DEFAULT] + + ocloud_global_id = 4e24b97c-8c49-4c4f-b53e-3de5235a4e37 + smo_register_url = http://127.0.0.1:8090/register + smo_token_data = ${SMO_TOKEN_DATA} + + [API] + test = "hello" + + [WATCHER] + + [PUBSUB] + + EOF + + #prepare the ssl cert files or generate with below command. 
+ + PARENT="imsserver" + openssl req \ + -x509 \ + -newkey rsa:4096 \ + -sha256 \ + -days 365 \ + -nodes \ + -keyout $PARENT.key \ + -out $PARENT.crt \ + -subj "/CN=${PARENT}" \ + -extensions v3_ca \ + -extensions v3_req \ + -config <( \ + echo '[req]'; \ + echo 'default_bits= 4096'; \ + echo 'distinguished_name=req'; \ + echo 'x509_extension = v3_ca'; \ + echo 'req_extensions = v3_req'; \ + echo '[v3_req]'; \ + echo 'basicConstraints = CA:FALSE'; \ + echo 'keyUsage = nonRepudiation, digitalSignature, keyEncipherment'; \ + echo 'subjectAltName = @alt_names'; \ + echo '[ alt_names ]'; \ + echo "DNS.1 = www.${PARENT}"; \ + echo "DNS.2 = ${PARENT}"; \ + echo '[ v3_ca ]'; \ + echo 'subjectKeyIdentifier=hash'; \ + echo 'authorityKeyIdentifier=keyid:always,issuer'; \ + echo 'basicConstraints = critical, CA:TRUE, pathlen:0'; \ + echo 'keyUsage = critical, cRLSign, keyCertSign'; \ + echo 'extendedKeyUsage = serverAuth, clientAuth') + cat <o2service-override.yaml o2ims: imagePullSecrets: admin-orano2-registry-secret @@ -205,7 +256,7 @@ The following instruction should be done outside of INF platform controller host .. code:: shell - helm install o2service o2/charts/ -f o2service-override.yaml + helm install o2service o2/charts --set-file caconfig="./imsserver.cert" --set-file applicationconfig="./app.conf" --set-file serverkeyconfig="./imsserver.key" -f o2service-override.yaml helm list |grep o2service kubectl -n ${NAMESPACE} get pods |grep o2api kubectl -n ${NAMESPACE} get services |grep o2api @@ -219,11 +270,11 @@ The following instruction should be done outside of INF platform controller host curl -k http(s)://:30205/o2ims_infrastructureInventory/v1/ -2.5 INF O2 Service API Swagger +2.5 INF O2 Service API Swagger ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Swagger UI can be found with URL: http(s)://:30205 - + 3. 
Register INF O2 Service to SMO --------------------------------- -- 2.16.6 From 1c020c705baa0a2a55591750c0fd46fff52a6929 Mon Sep 17 00:00:00 2001 From: Bin Yang Date: Fri, 21 Oct 2022 23:36:11 +0800 Subject: [PATCH 12/16] Fix Docker image and helm issue Issue-ID: INF-317 Signed-off-by: Bin Yang Change-Id: I8efb52d9d804d104e8c6f1ca8f51c0ce420b126c --- Dockerfile | 6 ++++++ charts/templates/deployment.yaml | 8 ++++---- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index c3aba8d..989e9d4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -36,6 +36,12 @@ COPY o2ims/ /src/o2ims/ COPY o2dms/ /src/o2dms/ COPY o2common/ /src/o2common/ +RUN mkdir -p /src/helm_sdk/ +COPY helm_sdk/ /src/helm_sdk/ + +RUN mkdir -p /configs/ +COPY configs/ /configs/ + RUN mkdir -p /src/o2app/ COPY o2app/ /src/o2app/ COPY setup.py /src/ diff --git a/charts/templates/deployment.yaml b/charts/templates/deployment.yaml index d9ebf10..5f64ccb 100644 --- a/charts/templates/deployment.yaml +++ b/charts/templates/deployment.yaml @@ -139,8 +139,8 @@ spec: volumeMounts: - name: scripts mountPath: /opt - - name: configs - mountPath: /configs + # - name: configs + # mountPath: /configs - name: applicationconfig mountPath: /configs/o2app.conf subPath: config.json @@ -170,8 +170,8 @@ spec: - name: scripts configMap: name: {{ .Chart.Name }}-scripts-configmap - - name: configs - emptyDir: {} + # - name: configs + # emptyDir: {} - configMap: name: {{ .Chart.Name }}-application-config name: applicationconfig -- 2.16.6 From 12249c7669e487cd9f37cee8475c89eb5490e0f0 Mon Sep 17 00:00:00 2001 From: dliu5 Date: Sat, 22 Oct 2022 10:45:22 +0800 Subject: [PATCH 13/16] Fix the docker app config mount issue and installation inconsistency Signed-off-by: dliu5 Change-Id: Iae3afcdf3c5a760d7dea606fbcccdf06c56d9450 --- charts/templates/deployment.yaml | 8 ++++++++ docs/installation-guide.rst | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/charts/templates/deployment.yaml 
b/charts/templates/deployment.yaml index 5f64ccb..194ca7e 100644 --- a/charts/templates/deployment.yaml +++ b/charts/templates/deployment.yaml @@ -82,6 +82,10 @@ spec: volumeMounts: - name: scripts mountPath: /opt + - name: applicationconfig + mountPath: /configs/o2app.conf + subPath: config.json + readOnly: true - name: watcher image: "{{ .Values.o2ims.image.repository }}:{{ .Values.o2ims.image.tag }}" command: ["/bin/bash", "/opt/o2watcher_start.sh"] @@ -107,6 +111,10 @@ spec: volumeMounts: - name: scripts mountPath: /opt + - name: applicationconfig + mountPath: /configs/o2app.conf + subPath: config.json + readOnly: true - name: o2api image: "{{ .Values.o2ims.image.repository }}:{{ .Values.o2ims.image.tag }}" ports: diff --git a/docs/installation-guide.rst b/docs/installation-guide.rst index 7c19274..4c3e2bc 100644 --- a/docs/installation-guide.rst +++ b/docs/installation-guide.rst @@ -256,7 +256,7 @@ The following instruction should be done outside of INF platform controller host .. code:: shell - helm install o2service o2/charts --set-file caconfig="./imsserver.cert" --set-file applicationconfig="./app.conf" --set-file serverkeyconfig="./imsserver.key" -f o2service-override.yaml + helm install o2service o2/charts --set-file caconfig="./imsserver.crt" --set-file applicationconfig="./app.conf" --set-file serverkeyconfig="./imsserver.key" -f o2service-override.yaml helm list |grep o2service kubectl -n ${NAMESPACE} get pods |grep o2api kubectl -n ${NAMESPACE} get services |grep o2api -- 2.16.6 From 7526ba257f901de043e518e5bb6e4180782aaace Mon Sep 17 00:00:00 2001 From: "Zhang Rong(Jon)" Date: Mon, 24 Oct 2022 11:36:41 +0800 Subject: [PATCH 14/16] Fix INF-319 failed to probe inventory resource Issue-ID: INF-319 Signed-off-by: Zhang Rong(Jon) Change-Id: I16765ae86f19671a5fe7788da1a792b9d7d3f222 --- Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 989e9d4..e837ed4 100644 --- a/Dockerfile +++ b/Dockerfile 
@@ -5,17 +5,17 @@ RUN apt-get update && apt-get install -y git gcc procps vim curl ssh # in case git repo is not accessable # RUN mkdir -p /cgtsclient # COPY temp/config /cgtsclient/ -RUN git clone --depth 1 --branch master https://opendev.org/starlingx/config.git /cgtsclient +RUN git clone --depth 1 --branch r/stx.7.0 https://opendev.org/starlingx/config.git /cgtsclient RUN pip install -e /cgtsclient/sysinv/cgts-client/cgts-client/ # RUN mkdir -p /distcloud-client # COPY temp/distcloud-client /distcloud-client/ -RUN git clone --depth 1 --branch master https://opendev.org/starlingx/distcloud-client.git /distcloud-client/ +RUN git clone --depth 1 --branch r/stx.7.0 https://opendev.org/starlingx/distcloud-client.git /distcloud-client/ RUN pip install -e /distcloud-client/distributedcloud-client # in case git repo is not accessable # RUN git clone --depth 1 --branch master https://github.com/cloudify-incubator/cloudify-helm-plugin.git /helmsdk -RUN git clone --depth 1 --branch master https://opendev.org/starlingx/fault.git /faultclient +RUN git clone --depth 1 --branch r/stx.7.0 https://opendev.org/starlingx/fault.git /faultclient RUN pip install -e /faultclient/python-fmclient/fmclient/ -- 2.16.6 From 52d9e382d966d6ee095831f3274170e9e1d17dfa Mon Sep 17 00:00:00 2001 From: dliu5 Date: Mon, 24 Oct 2022 11:36:45 +0800 Subject: [PATCH 15/16] Change the helm installation command option to compatible with wrcp. Start up flask app with ssl configuration files. INF-312. 
Signed-off-by: dliu5 Change-Id: I0f4383e5c0ffbe65fcd648ba916a64edc70db7d1 --- charts/resources/scripts/init/o2api_start.sh | 2 +- docs/installation-guide.rst | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/charts/resources/scripts/init/o2api_start.sh b/charts/resources/scripts/init/o2api_start.sh index 5c8fcd6..6aa68a4 100644 --- a/charts/resources/scripts/init/o2api_start.sh +++ b/charts/resources/scripts/init/o2api_start.sh @@ -31,6 +31,6 @@ cat <>/etc/hosts EOF -flask run --host=0.0.0.0 --port=80 +flask run --host=0.0.0.0 --port=80 --cert /configs/ca.cert --key /configs/server.key sleep infinity diff --git a/docs/installation-guide.rst b/docs/installation-guide.rst index 4c3e2bc..1ef39fa 100644 --- a/docs/installation-guide.rst +++ b/docs/installation-guide.rst @@ -256,7 +256,10 @@ The following instruction should be done outside of INF platform controller host .. code:: shell - helm install o2service o2/charts --set-file caconfig="./imsserver.crt" --set-file applicationconfig="./app.conf" --set-file serverkeyconfig="./imsserver.key" -f o2service-override.yaml + config_data=`cat ./path/to/app.conf` + certification_data=`cat ./path/to/imsserver.crt` + key_data=`cat ./path/to/imsserver.key` + helm install o2service o2/charts --set caconfig="$certification_data" --set applicationconfig="$config_data" --set serverkeyconfig="$key_data" -f o2service-override.yaml helm list |grep o2service kubectl -n ${NAMESPACE} get pods |grep o2api kubectl -n ${NAMESPACE} get services |grep o2api -- 2.16.6 From 8f7352951c11d939bae11422c00c87dc1f1d2a85 Mon Sep 17 00:00:00 2001 From: "Zhang Rong(Jon)" Date: Tue, 25 Oct 2022 11:01:55 +0800 Subject: [PATCH 16/16] Add fields selector for API with query parameters INF-300 Issue-ID: INF-300 Signed-off-by: Zhang Rong(Jon) Change-Id: I4be68e71e685c77c570d4e605167294970e16888 --- o2app/bootstrap.py | 3 - o2app/entrypoints/flask_application.py | 2 + o2common/views/pagination_route.py | 1 - o2common/views/route.py | 233 
+++++++++++++++++++++++++++++++++ o2ims/views/alarm_route.py | 88 +++++++++++++ o2ims/views/api_ns.py | 22 +++- o2ims/views/ocloud_dto.py | 40 ------ o2ims/views/ocloud_route.py | 229 +++++++++++++++++++++++++++++++- 8 files changed, 567 insertions(+), 51 deletions(-) create mode 100644 o2common/views/route.py diff --git a/o2app/bootstrap.py b/o2app/bootstrap.py index 228b240..e025a90 100644 --- a/o2app/bootstrap.py +++ b/o2app/bootstrap.py @@ -21,7 +21,6 @@ from o2common.adapter.notifications import AbstractNotifications,\ from o2common.adapter import redis_eventpublisher from o2common.service import unit_of_work from o2common.service import messagebus -from o2common.config import config from o2app.service import handlers from o2app.adapter.unit_of_work import SqlAlchemyUnitOfWork @@ -29,8 +28,6 @@ from o2app.adapter.unit_of_work import SqlAlchemyUnitOfWork from o2ims.adapter import orm as o2ims_orm from o2dms.adapter import orm as o2dms_orm -from o2ims.adapter.clients import alarm_dict_client - from o2common.helper import o2logging logger = o2logging.get_logger(__name__) diff --git a/o2app/entrypoints/flask_application.py b/o2app/entrypoints/flask_application.py index f74dca2..53e7e19 100644 --- a/o2app/entrypoints/flask_application.py +++ b/o2app/entrypoints/flask_application.py @@ -48,6 +48,8 @@ if auth: app.wsgi_app = authmiddleware.authmiddleware(app.wsgi_app) app.config.SWAGGER_UI_DOC_EXPANSION = 'list' +# app.config['RESTX_MASK_HEADER'] = 'fields' +app.config['RESTX_MASK_SWAGGER'] = False api = Api(app, version='1.0.0', title='INF O2 Services API', description='Swagger OpenAPI document for the INF O2 Services', diff --git a/o2common/views/pagination_route.py b/o2common/views/pagination_route.py index e7b738f..c595d9f 100644 --- a/o2common/views/pagination_route.py +++ b/o2common/views/pagination_route.py @@ -37,7 +37,6 @@ def link_header(full_path: str, ret): '&') if q.split('=')[0] != PAGE_PARAM]) if query != '': query = query + '&' - logger.warning(query) 
link_list = [] if (page_current > 1): diff --git a/o2common/views/route.py b/o2common/views/route.py new file mode 100644 index 0000000..b2d537d --- /dev/null +++ b/o2common/views/route.py @@ -0,0 +1,233 @@ +# Copyright (C) 2021-2022 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +# from collections import OrderedDict +from functools import wraps +# from six import iteritems + +from flask import request + +from flask_restx import Namespace +from flask_restx._http import HTTPStatus +from flask_restx.marshalling import marshal_with, marshal +from flask_restx.utils import merge +from flask_restx.mask import Mask # , apply as apply_mask +from flask_restx.model import Model +from flask_restx.fields import List, Nested, String +from flask_restx.utils import unpack + +from o2common.helper import o2logging +logger = o2logging.get_logger(__name__) + + +class O2Namespace(Namespace): + + def __init__(self, name, description=None, path=None, decorators=None, + validate=None, authorizations=None, ordered=False, **kwargs): + super().__init__(name, description, path, decorators, + validate, authorizations, ordered, **kwargs) + + def marshal_with( + self, fields, as_list=False, code=HTTPStatus.OK, description=None, + **kwargs + ): + """ + A decorator specifying the fields to use for serialization. 
+ + :param bool as_list: Indicate that the return type is a list \ + (for the documentation) + :param int code: Optionally give the expected HTTP response \ + code if its different from 200 + + """ + + def wrapper(func): + doc = { + "responses": { + str(code): (description, [fields], kwargs) + if as_list + else (description, fields, kwargs) + }, + "__mask__": kwargs.get( + "mask", True + ), # Mask values can't be determined outside app context + } + func.__apidoc__ = merge(getattr(func, "__apidoc__", {}), doc) + return o2_marshal_with(fields, ordered=self.ordered, + **kwargs)(func) + + return wrapper + + +class o2_marshal_with(marshal_with): + def __init__( + self, fields, envelope=None, skip_none=False, mask=None, ordered=False + ): + """ + :param fields: a dict of whose keys will make up the final + serialized response output + :param envelope: optional key that will be used to envelop the + serialized response + """ + self.fields = fields + self.envelope = envelope + self.skip_none = skip_none + self.ordered = ordered + self.mask = Mask(mask, skip=True) + + def __call__(self, f): + @wraps(f) + def wrapper(*args, **kwargs): + resp = f(*args, **kwargs) + + req_args = request.args + mask = self._gen_mask_from_filter(**req_args) + + # mask = self.mask + + # if has_request_context(): + # mask_header = current_app.config["RESTX_MASK_HEADER"] + # mask = request.headers.get(mask_header) or mask + if isinstance(resp, tuple): + data, code, headers = unpack(resp) + return ( + marshal( + data, + self.fields, + self.envelope, + self.skip_none, + mask, + self.ordered, + ), + code, + headers, + ) + else: + return marshal( + resp, self.fields, self.envelope, self.skip_none, mask, + self.ordered + ) + + return wrapper + + def _gen_mask_from_filter(self, **kwargs) -> str: + mask_val = '' + if 'all_fields' in kwargs: + all_fields_without_space = kwargs['all_fields'].replace(" ", "") + all_fields = all_fields_without_space.lower() + if 'true' == all_fields: + mask_val = '' + + elif 
'fields' in kwargs and kwargs['fields'] != '': + fields_without_space = kwargs['fields'].replace(" ", "") + + # filters = fields_without_space.split(',') + + # mask_val_list = [] + # for f in filters: + # if '/' in f: + # a = self.__gen_mask_tree(f) + # mask_val_list.append(a) + # continue + # mask_val_list.append(f) + # mask_val = '{%s}' % ','.join(mask_val_list) + default_fields = {} + + self.__update_filter_value( + default_fields, fields_without_space, True) + + mask_val = self.__gen_mask_from_filter_tree(default_fields) + + elif 'exclude_fields' in kwargs and kwargs['exclude_fields'] != '': + exclude_fields_without_space = kwargs['exclude_fields'].replace( + " ", "") + + default_fields = self.__gen_filter_tree_from_model_with_value( + self.fields) + + self.__update_filter_value( + default_fields, exclude_fields_without_space, False) + + mask_val = self.__gen_mask_from_filter_tree(default_fields) + elif 'exclude_default' in kwargs and kwargs['exclude_default'] != '': + exclude_default_without_space = kwargs['exclude_default'].replace( + " ", "") + exclude_default = exclude_default_without_space.lower() + if 'true' == exclude_default: + mask_val = '{}' + + else: + mask_val = '' + + return mask_val + + def __gen_mask_tree(self, field: str) -> str: + + f = field.split('/', 1) + if len(f) > 1: + s = self.__gen_mask_tree(f[1]) + return '%s%s' % (f[0], s) + else: + return '{%s}' % f[0] + + def __gen_filter_tree_from_model_with_value( + self, model: Model, default_val: bool = True) -> dict: + filter = dict() + for i in model: + if type(model[i]) is List: + if type(model[i].container) is String: + filter[i] = default_val + continue + filter[i] = self.__gen_filter_tree_from_model_with_value( + model[i].container.model, default_val) + continue + elif type(model[i]) is Nested: + filter[i] = self.__gen_filter_tree_from_model_with_value( + model[i].model, default_val) + filter[i] = default_val + return filter + + def __update_filter_value(self, default_fields: dict, filter: 
str, + val: bool): + fields = filter.split(',') + for f in fields: + if '/' in f: + self.__update_filter_tree_value(default_fields, f, val) + continue + default_fields[f] = val + + def __update_filter_tree_value(self, m: dict, filter: str, val: bool): + filter_list = filter.split('/', 1) + if filter_list[0] not in m: + m[filter_list[0]] = dict() + if len(filter_list) > 1: + self.__update_filter_tree_value( + m[filter_list[0]], filter_list[1], val) + return + m[filter_list[0]] = val + + def __gen_mask_from_filter_tree(self, fields: dict) -> str: + mask_li = list() + for k, v in fields.items(): + if type(v) is dict: + s = self.__gen_mask_from_filter_tree(v) + mask_li.append('%s%s' % (k, s)) + continue + if v: + mask_li.append(k) + + return '{%s}' % ','.join(mask_li) diff --git a/o2ims/views/alarm_route.py b/o2ims/views/alarm_route.py index 6b32eee..28bfd5c 100644 --- a/o2ims/views/alarm_route.py +++ b/o2ims/views/alarm_route.py @@ -37,6 +37,27 @@ def configure_api_route(): 'Page number of the results to fetch.' + ' Default: 1', _in='query', default=1) +@api_monitoring_v1.param( + 'all_fields', + 'Set any value for show all fields. This value will cover "fields" ' + + 'and "all_fields".', + _in='query') +@api_monitoring_v1.param( + 'fields', + 'Set fields to show, split by comman, "/" for parent and children.' + + ' Like "name,parent/children". This value will cover' + + ' "exculde_fields".', + _in='query') +@api_monitoring_v1.param( + 'exclude_fields', + 'Set fields to exclude showing, split by comman, "/" for parent and ' + + 'children. Like "name,parent/children". 
This value will cover ' + + '"exclude_default".', + _in='query') +@api_monitoring_v1.param( + 'exclude_default', + 'Exclude showing all default fields, Set "true" to enable.', + _in='query') class AlarmListRouter(Resource): model = AlarmDTO.alarm_event_record_get @@ -57,6 +78,27 @@ class AlarmListRouter(Resource): @api_monitoring_v1.route("/alarms/") @api_monitoring_v1.param('alarmEventRecordId', 'ID of the alarm event record') @api_monitoring_v1.response(404, 'Alarm Event Record not found') +@api_monitoring_v1.param( + 'all_fields', + 'Set any value for show all fields. This value will cover "fields" ' + + 'and "all_fields".', + _in='query') +@api_monitoring_v1.param( + 'fields', + 'Set fields to show, split by comman, "/" for parent and children.' + + ' Like "name,parent/children". This value will cover' + + ' "exculde_fields".', + _in='query') +@api_monitoring_v1.param( + 'exclude_fields', + 'Set fields to exclude showing, split by comman, "/" for parent and ' + + 'children. Like "name,parent/children". This value will cover ' + + '"exclude_default".', + _in='query') +@api_monitoring_v1.param( + 'exclude_default', + 'Exclude showing all default fields, Set "true" to enable.', + _in='query') class AlarmGetRouter(Resource): model = AlarmDTO.alarm_event_record_get @@ -81,6 +123,31 @@ class SubscriptionsListRouter(Resource): @api_monitoring_v1.doc('List alarm subscriptions') @api_monitoring_v1.marshal_list_with(model) + @api_monitoring_v1.param( + PAGE_PARAM, + 'Page number of the results to fetch. Default: 1', + _in='query', default=1) + @api_monitoring_v1.param( + 'all_fields', + 'Set any value for show all fields. This value will cover "fields" ' + + 'and "all_fields".', + _in='query') + @api_monitoring_v1.param( + 'fields', + 'Set fields to show, split by comman, "/" for parent and children.' + + ' Like "name,parent/children". 
This value will cover' + + ' "exculde_fields".', + _in='query') + @api_monitoring_v1.param( + 'exclude_fields', + 'Set fields to exclude showing, split by comman, "/" for parent and ' + + 'children. Like "name,parent/children". This value will cover ' + + '"exclude_default".', + _in='query') + @api_monitoring_v1.param( + 'exclude_default', + 'Exclude showing all default fields, Set "true" to enable.', + _in='query') def get(self): parser = reqparse.RequestParser() parser.add_argument(PAGE_PARAM, location='args') @@ -110,6 +177,27 @@ class SubscriptionGetDelRouter(Resource): @api_monitoring_v1.doc('Get Alarm Subscription by ID') @api_monitoring_v1.marshal_with(model) + @api_monitoring_v1.param( + 'all_fields', + 'Set any value for show all fields. This value will cover "fields" ' + + 'and "all_fields".', + _in='query') + @api_monitoring_v1.param( + 'fields', + 'Set fields to show, split by comman, "/" for parent and children.' + + ' Like "name,parent/children". This value will cover' + + ' "exculde_fields".', + _in='query') + @api_monitoring_v1.param( + 'exclude_fields', + 'Set fields to exclude showing, split by comman, "/" for parent and ' + + 'children. Like "name,parent/children". This value will cover ' + + '"exclude_default".', + _in='query') + @api_monitoring_v1.param( + 'exclude_default', + 'Exclude showing all default fields, Set "true" to enable.', + _in='query') def get(self, alarmSubscriptionID): result = alarm_view.subscription_one( alarmSubscriptionID, bus.uow) diff --git a/o2ims/views/api_ns.py b/o2ims/views/api_ns.py index 3fbdc18..b06cb2a 100644 --- a/o2ims/views/api_ns.py +++ b/o2ims/views/api_ns.py @@ -1,14 +1,28 @@ -from flask_restx import Namespace +# Copyright (C) 2021-2022 Wind River Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from o2common.views.route import O2Namespace -api_ims_inventory_v1 = Namespace( + +api_ims_inventory_v1 = O2Namespace( "O2IMS_Inventory", description='IMS Inventory related operations.') -api_provision_v1 = Namespace( +api_provision_v1 = O2Namespace( "PROVISION", description='Provision related operations.') -api_monitoring_v1 = Namespace( +api_monitoring_v1 = O2Namespace( "O2IMS_InfrastructureMonitoring", description='O2 IMS Monitoring related operations.') diff --git a/o2ims/views/ocloud_dto.py b/o2ims/views/ocloud_dto.py index c8e5202..df8385a 100644 --- a/o2ims/views/ocloud_dto.py +++ b/o2ims/views/ocloud_dto.py @@ -75,46 +75,6 @@ class ResourceDTO: } ) - list_result = api_ims_inventory_v1.model( - "ResourceListPagenationDto", - { - 'count': fields.Integer(), - 'page_num': fields.Integer(), - 'results': fields.List(fields.Nested(resource_list)) - } - ) - - # def get_paginated_list(results, url, start, limit): - # start = int(start) - # limit = int(limit) - # count = len(results) - # if count < start or limit < 0: - # api_ims_inventory_v1.abort(404) - # # make response - # obj = {} - # obj['start'] = start - # obj['limit'] = limit - # obj['count'] = count - # # make URLs - # # make previous url - # if start == 1: - # obj['previous'] = '' - # else: - # start_copy = max(1, start - limit) - # limit_copy = start - 1 - # obj['previous'] = url + \ - # '?start=%d&limit=%d' % (start_copy, limit_copy) - # # make next url - # if start + limit > count: - # obj['next'] = '' - # else: - # start_copy = start + limit - # obj['next'] = url + '?start=%d&limit=%d' % 
(start_copy, limit) - # # finally extract result according to bounds - # # obj['results'] = results[(start - 1):(start - 1 + limit)] - # obj['result'] = fields.List(fields.Nested(ResourceDTO.resource_list)) - # return obj - def recursive_resource_mapping(iteration_number=2): resource_json_mapping = { 'resourceId': fields.String(required=True, diff --git a/o2ims/views/ocloud_route.py b/o2ims/views/ocloud_route.py index 5feb6b8..43a4761 100644 --- a/o2ims/views/ocloud_route.py +++ b/o2ims/views/ocloud_route.py @@ -35,6 +35,26 @@ def configure_api_route(): # ---------- OClouds ---------- # @api_ims_inventory_v1.route("/") @api_ims_inventory_v1.response(404, 'oCloud not found') +@api_ims_inventory_v1.param( + 'all_fields', + 'Set any value for show all fields. This value will cover "fields" ' + + 'and "all_fields".', + _in='query') +@api_ims_inventory_v1.param( + 'fields', + 'Set fields to show, split by comman, "/" for parent and children.' + + ' Like "name,parent/children". This value will cover "exculde_fields".', + _in='query') +@api_ims_inventory_v1.param( + 'exclude_fields', + 'Set fields to exclude showing, split by comman, "/" for parent and ' + + 'children. Like "name,parent/children". This value will cover ' + + '"exclude_default".', + _in='query') +@api_ims_inventory_v1.param( + 'exclude_default', + 'Exclude showing all default fields, Set "true" to enable.', + _in='query') class OcloudsListRouter(Resource): """Ocloud get endpoint O2 interface ocloud endpoint @@ -57,6 +77,26 @@ class OcloudsListRouter(Resource): 'Page number of the results to fetch.' + ' Default: 1', _in='query', default=1) +@api_ims_inventory_v1.param( + 'all_fields', + 'Set any value for show all fields. This value will cover "fields" ' + + 'and "all_fields".', + _in='query') +@api_ims_inventory_v1.param( + 'fields', + 'Set fields to show, split by comman, "/" for parent and children.' + + ' Like "name,parent/children". 
This value will cover "exculde_fields".', + _in='query') +@api_ims_inventory_v1.param( + 'exclude_fields', + 'Set fields to exclude showing, split by comman, "/" for parent and ' + + 'children. Like "name,parent/children". This value will cover ' + + '"exclude_default".', + _in='query') +@api_ims_inventory_v1.param( + 'exclude_default', + 'Exclude showing all default fields, Set "true" to enable.', + _in='query') class ResourceTypesListRouter(Resource): model = ResourceTypeDTO.resource_type_get @@ -77,6 +117,26 @@ class ResourceTypesListRouter(Resource): @api_ims_inventory_v1.route("/resourceTypes/") @api_ims_inventory_v1.param('resourceTypeID', 'ID of the resource type') @api_ims_inventory_v1.response(404, 'Resource type not found') +@api_ims_inventory_v1.param( + 'all_fields', + 'Set any value for show all fields. This value will cover "fields" ' + + 'and "all_fields".', + _in='query') +@api_ims_inventory_v1.param( + 'fields', + 'Set fields to show, split by comman, "/" for parent and children.' + + ' Like "name,parent/children". This value will cover "exculde_fields".', + _in='query') +@api_ims_inventory_v1.param( + 'exclude_fields', + 'Set fields to exclude showing, split by comman, "/" for parent and ' + + 'children. Like "name,parent/children". This value will cover ' + + '"exclude_default".', + _in='query') +@api_ims_inventory_v1.param( + 'exclude_default', + 'Exclude showing all default fields, Set "true" to enable.', + _in='query') class ResourceTypeGetRouter(Resource): model = ResourceTypeDTO.resource_type_get @@ -97,6 +157,26 @@ class ResourceTypeGetRouter(Resource): 'Page number of the results to fetch.' + ' Default: 1', _in='query', default=1) +@api_ims_inventory_v1.param( + 'all_fields', + 'Set any value for show all fields. This value will cover "fields" ' + + 'and "all_fields".', + _in='query') +@api_ims_inventory_v1.param( + 'fields', + 'Set fields to show, split by comman, "/" for parent and children.' + + ' Like "name,parent/children". 
This value will cover "exculde_fields".', + _in='query') +@api_ims_inventory_v1.param( + 'exclude_fields', + 'Set fields to exclude showing, split by comman, "/" for parent and ' + + 'children. Like "name,parent/children". This value will cover ' + + '"exclude_default".', + _in='query') +@api_ims_inventory_v1.param( + 'exclude_default', + 'Exclude showing all default fields, Set "true" to enable.', + _in='query') class ResourcePoolsListRouter(Resource): model = ResourcePoolDTO.resource_pool_get @@ -117,6 +197,26 @@ class ResourcePoolsListRouter(Resource): @api_ims_inventory_v1.route("/resourcePools/") @api_ims_inventory_v1.param('resourcePoolID', 'ID of the resource pool') @api_ims_inventory_v1.response(404, 'Resource pool not found') +@api_ims_inventory_v1.param( + 'all_fields', + 'Set any value for show all fields. This value will cover "fields" ' + + 'and "all_fields".', + _in='query') +@api_ims_inventory_v1.param( + 'fields', + 'Set fields to show, split by comman, "/" for parent and children.' + + ' Like "name,parent/children". This value will cover "exculde_fields".', + _in='query') +@api_ims_inventory_v1.param( + 'exclude_fields', + 'Set fields to exclude showing, split by comman, "/" for parent and ' + + 'children. Like "name,parent/children". This value will cover ' + + '"exclude_default".', + _in='query') +@api_ims_inventory_v1.param( + 'exclude_default', + 'Exclude showing all default fields, Set "true" to enable.', + _in='query') class ResourcePoolGetRouter(Resource): model = ResourcePoolDTO.resource_pool_get @@ -147,6 +247,26 @@ class ResourcePoolGetRouter(Resource): 'Page number of the results to fetch.' + ' Default: 1', _in='query', default=1) +@api_ims_inventory_v1.param( + 'all_fields', + 'Set any value for show all fields. This value will cover "fields" ' + + 'and "all_fields".', + _in='query') +@api_ims_inventory_v1.param( + 'fields', + 'Set fields to show, split by comman, "/" for parent and children.' + + ' Like "name,parent/children". 
This value will cover "exculde_fields".', + _in='query') +@api_ims_inventory_v1.param( + 'exclude_fields', + 'Set fields to exclude showing, split by comman, "/" for parent and ' + + 'children. Like "name,parent/children". This value will cover ' + + '"exclude_default".', + _in='query') +@api_ims_inventory_v1.param( + 'exclude_default', + 'Exclude showing all default fields, Set "true" to enable.', + _in='query') class ResourcesListRouter(Resource): model = ResourceDTO.resource_list @@ -156,8 +276,6 @@ class ResourcesListRouter(Resource): parser = reqparse.RequestParser() parser.add_argument('resourceTypeName', location='args') parser.add_argument('parentId', location='args') - # parser.add_argument('sort', location='args') - # parser.add_argument('per_page', location='args') parser.add_argument(PAGE_PARAM, location='args') args = parser.parse_args() kwargs = {} @@ -174,7 +292,6 @@ class ResourcesListRouter(Resource): kwargs['page'] = args.nextpage_opaque_marker ret = ocloud_view.resources(resourcePoolID, bus.uow, **kwargs) - return link_header(request.full_path, ret) @@ -183,6 +300,26 @@ class ResourcesListRouter(Resource): @api_ims_inventory_v1.param('resourcePoolID', 'ID of the resource pool') @api_ims_inventory_v1.param('resourceID', 'ID of the resource') @api_ims_inventory_v1.response(404, 'Resource not found') +@api_ims_inventory_v1.param( + 'all_fields', + 'Set any value for show all fields. This value will cover "fields" ' + + 'and "all_fields".', + _in='query') +@api_ims_inventory_v1.param( + 'fields', + 'Set fields to show, split by comman, "/" for parent and children.' + + ' Like "name,parent/children". This value will cover "exculde_fields".', + _in='query') +@api_ims_inventory_v1.param( + 'exclude_fields', + 'Set fields to exclude showing, split by comman, "/" for parent and ' + + 'children. Like "name,parent/children". 
This value will cover ' + + '"exclude_default".', + _in='query') +@api_ims_inventory_v1.param( + 'exclude_default', + 'Exclude showing all default fields, Set "true" to enable.', + _in='query') class ResourceGetRouter(Resource): # dto = ResourceDTO() @@ -205,6 +342,26 @@ class ResourceGetRouter(Resource): 'Page number of the results to fetch.' + ' Default: 1', _in='query', default=1) +@api_ims_inventory_v1.param( + 'all_fields', + 'Set any value for show all fields. This value will cover "fields" ' + + 'and "all_fields".', + _in='query') +@api_ims_inventory_v1.param( + 'fields', + 'Set fields to show, split by comman, "/" for parent and children.' + + ' Like "name,parent/children". This value will cover "exculde_fields".', + _in='query') +@api_ims_inventory_v1.param( + 'exclude_fields', + 'Set fields to exclude showing, split by comman, "/" for parent and ' + + 'children. Like "name,parent/children". This value will cover ' + + '"exclude_default".', + _in='query') +@api_ims_inventory_v1.param( + 'exclude_default', + 'Exclude showing all default fields, Set "true" to enable.', + _in='query') class DeploymentManagersListRouter(Resource): model = DeploymentManagerDTO.deployment_manager_list @@ -228,6 +385,26 @@ class DeploymentManagersListRouter(Resource): @api_ims_inventory_v1.param('profile', 'DMS profile: value supports "sol018"', _in='query') @api_ims_inventory_v1.response(404, 'Deployment manager not found') +@api_ims_inventory_v1.param( + 'all_fields', + 'Set any value for show all fields. This value will cover "fields" ' + + 'and "all_fields".', + _in='query') +@api_ims_inventory_v1.param( + 'fields', + 'Set fields to show, split by comman, "/" for parent and children.' + + ' Like "name,parent/children". This value will cover "exculde_fields".', + _in='query') +@api_ims_inventory_v1.param( + 'exclude_fields', + 'Set fields to exclude showing, split by comman, "/" for parent and ' + + 'children. Like "name,parent/children". 
This value will cover ' + + '"exclude_default".', + _in='query') +@api_ims_inventory_v1.param( + 'exclude_default', + 'Exclude showing all default fields, Set "true" to enable.', + _in='query') class DeploymentManagerGetRouter(Resource): model = DeploymentManagerDTO.deployment_manager_get @@ -260,6 +437,31 @@ class SubscriptionsListRouter(Resource): @api_ims_inventory_v1.doc('List subscriptions') @api_ims_inventory_v1.marshal_list_with(model) + @api_ims_inventory_v1.param( + PAGE_PARAM, + 'Page number of the results to fetch. Default: 1', + _in='query', default=1) + @api_ims_inventory_v1.param( + 'all_fields', + 'Set any value for show all fields. This value will cover "fields" ' + + 'and "all_fields".', + _in='query') + @api_ims_inventory_v1.param( + 'fields', + 'Set fields to show, split by comman, "/" for parent and children.' + + ' Like "name,parent/children". This value will cover' + + ' "exculde_fields".', + _in='query') + @api_ims_inventory_v1.param( + 'exclude_fields', + 'Set fields to exclude showing, split by comman, "/" for parent and ' + + 'children. Like "name,parent/children". This value will cover ' + + '"exclude_default".', + _in='query') + @api_ims_inventory_v1.param( + 'exclude_default', + 'Exclude showing all default fields, Set "true" to enable.', + _in='query') def get(self): parser = reqparse.RequestParser() parser.add_argument(PAGE_PARAM, location='args') @@ -289,6 +491,27 @@ class SubscriptionGetDelRouter(Resource): @api_ims_inventory_v1.doc('Get subscription by ID') @api_ims_inventory_v1.marshal_with(model) + @api_ims_inventory_v1.param( + 'all_fields', + 'Set any value for show all fields. This value will cover "fields" ' + + 'and "all_fields".', + _in='query') + @api_ims_inventory_v1.param( + 'fields', + 'Set fields to show, split by comman, "/" for parent and children.' + + ' Like "name,parent/children". 
This value will cover' + + ' "exculde_fields".', + _in='query') + @api_ims_inventory_v1.param( + 'exclude_fields', + 'Set fields to exclude showing, split by comman, "/" for parent and ' + + 'children. Like "name,parent/children". This value will cover ' + + '"exclude_default".', + _in='query') + @api_ims_inventory_v1.param( + 'exclude_default', + 'Exclude showing all default fields, Set "true" to enable.', + _in='query') def get(self, subscriptionID): result = ocloud_view.subscription_one( subscriptionID, bus.uow) -- 2.16.6