Fix the alarm exception about the alarmDefinition 47/13147/1 2.0.4
author: Zhang Rong(Jon) <rong.zhang@windriver.com>
Mon, 1 Jul 2024 15:19:44 +0000 (23:19 +0800)
committer: Zhang Rong(Jon) <rong.zhang@windriver.com>
Tue, 2 Jul 2024 02:49:27 +0000 (10:49 +0800)
When an alarm is inserted into the database and no alarm definition
exists that maps to it, an exception is thrown.

This commit will add the new alarm definitions.

Test Case:
PASS - Tested a new alarm that previously had no definition; it worked
       as expected.

Issue-ID: INF-478

Change-Id: I201806298270f0df5ca035768d08efb1add1ab8e
Signed-off-by: Zhang Rong(Jon) <rong.zhang@windriver.com>
configs/events.yaml [changed mode: 0755->0644]

old mode 100755 (executable)
new mode 100644 (file)
index d15a423..0df93ea
@@ -1,7 +1,7 @@
 ---
 
 #
-# Copyright (c) 2013-2021 Wind River Systems, Inc.
+# Copyright (c) 2013-2024 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
 #       // lowest alarm level of this type that will block forced upgrades & orchestration actions
 #   Degrade_Affecting_Severity: < none | critical | major | minor >
 #       // lowest alarm level of this type sets a host to 'degraded'
+#   Context: < none | starlingx | openstack >
+#       // Identifies where the alarm/log is used. If it should be ignored by
+#       // the documentation generating scripts, the value has to be 'none'.
+#       // If any of the other values is used, the alarm/log will be included
+#       // in the documentation and classified by the chosen value.
 #
 #
 #   Other Notes:
     Suppression: True
     Management_Affecting_Severity: major
     Degrade_Affecting_Severity: critical
+    Context: starlingx
 
 100.102:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: none
 
 100.103:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: critical
+    Context: starlingx
 
 100.104:    # NOTE This should really be split into two different Alarms.
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: critical
     Degrade_Affecting_Severity: critical
+    Context: starlingx
 
 100.105:
     Type: Alarm
     Entity_Instance_ID: fs_name=<image-conversion>
     Severity: critical
     Proposed_Repair_Action: "Add image-conversion filesystem on both controllers.
-                             Consult the System Administration Manual for more details.
+                             See the |prod-long| documentation at |docs-url| for more details.
                              If problem persists, contact next level of support."
     Maintenance_Action: degrade
     Inhibit_Alarms:
     Suppression: False
     Management_Affecting_Severity: major
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 #--------
 # 100.105: Retired (with R2 release): previously monitored /etc/nova/instances
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: major
+    Context: starlingx
 
 100.107:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: major
+    Context: starlingx
 
 100.108:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: major
+    Context: starlingx
 
 100.109:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: major
+    Context: starlingx
 
 100.110:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: major
+    Context: starlingx
 
 100.111:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: major
+    Context: starlingx
 
 100.112:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: major
+    Context: openstack
 
 100.113:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: major
+    Context: openstack
 
 100.114:
     Type: Alarm
     Suppression: False
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 100.115:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: critical
+    Context: none
 
 100.116:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: critical
+    Context: none
 
 100.117:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: major
     Degrade_Affecting_Severity: critical
+    Context: none
 
 100.118:
     Type: Alarm
     Suppression: False
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 100.119:
     Type: Alarm
         OR
         <hostname> PTP clocking is out-of-tolerance
         OR
-        <hostname> is not locked to remote PTP Grand Master
+        <hostname> is not locked to remote PTP Primary source
         OR
         <hostname> GNSS signal loss state:<state>
         OR
     Suppression: False
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: starlingx
+
+100.120:
+    Type: Alarm
+    Description: Controllers running mismatched kernels.
+    Entity_Instance_ID: host=<hostname>.kernel=<kernel>
+    Severity: minor
+    Proposed_Repair_Action: "Modify controllers using 'system host-kernel-modify' so that both are running the desired 'standard' or 'lowlatency' kernel."
+    Maintenance_Action: none
+    Inhibit_Alarms: False
+    Alarm_Type: equipment
+    Probable_Cause: unspecified-reason
+    Service_Affecting: False
+    Suppression: False
+    Management_Affecting_Severity: none
+    Degrade_Affecting_Severity: none
+    Context: starlingx
+
+100.121:
+    Type: Alarm
+    Description: Host not running the provisioned kernel.
+    Entity_Instance_ID: host=<hostname>.kernel=<kernel>
+    Severity: major
+    Proposed_Repair_Action: "Retry 'system host-kernel-modify' and if condition persists, contact next level of support."
+    Maintenance_Action: none
+    Inhibit_Alarms: False
+    Alarm_Type: equipment
+    Probable_Cause: unspecified-reason
+    Service_Affecting: False
+    Suppression: False
+    Management_Affecting_Severity: major
+    Degrade_Affecting_Severity: none
+    Context: starlingx
 
 100.150:
     Type: Alarm
     Suppression: False
     Management_Affecting_Severity: critical
     Degrade_Affecting_Severity: critical
+    Context: starlingx
+
 
 #---------------------------------------------------------------------------
 #   MAINTENANCE
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
+
+200.003:
+    Type: Alarm
+    Description: <hostname> pxeboot network communication failure.
+    Entity_Instance_ID: host=<hostname>
+    Severity: minor
+    Proposed_Repair_Action: Administratively Lock and Unlock host to recover. If problem persists, contact next level of support.
+    Maintenance_Action: none
+    Inhibit_Alarms: False
+    Alarm_Type: communication
+    Probable_Cause: unknown
+    Service_Affecting: False
+    Suppression: False
+    Management_Affecting_Severity: warning
+    Degrade_Affecting_Severity: none
+    Context: starlingx
 
 200.004:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 200.011:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 200.010:
     Type: Alarm
     Suppression: False
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
-
-200.012:
-    Type: Alarm
-    Description: <hostname> controller function has in-service failure while compute services remain healthy.
-    Entity_Instance_ID: host=<hostname>
-    Severity: major
-    Proposed_Repair_Action: Lock and then Unlock host to recover. Avoid using 'Force Lock' action as that will impact compute services running on this host.  If lock action fails then contact next level of support to investigate and recover.
-    Maintenance_Action: "degrade - requires manual action"
-    Inhibit_Alarms: False
-    Alarm_Type: operational-violation
-    Probable_Cause: communication-subsystem-failure
-    Service_Affecting: True
-    Suppression: True
-    Management_Affecting_Severity: warning
-    Degrade_Affecting_Severity: major
+    Context: starlingx
 
 200.013:
     Type: Alarm
-    Description: <hostname> compute service of the only available controller is not poperational. Auto-recovery is disabled. Deggrading host instead.
+    Description: <hostname> compute service of the only available controller is not operational. Auto-recovery is disabled. Degrading host instead.
     Entity_Instance_ID: host=<hostname>
     Severity: major
     Proposed_Repair_Action: Enable second controller and Switch Activity (Swact) over to it as soon as possible. Then Lock and Unlock host to recover its local compute service.
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: major
+    Context: starlingx
 
 200.005:
     Type: Alarm
     Description: |-
         Degrade:
-        <hostname> is experiencing an intermittent 'Management Network'  communication failures that have exceeded its lower alarming threshold.
+        <hostname> is experiencing an intermittent 'Management Network' communication failure that has exceeded its lower alarming threshold.
 
         Failure:
         <hostname> is experiencing a persistent critical 'Management Network' communication failure."
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 200.009:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 
 200.006:
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: major
+    Context: starlingx
 
 # 200.006:      // NOTE using duplicate ID of a completely analogous Alarm for this
 #     Type: Log
     Suppression: True
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: critical
+    Context: starlingx
 
 200.014:
     Type: Alarm
-    Description: "The Hardware Monitor was unable to load, configure and monitor one or more hardware sensors."
+    Description: The Hardware Monitor was unable to load, configure and monitor one or more hardware sensors.
     Entity_Instance_ID: host=<hostname>
     Severity: minor
-    Proposed_Repair_Action: Check Board Management Controller provisioning. Try reprovisioning the BMC. If problem persists try power cycling the host and then the entire server including the BMC power. If problem persists then contact next level of support.
+    Proposed_Repair_Action: Check Board Management Controller provisioning. Try reprovisioning the BMC. If problem persists, try power cycling the host and then the entire server including the BMC power. If problem persists, then contact next level of support.
     Maintenance_Action: None
     Inhibit_Alarms: False
     Alarm_Type: operational-violation
     Suppression: True
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 200.015:
     Type: Alarm
     Description: Unable to read one or more sensor groups from this host's board management controller
     Entity_Instance_ID: host=<hostname>
     Severity: major
-    Proposed_Repair_Action: Check board management connectivity and try rebooting the board management controller. If problem persists contact next level of support or lock and replace failing host.
+    Proposed_Repair_Action: Check board management connectivity and try rebooting the board management controller. If problem persists, contact next level of support or lock and replace failing host.
     Maintenance_Action: None
     Inhibit_Alarms: False
     Alarm_Type: operational-violation
     Suppression: False
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
+200.016:
+    Type: Alarm
+    Description: Issue in creation or unsealing of LUKS volume
+    Entity_Instance_ID: host=<hostname>
+    Severity: critical
+    Proposed_Repair_Action: If auto-recovery is consistently unable to recover host to the unlocked-enabled state contact next level of support or lock and replace failing host.
+    Maintenance_Action: None
+    Inhibit_Alarms: False
+    Alarm_Type: operational-violation
+    Probable_Cause: unknown
+    Service_Affecting: False
+    Suppression: False
+    Management_Affecting_Severity: major
+    Degrade_Affecting_Severity: none
+    Context: starlingx
 
 200.020:
     Type: Log
-    Description: ["<hostname> has been 'discovered' on the network",
-                  "<hostname> has been 'added' to the system",
-                  "<hostname> has 'entered' multi-node failure avoidance",
-                  "<hostname> has 'exited' multi-node failure avoidance"]
-    Entity_Instance_ID: [host=<hostname>.event=discovered,
-                         host=<hostname>.event=add,
-                         host=<hostname>.event=mnfa_enter,
-                         host=<hostname>.event=mnfa_exit]
+    Description: |-
+        <hostname> has been 'discovered' on the network
+        OR
+        <hostname> has been 'added' to the system
+        OR
+        <hostname> has 'entered' multi-node failure avoidance
+        OR
+        <hostname> has 'exited' multi-node failure avoidance
+    Entity_Instance_ID:
+        host=<hostname>.event=discovered
+        OR
+        host=<hostname>.event=add
+        OR
+        host=<hostname>.event=mnfa_enter
+        OR
+        host=<hostname>.event=mnfa_exit
     Severity: warning
     Alarm_Type: other
     Probable_Cause: unspecified-reason
     Service_Affecting: True
+    Context: starlingx
 
 
 200.021:
     Type: Log
-    Description: ["<hostname> board management controller has been 'provisioned'",
-                  "<hostname> board management controller has been 're-provisioned'",
-                  "<hostname> board management controller has been 'de-provisioned'",
-                  "<hostname> manual 'unlock' request",
-                  "<hostname> manual 'reboot' request",
-                  "<hostname> manual 'reset' request",
-                  "<hostname> manual 'power-off' request",
-                  "<hostname> manual 'power-on' request",
-                  "<hostname> manual 'reinstall' request",
-                  "<hostname> manual 'force-lock' request",
-                  "<hostname> manual 'delete' request",
-                  "<hostname> manual 'controller switchover' request"]
-    Entity_Instance_ID: [host=<hostname>.command=provision,
-                         host=<hostname>.command=reprovision,
-                         host=<hostname>.command=deprovision,
-                         host=<hostname>.command=unlock,
-                         host=<hostname>.command=reboot,
-                         host=<hostname>.command=reset,
-                         host=<hostname>.command=power-off,
-                         host=<hostname>.command=power-on,
-                         host=<hostname>.command=reinstall,
-                         host=<hostname>.command=force-lock,
-                         host=<hostname>.command=delete,
-                         host=<hostname>.command=swact]
+    Description: |-
+        <hostname> board management controller has been 'provisioned'
+        OR
+        <hostname> board management controller has been 're-provisioned'
+        OR
+        <hostname> board management controller has been 'de-provisioned'
+        OR
+        <hostname> manual 'unlock' request
+        OR
+        <hostname> manual 'reboot' request
+        OR
+        <hostname> manual 'reset' request
+        OR
+        <hostname> manual 'power-off' request
+        OR
+        <hostname> manual 'power-on' request
+        OR
+        <hostname> manual 'reinstall' request
+        OR
+        <hostname> manual 'force-lock' request
+        OR
+        <hostname> manual 'delete' request
+        OR
+        <hostname> manual 'controller switchover' request
+    Entity_Instance_ID: |-
+        host=<hostname>.command=provision
+        OR
+        host=<hostname>.command=reprovision
+        OR
+        host=<hostname>.command=deprovision
+        OR
+        host=<hostname>.command=unlock
+        OR
+        host=<hostname>.command=reboot
+        OR
+        host=<hostname>.command=reset
+        OR
+        host=<hostname>.command=power-off
+        OR
+        host=<hostname>.command=power-on
+        OR
+        host=<hostname>.command=reinstall
+        OR
+        host=<hostname>.command=force-lock
+        OR
+        host=<hostname>.command=delete
+        OR
+        host=<hostname>.command=swact
     Severity: warning
     Alarm_Type: other
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 
 200.022:
     Type: Log
-    Description: ["<hostname> is now 'disabled'",
-                  "<hostname> is now 'enabled'",
-                  "<hostname> is now 'online'",
-                  "<hostname> is now 'offline'",
-                  "<hostname> is 'disabled-failed' to the system",
-                  "<hostname> reinstall failed",
-                  "<hostname> reinstall completed successfully"]
-    Entity_Instance_ID: [host=<hostname>.state=disabled,
-                         host=<hostname>.state=enabled,
-                         host=<hostname>.status=online,
-                         host=<hostname>.status=offline,
-                         host=<hostname>.status=failed,
-                         host=<hostname>.status=reinstall-failed,
-                         host=<hostname>.status=reinstall-complete]
+    Description: |-
+        <hostname> is now 'disabled'
+        OR
+        <hostname> is now 'enabled'
+        OR
+        <hostname> is now 'online'
+        OR
+        <hostname> is now 'offline'
+        OR
+        <hostname> is 'disabled-failed' to the system
+        OR
+        <hostname> reinstall failed
+        OR
+        <hostname> reinstall completed successfully
+    Entity_Instance_ID: |-
+        host=<hostname>.state=disabled
+        OR
+        host=<hostname>.state=enabled
+        OR
+        host=<hostname>.status=online
+        OR
+        host=<hostname>.status=offline
+        OR
+        host=<hostname>.status=failed
+        OR
+        host=<hostname>.status=reinstall-failed
+        OR
+        host=<hostname>.status=reinstall-complete
     Severity: warning
     Alarm_Type: other
     Probable_Cause: unspecified-reason
     Service_Affecting: True
+    Context: starlingx
 
 
 #---------------------------------------------------------------------------
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
+
+210.002:
+    Type: Alarm
+    Description: System Restore in progress.
+    Entity_Instance_ID: host=controller
+    Severity: minor
+    Proposed_Repair_Action: Run 'system restore-complete' to complete restore if running restore manually.
+    Maintenance_Action:
+    Inhibit_Alarms:
+    Alarm_Type: operational-violation
+    Probable_Cause: unspecified-reason
+    Service_Affecting: False
+    Suppression: False
+    Management_Affecting_Severity: warning
+    Degrade_Affecting_Severity: none
+    Context: starlingx
 
 
 #---------------------------------------------------------------------------
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
-250.002:
+
+250.003:
     Type: Alarm
-    Description: <hostname> Ceph cache tiering configuration is out-of-date.
-    Entity_Instance_ID: cluster=<dist-fs-uuid>
+    Description: "Kubernetes certificates rotation failed on host[, reason = <reason_text>]"
+    Entity_Instance_ID: host=<hostname>
     Severity: major
-    Proposed_Repair_Action: Apply Ceph service parameter settings.
+    Proposed_Repair_Action: Lock and unlock the host to update services with new certificates (Manually renew kubernetes certificates first if renewal failed).
     Maintenance_Action:
     Inhibit_Alarms:
     Alarm_Type: operational-violation
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
-250.003:
+250.004:
     Type: Alarm
-    Description: "Kubernetes certificates rotation failed on host[, reason = <reason_text>]"
+    Description: "IPsec certificates renewal failed on host[, reason = <reason_text>]"
     Entity_Instance_ID: host=<hostname>
     Severity: major
-    Proposed_Repair_Action: Lock and unlock the host to update services with new certificates (Manually renew kubernetes certificates first if renewal failed).
+    Proposed_Repair_Action: Check cron.log and ipsec-auth.log, fix the issue and rerun the renewal cron job.
     Maintenance_Action:
     Inhibit_Alarms:
     Alarm_Type: operational-violation
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 #---------------------------------------------------------------------------
-#   Deployment Manager Monitor
+#   DEPLOYMENT
 #---------------------------------------------------------------------------
 260.001:
     Type: Alarm
-    Description: "Deployment Manager resource not reconciled: <name>"
+    Description: "Deployment resource not reconciled: <name>"
     Entity_Instance_ID: resource=<crd-resource>,name=<resource-name>
     Severity: major
     Proposed_Repair_Action: Monitor and if condition persists, validate deployment configuration.
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
-#---------------------------------------------------------------------------
-#   VM Compute Services
-#---------------------------------------------------------------------------
-270.001:
+260.002:
     Type: Alarm
-    Description: "Host <host_name> compute services failure[, reason = <reason_text>]"
-    Entity_Instance_ID: host=<host_name>.services=compute
-    Severity: critical
-    Proposed_Repair_Action: Wait for host services recovery to complete; if problem persists contact next level of support
+    Description: "Deployment resource not synchronized: <name>"
+    Entity_Instance_ID: resource=<crd-resource>,name=<resource-name>
+    Severity: minor
+    Proposed_Repair_Action: Monitor and if condition persists, validate deployment configuration.
     Maintenance_Action:
     Inhibit_Alarms:
-    Alarm_Type: processing-error
-    Probable_Cause: unspecified-reason
-    Service_Affecting: True
+    Alarm_Type: operational-violation
+    Probable_Cause: configuration-out-of-date
+    Service_Affecting: False
     Suppression: True
-    Management_Affecting_Severity: warning
+    Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
+#---------------------------------------------------------------------------
+#   VM Compute Services
+#---------------------------------------------------------------------------
 270.101:
     Type: Log
     Description: "Host <host_name> compute services failure[, reason = <reason_text>]"
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: none
 
 270.102:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: none
 
 270.103:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: none
 
 
 275.001:
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: none
 
 
 #---------------------------------------------------------------------------
     Suppression: False
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 280.002:
     Type: Alarm
     Suppression: False
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 280.003:
     Type: Alarm
-    Description: Subcloud Backup Failure
+    Description: Subcloud backup failure
     Entity_Instance_ID: subcloud=<subcloud>
     Severity: minor
-    Proposed_Repair_Action: Retry subcloud backup after checking backup input file. If problem persists contact next level of support.
+    Proposed_Repair_Action: Retry subcloud backup after checking backup input file. If problem persists, contact next level of support.
     Maintenance_Action:
     Inhibit_Alarms:
     Alarm_Type: processing-error
     Suppression: False
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: none
+
+280.004:
+    Type: Alarm
+    Description: |-
+        Critical: Peer <peer_uuid> is in disconnected state. The following subcloud peer groups are impacted: <peer-groups>.
+        Major:    Peer <peer_uuid> connections in disconnected state.
+    Entity_Instance_ID: |-
+        peer=<peer_uuid>
+    Severity: [critical, major]
+    Proposed_Repair_Action: "Check the connectivity between the current system and the reported peer site. If the peer system is down, migrate the affected peer group(s) to the current system for continued subcloud management."
+    Maintenance_Action:
+    Inhibit_Alarms:
+    Alarm_Type: communication
+    Probable_Cause: unknown
+    Service_Affecting: False
+    Suppression: True
+    Management_Affecting_Severity: none
+    Degrade_Affecting_Severity: none
+    Context: starlingx
+
+280.005:
+    Type: Alarm
+    Description: |-
+        Subcloud peer group <peer_group_name> is managed by remote system <peer_uuid> with a lower priority.
+    Entity_Instance_ID: peer_group=<peer_group_name>,peer=<peer_uuid>
+    Severity: [major]
+    Proposed_Repair_Action: "Check the reported peer group state. Migrate it back to the current system if the state is 'rehomed' and the current system is stable. Otherwise, wait until these conditions are met."
+    Maintenance_Action:
+    Inhibit_Alarms: False
+    Alarm_Type: other
+    Probable_Cause: unknown
+    Service_Affecting: False
+    Suppression: True
+    Management_Affecting_Severity: none
+    Degrade_Affecting_Severity: none
+    Context: starlingx
 
 #---------------------------------------------------------------------------
 #   NETWORK
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 
 300.002:
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: critical
+    Context: openstack
 
 
 300.003:
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 
 300.004:
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 
 300.005:
         Communication failure detected over provider network x% on host z%.
     Entity_Instance_ID: host=<hostname>.service=networking.providernet=<pnet-uuid>
     Severity: major
-    Proposed_Repair_Action: Check neighbour switch port VLAN assignments.
+    Proposed_Repair_Action: Check neighbor switch port VLAN assignments.
     Maintenance_Action:
     Inhibit_Alarms:
     Alarm_Type: operational-violation
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 
 300.010:
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 
 300.012:
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: critical
+    Context: openstack
 
 
 300.013:
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: critical
+    Context: openstack
 
 
 300.014:
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: critical
+    Context: none
 
 
 300.015:
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: critical
+    Context: openstack
 
 300.016:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 
 #---------------------------------------------------------------------------
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: major
+    Context: starlingx
 
 
 400.002:
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 
 400.003:
     Suppression: False
     Management_Affecting_Severity: critical
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 
 # 400.004:    // NOTE Removed
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 
 #---------------------------------------------------------------------------
     Alarm_Type: processing-error
     Probable_Cause: unspecified-reason
     Service_Affecting: True
+    Context: openstack
 
 401.002:
     Type: Log
     Alarm_Type: processing-error
     Probable_Cause: unspecified-reason
     Service_Affecting: True
+    Context: openstack
 
 401.003:
     Type: Log
     Alarm_Type: processing-error
     Probable_Cause: unspecified-reason
     Service_Affecting: True
+    Context: starlingx
 
 401.005:
     Type: Log
     Alarm_Type: processing-error
     Probable_Cause: unspecified-reason
     Service_Affecting: True
+    Context: starlingx
 
 401.007:
     Type: Log
     Alarm_Type: processing-error
     Probable_Cause: unspecified-reason
     Service_Affecting: True
+    Context: starlingx
 
 
 #---------------------------------------------------------------------------
     Suppression: False
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: none
 
 500.101:
     Type: Alarm
     Suppression: False
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 500.200:
     Type: Alarm
     Suppression: False
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 500.210:
     Type: Alarm
     Suppression: False
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 500.500:
     Type: Log
     Alarm_Type: integrity-violation
     Probable_Cause: information-modification-detected
     Service_Affecting: False
+    Context: none
 
 
 #---------------------------------------------------------------------------
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 700.002:
     Type: Alarm
     Description: Instance <instance_name> owned by <tenant_name> is paused on host <host_name>
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
-    Proposed_Repair_Action: Unpause the instance
+    Proposed_Repair_Action: Un-pause the instance
     Maintenance_Action:
     Inhibit_Alarms:
     Alarm_Type: processing-error
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 700.003:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 700.004:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 700.005:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 700.006:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 700.007:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 700.008:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 700.009:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 700.010:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 700.011:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 700.012:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 700.013:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 700.014:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 700.015:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: none
 
 700.016:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 700.017:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 
 700.101:
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.102:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.103:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.104:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.105:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.106:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.107:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.108:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.109:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.110:
     Type: Log
-    Description: Deleting instance <instance_name> owned by <tenatn_name>
+    Description: Deleting instance <instance_name> owned by <tenant_name>
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.111:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.112:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.113:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.114:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.115:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.116:
     Type: Log
-    Description: Pause inprogress for instance <instance_name> on host <host_name>
+    Description: Pause in progress for instance <instance_name> on host <host_name>
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.117:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.118:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.119:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.120:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.121:
     Type: Log
-    Description: "Unpause issued <by <tenant_name>|by the system>  against instance <instance_name> owned by <tenant_name> on host <host_name>[, reason = <reason_text>]"
+    Description: "Un-pause issued <by <tenant_name>|by the system> against instance <instance_name> owned by <tenant_name> on host <host_name>[, reason = <reason_text>]"
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.122:
     Type: Log
-    Description: Unpause inprogress for instance <instance_name> on host <host_name>
+    Description: Un-pause in progress for instance <instance_name> on host <host_name>
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.123:
     Type: Log
-    Description: "Unpause rejected for instance <instance_name> paused on host <host_name>[, reason = <reason_text>]"
+    Description: "Un-pause rejected for instance <instance_name> paused on host <host_name>[, reason = <reason_text>]"
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.124:
     Type: Log
-    Description: "Unpause cancelled for instance <instance_name> on host <host_name>[, reason = <reason_text>]"
+    Description: "Un-pause cancelled for instance <instance_name> on host <host_name>[, reason = <reason_text>]"
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.125:
     Type: Log
-    Description: "Unpause failed for instance <instance_name> on host <host_name>[, reason = <reason_text>]"
+    Description: "Un-pause failed for instance <instance_name> on host <host_name>[, reason = <reason_text>]"
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.126:
     Type: Log
-    Description: Unpause complete for instance <instance_name> now enabled on host <host_name>
+    Description: Un-pause complete for instance <instance_name> now enabled on host <host_name>
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.127:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.128:
     Type: Log
-    Description: Suspend inprogress for instance <instance_name> on host <host_name>
+    Description: Suspend in progress for instance <instance_name> on host <host_name>
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.129:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.130:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.131:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.132:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.133:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.134:
     Type: Log
-    Description: Resume inprogress for instance <instance_name> on host <host_name>
+    Description: Resume in progress for instance <instance_name> on host <host_name>
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.135:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.136:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.137:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.138:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.139:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.140:
     Type: Log
-    Description: Start inprogress for instance <instance_name> on host <host_name>
+    Description: Start in progress for instance <instance_name> on host <host_name>
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.141:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.142:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.143:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.144:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.145:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.146:
     Type: Log
-    Description: Stop inprogress for instance <instance_name> on host <host_name>
+    Description: Stop in progress for instance <instance_name> on host <host_name>
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.147:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.148:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.149:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.150:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.151:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.152:
     Type: Log
-    Description: Live-Migrate inprogress for instance <instance_name> from host <host_name>
+    Description: Live-Migrate in progress for instance <instance_name> from host <host_name>
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.153:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.154:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.155:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.156:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.157:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.158:
     Type: Log
-    Description: Cold-Migrate inprogress for instance <instance_name> from host <host_name>
+    Description: Cold-Migrate in progress for instance <instance_name> from host <host_name>
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.159:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.160:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.161:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.162:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.163:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.164:
     Type: Log
-    Description: Cold-Migrate-Confirm inprogress for instance <instance_name> on host <host_name>
+    Description: Cold-Migrate-Confirm in progress for instance <instance_name> on host <host_name>
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.165:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.166:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.167:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.168:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.169:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.170:
     Type: Log
-    Description: Cold-Migrate-Revert inprogress for instance <instance_name> from host <host_name>
+    Description: Cold-Migrate-Revert in progress for instance <instance_name> from host <host_name>
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.171:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.172:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.173:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.174:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.175:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.176:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.177:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.178:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.179:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.180:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.181:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.182:
     Type: Log
-    Description: Reboot inprogress for instance <instance_name> on host <host_name>
+    Description: Reboot in progress for instance <instance_name> on host <host_name>
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.183:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.184:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.185:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.186:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.187:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.188:
     Type: Log
-    Description: Rebuild inprogress for instance <instance_name> on host <host_name>
+    Description: Rebuild in progress for instance <instance_name> on host <host_name>
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.189:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.190:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.191:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.192:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.193:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.194:
     Type: Log
-    Description: Resize inprogress for instance <instance_name> on host <host_name>
+    Description: Resize in progress for instance <instance_name> on host <host_name>
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.195:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.196:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.197:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.198:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.199:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.200:
     Type: Log
-    Description: Resize-Confirm inprogress for instance <instance_name> on host <host_name>
+    Description: Resize-Confirm in progress for instance <instance_name> on host <host_name>
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.201:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.202:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.203:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.204:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.205:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.206:
     Type: Log
-    Description: Resize-Revert inprogress for instance <instance_name> on host <host_name>
+    Description: Resize-Revert in progress for instance <instance_name> on host <host_name>
     Entity_Instance_ID: tenant=<tenant-uuid>.instance=<instance-uuid>
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.207:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.208:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.209:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.210:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.211:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: none
 
 700.212:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: none
 
 700.213:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: none
 
 700.214:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.215:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 700.216:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 
 700.217:
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: openstack
 
 #---------------------------------------------------------------------------
 #   APPLICATION
     Suppression: True
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 750.002:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 750.003:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 750.004:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 750.005:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 750.006:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 #---------------------------------------------------------------------------
 #   STORAGE
 800.001:
     Type: Alarm
     Description: |-
-        Storage Alarm Condition:
-        1 mons down, quorum 1,2 controller-1,storage-0
+        Possible data loss. Any mds, mon or osd is unavailable in storage replication group.
     Entity_Instance_ID: cluster=<dist-fs-uuid>
     Severity: [critical, major]
-    Proposed_Repair_Action: "If problem persists, contact next level of support."
+    Proposed_Repair_Action: "Manually restart Ceph processes and check the state of the Ceph cluster with
+                             'ceph -s'
+                             If problem persists, contact next level of support."
     Maintenance_Action:
     Inhibit_Alarms:
     Alarm_Type: equipment
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 800.010:
     Type: Alarm
     Entity_Instance_ID: cluster=<dist-fs-uuid>.peergroup=<group-x>
     Severity: [critical]
     Proposed_Repair_Action: "Ensure storage hosts from replication group are unlocked and available.
+                             Check replication group state with 'system host-list'
                              Check if OSDs of each storage host are up and running.
-                             If problem persists contact next level of support."
+                             Manually restart Ceph processes and check the state of the Ceph OSDs with
+                             'ceph osd stat' AND/OR 'ceph osd tree'
+                             If problem persists, contact next level of support."
     Maintenance_Action:
     Inhibit_Alarms:
     Alarm_Type: equipment
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 800.011:
     Type: Alarm
     Entity_Instance_ID: cluster=<dist-fs-uuid>.peergroup=<group-x>
     Severity: [major]
     Proposed_Repair_Action: "Ensure storage hosts from replication group are unlocked and available.
+                             Check replication group state with 'system host-list'
                              Check if OSDs of each storage host are up and running.
-                             If problem persists contact next level of support."
+                             Manually restart Ceph processes and check the state of the Ceph OSDs with
+                             'ceph osd stat' AND/OR 'ceph osd tree'
+                             If problem persists, contact next level of support."
     Maintenance_Action:
     Inhibit_Alarms:
     Alarm_Type: equipment
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 800.002:
     Type: Alarm
-    Description: ["Image storage media is full: There is not enough disk space on the image storage media.",
-                  "Instance <instance name> snapshot failed: There is not enough disk space on the image storage media.",
-                  "Supplied <attrs> (<supplied>) and <attrs> generated from uploaded image (<actual>) did not match. Setting image status to 'killed'.",
-                  "Error in store configuration. Adding images to store is disabled.",
-                  "Forbidden upload attempt: <exception>",
-                  "Insufficient permissions on image storage media: <exception>",
-                  "Denying attempt to upload image larger than <size> bytes.",
-                  "Denying attempt to upload image because it exceeds the quota: <exception>",
-                  "Received HTTP error while uploading image <image_id>",
-                  "Client disconnected before sending all data to backend",
-                  "Failed to upload image <image_id>"]
-    Entity_Instance_ID: ["image=<image-uuid>, instance=<instance-uuid>",
-                         "tenant=<tenant-uuid>, instance=<instance-uuid>",
-                         "image=<image-uuid>, instance=<instance-uuid>",
-                         "image=<image-uuid>, instance=<instance-uuid>",
-                         "image=<image-uuid>, instance=<instance-uuid>",
-                         "image=<image-uuid>, instance=<instance-uuid>",
-                         "image=<image-uuid>, instance=<instance-uuid>",
-                         "image=<image-uuid>, instance=<instance-uuid>",
-                         "image=<image-uuid>, instance=<instance-uuid>",
-                         "image=<image-uuid>, instance=<instance-uuid>",
-                         "image=<image-uuid>, instance=<instance-uuid>"]
+    Description: |-
+        Image storage media is full: There is not enough disk space on the image storage media.
+        OR
+        Instance <instance name> snapshot failed: There is not enough disk space on the image storage media.
+        OR
+        Supplied <attrs> (<supplied>) and <attrs> generated from uploaded image (<actual>) did not match. Setting image status to 'killed'.
+        OR
+        Error in store configuration. Adding images to store is disabled.
+        OR
+        Forbidden upload attempt: <exception>.
+        OR
+        Insufficient permissions on image storage media: <exception>.
+        OR
+        Denying attempt to upload image larger than <size> bytes.
+        OR
+        Denying attempt to upload image because it exceeds the quota: <exception>.
+        OR
+        Received HTTP error while uploading image <image_id>.
+        OR
+        Client disconnected before sending all data to backend.
+        OR
+        Failed to upload image <image_id>.
+    Entity_Instance_ID: |-
+        image=<image-uuid> instance=<instance-uuid>
+        OR
+        tenant=<tenant-uuid> instance=<instance-uuid>
+        OR
+        image=<image-uuid> instance=<instance-uuid>
+        OR
+        image=<image-uuid> instance=<instance-uuid>
+        OR
+        image=<image-uuid> instance=<instance-uuid>
+        OR
+        image=<image-uuid> instance=<instance-uuid>
+        OR
+        image=<image-uuid> instance=<instance-uuid>
+        OR
+        image=<image-uuid> instance=<instance-uuid>
+        OR
+        image=<image-uuid> instance=<instance-uuid>
+        OR
+        image=<image-uuid> instance=<instance-uuid>
+        OR
+        image=<image-uuid> instance=<instance-uuid>
     Alarm_Type: [physical-violation,
                  physical-violation,
                  integrity-violation,
     Suppression: False
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 800.100:
     Type: Alarm
     Suppression: False
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: openstack
 
 800.101:
     Type: Alarm
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: openstack
 
-800.103:
+800.104:
     Type: Alarm
     Description: |-
         Storage Alarm Condition:
-        [ Metadata usage for LVM thin pool <VG name>/<Pool name> exceeded threshold and automatic extension failed,
-          Metadata usage for LVM thin pool <VG name>/<Pool name> exceeded threshold ]; threshold x%, actual y%.
-    Entity_Instance_ID: <hostname>.lvmthinpool=<VG name>/<Pool name>
+        <storage-backend-name> configuration failed to apply on host: <host-uuid>.
+    Entity_Instance_ID: storage_backend=<storage-backend-name>
     Severity: critical
-    Proposed_Repair_Action: "Increase Storage Space Allotment for Cinder on the 'lvm' backend.
-                             Consult the System Administration Manual for more details.
+    Proposed_Repair_Action: "Update backend setting to reapply configuration.
+                             Use the following commands to try again:
+                             'system storage-backend-delete <storage-backend-name>'
+                             AND
+                             'system storage-backend-add <storage-backend-name>'
+                             See the |prod-long| documentation at |docs-url| for more details.
                              If problem persists, contact next level of support."
     Maintenance_Action:
     Inhibit_Alarms:
-    Alarm_Type: operational-violation
-    Probable_Cause: threshold-crossed
-    Service_Affecting: False
+    Alarm_Type: equipment
+    Probable_Cause: configuration-or-customization-error
+    Service_Affecting: True
     Suppression: False
     Management_Affecting_Severity: major
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
-800.104:
+800.105:
     Type: Alarm
     Description: |-
-        Storage Alarm Condition:
-        <storage-backend-name> configuration failed to apply on host: <host-uuid>.
-    Entity_Instance_ID: storage_backend=<storage-backend-name>
-    Severity: critical
-    Proposed_Repair_Action: "Update backend setting to reapply configuration.
-                             Consult the System Administration Manual for more details.
+        Filesystem Alarm Condition:
+        <controllerfs_name> controller filesystem was not created/deleted successfully.
+    Entity_Instance_ID: host=<hostname>.controllerfs=<controllerfs_name>
+    Severity: major
+    Proposed_Repair_Action: "Use the create or delete command again:
+                             'system controllerfs-delete' or 'system controllerfs-add'.
                              If problem persists, contact next level of support."
     Maintenance_Action:
     Inhibit_Alarms:
-    Alarm_Type: equipment
-    Probable_Cause: configuration-or-customization-error
+    Alarm_Type: processing-error
+    Probable_Cause: unspecified-reason
     Service_Affecting: True
     Suppression: False
     Management_Affecting_Severity: major
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 #---------------------------------------------------------------------------
 #   KUBERNETES
     Suppression: False
     Management_Affecting_Severity: none
     Degrade_Affecting_Severity: none
+    Context: none
+
+850.002:
+    Type: Alarm
+    Description: Kubernetes cluster unreachable
+    Entity_Instance_ID: kubernetes=k8s-health-check-failed
+    Severity: major
+    Proposed_Repair_Action: "If problem persists,
+                             contact next level of support."
+    Maintenance_Action:
+    Inhibit_Alarms:
+    Alarm_Type: communication
+    Probable_Cause: out-of-service
+    Service_Affecting: True
+    Suppression: False
+    Management_Affecting_Severity: major
+    Degrade_Affecting_Severity: none
+    Context: none
 
 #---------------------------------------------------------------------------
 #   SOFTWARE
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.002:
     Type: Alarm
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.003:
     Type: Alarm
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.004:
     Type: Alarm
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.005:
     Type: Alarm
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.006:
     Type: Alarm
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.007:
     Type: Alarm
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.008:
     Type: Alarm
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.009:
     Type: Alarm
     Suppression: False
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
+
+900.010:
+    Type: Alarm
+    Description: System Config update in progress
+    Entity_Instance_ID: host=controller
+    Severity: minor
+    Proposed_Repair_Action: Wait for system config update to complete
+    Maintenance_Action:
+    Inhibit_Alarms:
+    Alarm_Type: operational-violation
+    Probable_Cause: unspecified-reason
+    Service_Affecting: False
+    Suppression: False
+    Management_Affecting_Severity: warning
+    Degrade_Affecting_Severity: none
+    Context: starlingx
+
+900.011:
+    Type: Alarm
+    Description: System Config update aborted, configurations may not be fully updated
+    Entity_Instance_ID: host=<hostname>
+    Severity: minor
+    Proposed_Repair_Action: Lock the host, wait for the host resource in the deployment namespace to become in-sync, then unlock the host
+    Maintenance_Action:
+    Inhibit_Alarms:
+    Alarm_Type: operational-violation
+    Probable_Cause: unspecified-reason
+    Service_Affecting: False
+    Suppression: False
+    Management_Affecting_Severity: warning
+    Degrade_Affecting_Severity: none
+    Context: starlingx
+
+900.020:
+    Type: Alarm
+    Description: Deploy host completed with success
+    Entity_Instance_ID: host=<hostname>
+    Severity: warning
+    Proposed_Repair_Action: Unlock host
+    Maintenance_Action:
+    Inhibit_Alarms:
+    Alarm_Type: equipment
+    Probable_Cause: unspecified-reason
+    Service_Affecting: False
+    Suppression: False
+    Management_Affecting_Severity: none
+    Degrade_Affecting_Severity: none
+    Context: starlingx
+
+900.021:
+    Type: Alarm
+    Description: Deploy host failed
+    Entity_Instance_ID: host=<hostname>
+    Severity: major
+    Proposed_Repair_Action: Check the logs for errors, fix the issues manually and retry
+    Maintenance_Action:
+    Inhibit_Alarms:
+    Alarm_Type: equipment
+    Probable_Cause: unspecified-reason
+    Service_Affecting: True
+    Suppression: False
+    Management_Affecting_Severity: warning
+    Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.101:
     Type: Alarm
-    Description: Software patch auto-apply inprogress
+    Description: Software patch auto-apply in progress
     Entity_Instance_ID: orchestration=sw-patch
     Severity: major
     Proposed_Repair_Action: Wait for software patch auto-apply to complete; if problem persists contact next level of support
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.102:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.103:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.111:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.112:
     Type: Log
-    Description: Software patch auto-apply inprogress
+    Description: Software patch auto-apply in progress
     Entity_Instance_ID: orchestration=sw-patch
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.113:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.114:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.115:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.116:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.117:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.118:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.119:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.120:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.121:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.201:
     Type: Alarm
-    Description: Software upgrade auto-apply inprogress
+    Description: Software upgrade auto-apply in progress
     Entity_Instance_ID: orchestration=sw-upgrade
     Severity: major
     Proposed_Repair_Action: Wait for software upgrade auto-apply to complete; if problem persists contact next level of support
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.202:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.203:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.211:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.212:
     Type: Log
-    Description: Software upgrade auto-apply inprogress
+    Description: Software upgrade auto-apply in progress
     Entity_Instance_ID: orchestration=sw-upgrade
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.213:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.214:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.215:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.216:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.217:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.218:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.219:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.220:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.221:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
+
+900.231:
+    Type: Alarm
+    Description: Software deploy state out of sync
+    Entity_Instance_ID: orchestration=sw-upgrade
+    Severity: major
+    Proposed_Repair_Action: Wait for the deployment on the active controller to complete. If problem persists contact next level of support
+    Maintenance_Action:
+    Inhibit_Alarms:
+    Alarm_Type: equipment
+    Probable_Cause: unspecified-reason
+    Service_Affecting: True
+    Suppression: False
+    Management_Affecting_Severity: warning
+    Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.301:
     Type: Alarm
-    Description: Firmware Update auto-apply inprogress
+    Description: Firmware Update auto-apply in progress
     Entity_Instance_ID: orchestration=fw-update
     Severity: major
     Proposed_Repair_Action: Wait for firmware update auto-apply to complete; if problem persists contact next level of support
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.302:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.303:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.311:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.312:
     Type: Log
-    Description: Firmware update auto-apply inprogress
+    Description: Firmware update auto-apply in progress
     Entity_Instance_ID: orchestration=fw-update
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.313:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.314:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.315:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.316:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.317:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.318:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.319:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.320:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.321:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.401:
     Type: Alarm
-    Description: Kubernetes upgrade auto-apply inprogress
+    Description: Kubernetes upgrade auto-apply in progress
     Entity_Instance_ID: orchestration=kube-upgrade
     Severity: major
     Proposed_Repair_Action: Wait for kubernetes upgrade auto-apply to complete; if problem persists contact next level of support
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: none
 
 900.402:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: none
 
 900.403:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: none
 
 900.411:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: none
 
 900.412:
     Type: Log
-    Description: Kubernetes upgrade auto-apply inprogress
+    Description: Kubernetes upgrade auto-apply in progress
     Entity_Instance_ID: orchestration=kube-upgrade
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: none
 
 900.413:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: none
 
 900.414:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: none
 
 900.415:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: none
 
 900.416:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: none
 
 900.417:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: none
 
 900.418:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: none
 
 900.419:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: none
 
 900.420:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: none
 
 900.421:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: none
 
 900.501:
     Type: Alarm
-    Description: Kubernetes rootca update auto-apply inprogress
+    Description: Kubernetes rootca update auto-apply in progress
     Entity_Instance_ID: orchestration=kube-rootca-update
     Severity: major
     Proposed_Repair_Action: Wait for kubernetes rootca update auto-apply to complete; if problem persists contact next level of support
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.502:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.503:
     Type: Alarm
     Suppression: True
     Management_Affecting_Severity: warning
     Degrade_Affecting_Severity: none
+    Context: starlingx
 
 900.511:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.512:
     Type: Log
-    Description: Kubernetes rootca update auto-apply inprogress
+    Description: Kubernetes rootca update auto-apply in progress
     Entity_Instance_ID: orchestration=kube-rootca-update
     Severity: critical
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.513:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.514:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.515:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.516:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.517:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.518:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.519:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.520:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
 
 900.521:
     Type: Log
     Alarm_Type: equipment
     Probable_Cause: unspecified-reason
     Service_Affecting: False
+    Context: starlingx
+
+900.601:
+    Type: Alarm
+    Description: System config update auto-apply in progress
+    Entity_Instance_ID: orchestration=system-config-update
+    Severity: major
+    Proposed_Repair_Action: Wait for system config update auto-apply to complete; if problem persists contact next level of support
+    Maintenance_Action:
+    Inhibit_Alarms:
+    Alarm_Type: equipment
+    Probable_Cause: unspecified-reason
+    Service_Affecting: True
+    Suppression: True
+    Management_Affecting_Severity: warning
+    Degrade_Affecting_Severity: none
+    Context: starlingx
+
+900.602:
+    Type: Alarm
+    Description: System config update auto-apply aborting
+    Entity_Instance_ID: orchestration=system-config-update
+    Severity: major
+    Proposed_Repair_Action: Wait for system config update auto-apply abort to complete; if problem persists contact next level of support
+    Maintenance_Action:
+    Inhibit_Alarms:
+    Alarm_Type: equipment
+    Probable_Cause: unspecified-reason
+    Service_Affecting: True
+    Suppression: True
+    Management_Affecting_Severity: warning
+    Degrade_Affecting_Severity: none
+    Context: starlingx
+
+900.603:
+    Type: Alarm
+    Description: System config update auto-apply failed. Command "sw-manager system-config-update-strategy apply" failed
+    Entity_Instance_ID: orchestration=system-config-update
+    Severity: critical
+    Proposed_Repair_Action: Attempt to apply system config update manually; if problem persists contact next level of support
+    Maintenance_Action:
+    Inhibit_Alarms:
+    Alarm_Type: equipment
+    Probable_Cause: underlying-resource-unavailable
+    Service_Affecting: True
+    Suppression: True
+    Management_Affecting_Severity: warning
+    Degrade_Affecting_Severity: none
+    Context: starlingx
+
+900.611:
+    Type: Log
+    Description: System config update auto-apply start
+    Entity_Instance_ID: orchestration=system-config-update
+    Severity: critical
+    Alarm_Type: equipment
+    Probable_Cause: unspecified-reason
+    Service_Affecting: False
+    Context: starlingx
+
+900.612:
+    Type: Log
+    Description: System config update auto-apply in progress
+    Entity_Instance_ID: orchestration=system-config-update
+    Severity: critical
+    Alarm_Type: equipment
+    Probable_Cause: unspecified-reason
+    Service_Affecting: False
+    Context: starlingx
+
+900.613:
+    Type: Log
+    Description: System config update auto-apply rejected
+    Entity_Instance_ID: orchestration=system-config-update
+    Severity: critical
+    Alarm_Type: equipment
+    Probable_Cause: unspecified-reason
+    Service_Affecting: False
+    Context: starlingx
+
+900.614:
+    Type: Log
+    Description: System config update auto-apply cancelled
+    Entity_Instance_ID: orchestration=system-config-update
+    Severity: critical
+    Alarm_Type: equipment
+    Probable_Cause: unspecified-reason
+    Service_Affecting: False
+    Context: starlingx
+
+900.615:
+    Type: Log
+    Description: System config update auto-apply failed
+    Entity_Instance_ID: orchestration=system-config-update
+    Severity: critical
+    Alarm_Type: equipment
+    Probable_Cause: unspecified-reason
+    Service_Affecting: False
+    Context: starlingx
+
+900.616:
+    Type: Log
+    Description: System config update auto-apply completed
+    Entity_Instance_ID: orchestration=system-config-update
+    Severity: critical
+    Alarm_Type: equipment
+    Probable_Cause: unspecified-reason
+    Service_Affecting: False
+    Context: starlingx
+
+900.617:
+    Type: Log
+    Description: System config update auto-apply abort
+    Entity_Instance_ID: orchestration=system-config-update
+    Severity: critical
+    Alarm_Type: equipment
+    Probable_Cause: unspecified-reason
+    Service_Affecting: False
+    Context: starlingx
+
+900.618:
+    Type: Log
+    Description: System config update auto-apply aborting
+    Entity_Instance_ID: orchestration=system-config-update
+    Severity: critical
+    Alarm_Type: equipment
+    Probable_Cause: unspecified-reason
+    Service_Affecting: False
+    Context: starlingx
+
+900.619:
+    Type: Log
+    Description: System config update auto-apply abort rejected
+    Entity_Instance_ID: orchestration=system-config-update
+    Severity: critical
+    Alarm_Type: equipment
+    Probable_Cause: unspecified-reason
+    Service_Affecting: False
+    Context: starlingx
+
+900.620:
+    Type: Log
+    Description: System config update auto-apply abort failed
+    Entity_Instance_ID: orchestration=system-config-update
+    Severity: critical
+    Alarm_Type: equipment
+    Probable_Cause: unspecified-reason
+    Service_Affecting: False
+    Context: starlingx
+
+900.621:
+    Type: Log
+    Description: System config update auto-apply aborted
+    Entity_Instance_ID: orchestration=system-config-update
+    Severity: critical
+    Alarm_Type: equipment
+    Probable_Cause: unspecified-reason
+    Service_Affecting: False
+    Context: starlingx
+
+900.701:
+    Type: Alarm
+    Description: Node <hostname> tainted.
+    Entity_Instance_ID: host=<hostname>
+    Severity: major
+    Proposed_Repair_Action: |-
+            Execute 'kubectl taint nodes <hostname> services=disabled:NoExecute-'
+            If it fails, Execute 'system host-lock <hostname>' followed by
+            'system host-unlock <hostname>'.
+            If issue still persists, contact next level of support.
+    Maintenance_Action: none
+    Inhibit_Alarms:
+    Alarm_Type: operational-violation
+    Probable_Cause: unknown
+    Service_Affecting: True
+    Suppression: False
+    Management_Affecting_Severity: warning
+    Degrade_Affecting_Severity: major
+    Context: starlingx
 ...