-FROM python:3.10-slim-buster
-
-RUN apt-get update && apt-get install -y git gcc procps vim curl ssh
-
-# in case git repo is not accessable
-# RUN mkdir -p /cgtsclient
-# COPY temp/config /cgtsclient/
-RUN git clone --depth 1 --branch r/stx.7.0 https://opendev.org/starlingx/config.git /cgtsclient
-RUN pip install -e /cgtsclient/sysinv/cgts-client/cgts-client/
-
-# RUN mkdir -p /distcloud-client
-# COPY temp/distcloud-client /distcloud-client/
-RUN git clone --depth 1 --branch r/stx.7.0 https://opendev.org/starlingx/distcloud-client.git /distcloud-client/
-RUN pip install -e /distcloud-client/distributedcloud-client
-# in case git repo is not accessable
-
-# RUN git clone --depth 1 --branch master https://github.com/cloudify-incubator/cloudify-helm-plugin.git /helmsdk
-RUN git clone --depth 1 --branch r/stx.7.0 https://opendev.org/starlingx/fault.git /faultclient
-RUN pip install -e /faultclient/python-fmclient/fmclient/
-
+FROM nexus3.onap.org:10001/onap/integration-python:10.1.0
+# https://nexus3.onap.org/#browse/search=keyword%3Dintegration-python:d406d405e4cfbf1186265b01088caf9a
+# https://git.onap.org/integration/docker/onap-python/tree/Dockerfile
+
+USER root
+
+ARG user=orano2
+ARG group=orano2
+# Create a group and user
+RUN addgroup -S $group && adduser -S -D -h /home/$user $user $group && \
+ chown -R $user:$group /home/$user && \
+ mkdir /var/log/$user && \
+ mkdir -p /src && \
+ mkdir -p /configs/ && \
+ mkdir -p /src/o2app/ && \
+ mkdir -p /src/helm_sdk/ && \
+ mkdir -p /etc/o2/ && \
+ chown -R $user:$group /var/log/$user && \
+ chown -R $user:$group /src && \
+ chown -R $user:$group /configs && \
+ chown -R $user:$group /etc/o2/
COPY requirements.txt /tmp/
COPY requirements-stx.txt /tmp/
COPY constraints.txt /tmp/
-RUN pip install -r /tmp/requirements.txt -c /tmp/constraints.txt
-# RUN pip install -r /tmp/requirements-stx.txt
-
-COPY requirements-test.txt /tmp/
-RUN pip install -r /tmp/requirements-test.txt
-
-
-RUN mkdir -p /src
COPY o2ims/ /src/o2ims/
COPY o2dms/ /src/o2dms/
COPY o2common/ /src/o2common/
-
-RUN mkdir -p /src/helm_sdk/
-COPY helm_sdk/ /src/helm_sdk/
-
-RUN mkdir -p /configs/
-COPY configs/ /configs/
-
-RUN mkdir -p /src/o2app/
COPY o2app/ /src/o2app/
COPY setup.py /src/
-RUN pip install -e /src
+COPY helm_sdk/ /src/helm_sdk/
COPY configs/ /etc/o2/
+COPY configs/ /configs/
-COPY tests/ /tests/
-
-RUN curl -O https://get.helm.sh/helm-v3.3.1-linux-amd64.tar.gz;
-RUN tar -zxvf helm-v3.3.1-linux-amd64.tar.gz; cp linux-amd64/helm /usr/local/bin
+RUN set -ex \
+ && apk add --no-cache bash \
+ && apk add --no-cache --virtual .fetch2-deps \
+ git curl \
+ && apk add --no-cache --virtual .build2-deps \
+ bluez-dev \
+ bzip2-dev \
+ dpkg-dev dpkg \
+ expat-dev \
+ gcc \
+ libc-dev \
+ libffi-dev \
+ libnsl-dev \
+ libtirpc-dev \
+ linux-headers \
+ make \
+ ncurses-dev \
+ openssl-dev \
+ pax-utils \
+ sqlite-dev \
+ tcl-dev \
+ tk \
+ tk-dev \
+ util-linux-dev \
+ xz-dev \
+ zlib-dev \
+ && pip install -r /tmp/requirements.txt -r /tmp/requirements-stx.txt -c /tmp/constraints.txt \
+ && curl -O https://get.helm.sh/helm-v3.3.1-linux-amd64.tar.gz; \
+ tar -zxvf helm-v3.3.1-linux-amd64.tar.gz; \
+ cp linux-amd64/helm /usr/local/bin; \
+ rm -f helm-v3.3.1-linux-amd64.tar.gz \
+ && pip install -e /src \
+ && apk del --no-network .fetch2-deps \
+ && apk del --no-network .build2-deps
+
+# && pip install -r /tmp/requirements.txt -r /tmp/requirements-stx.txt -c /tmp/constraints.txt
+# RUN apt-get update && apt-get install -y git gcc procps vim curl ssh
+# && git clone --depth 1 --branch r/stx.7.0 https://opendev.org/starlingx/config.git /cgtsclient \
+# && git clone --depth 1 --branch r/stx.7.0 https://opendev.org/starlingx/distcloud-client.git /distcloud-client/ \
+# && git clone --depth 1 --branch r/stx.7.0 https://opendev.org/starlingx/fault.git /faultclient \
+# && pip install -e /cgtsclient/sysinv/cgts-client/cgts-client \
+# && pip install -e /distcloud-client/distributedcloud-client \
+# && pip install -e /faultclient/python-fmclient/fmclient \
+# && rm -rf /cgtsclient /distcloud-client /faultclient
WORKDIR /src
+
+# USER $user
# pip install retry
# pip install -e /root/o2
-pip install -e /src
+# pip install -e /src
cat <<EOF>>/etc/hosts
127.0.0.1 api
root:
handlers: [console_handler, file_handler]
level: "WARNING"
- propagate: False
- o2common:
- handlers: [console_handler, file_handler]
- level: "WARNING"
- propagate: False
- o2ims:
- handlers: [console_handler, file_handler]
- level: "DEBUG"
- propagate: False
- o2dms:
- handlers: [console_handler, file_handler]
- level: "DEBUG"
- propagate: False
+ # propagate: False
+ # o2common:
+ # handlers: [console_handler, file_handler]
+ # level: "WARNING"
+ # propagate: True
+ # o2ims:
+ # handlers: [console_handler, file_handler]
+ # level: "WARNING"
+ # propagate: True
+ # o2dms:
+ # handlers: [console_handler, file_handler]
+ # level: "WARNING"
+ # propagate: True
handlers:
console_handler:
- level: "DEBUG"
+ level: "NOTSET"
class: "logging.StreamHandler"
formatter: "standard"
file_handler:
- level: "DEBUG"
+ level: "NOTSET"
class: "logging.handlers.RotatingFileHandler"
- filename: "/var/log/o2.log"
+ filename: "/var/log/orano2/o2.log"
formatter: "standard"
maxBytes: 52428800
backupCount: 10
def get_postgres_uri():
- host = os.environ.get("DB_HOST", "localhost")
- port = 54321 if host == "localhost" else 5432
+ # host = os.environ.get("DB_HOST", "localhost")
+ # port = 54321 if host == "localhost" else 5432
+ host = "localhost"
+ port = 5432
password = os.environ.get("DB_PASSWORD", "o2ims123")
user, db_name = "o2ims", "o2ims"
return f"postgresql://{user}:{password}@{host}:{port}/{db_name}"
def get_api_url():
- host_interal = os.environ.get("API_HOST", "localhost")
+ # host_interal = os.environ.get("API_HOST", "localhost")
+ host_interal = "localhost"
host_external = os.environ.get("API_HOST_EXTERNAL_FLOATING")
if config.conf.OCLOUD.API_HOST_EXTERNAL_FLOATING is not None and \
config.conf.OCLOUD.API_HOST_EXTERNAL_FLOATING != '':
def get_redis_host_and_port():
- host = os.environ.get("REDIS_HOST", "localhost")
- port = 63791 if host == "localhost" else 6379
+ # host = os.environ.get("REDIS_HOST", "localhost")
+ # port = 63791 if host == "localhost" else 6379
+ host = "localhost"
+ port = 6379
return dict(host=host, port=port)
def gen_orm_filter(obj: ColumnElement, filter_str: str):
if not filter_str:
return []
- filter_without_space = filter_str.replace(" ", "")
+ # filter_without_space = filter_str.replace(" ", "")
+ filter_without_space = filter_str.strip(' ()')
items = filter_without_space.split(';')
filter_list = list()
for i in items:
- if '(' in i:
- i = i.replace("(", "")
- if ')' in i:
- i = i.replace(")", "")
+ # if '(' in i:
+ # i = i.replace("(", "")
+ # if ')' in i:
+ # i = i.replace(")", "")
filter_expr = i.split(',')
if len(filter_expr) < 3:
continue
- filter_op = filter_expr[0]
- filter_key = filter_expr[1]
+ filter_op = filter_expr[0].strip()
+ filter_key = filter_expr[1].strip()
filter_vals = filter_expr[2:]
filter_list.extend(toFilterArgs(
filter_op, obj, filter_key, filter_vals))
- logger.info('Filter list length: %d' % len(filter_list))
+ logger.debug('Filter list length: %d' % len(filter_list))
return filter_list
def toFilterArgs(operation: str, obj: ColumnElement, key: str, values: list):
- if not hasattr(obj, key):
- logger.warning('Filter attrName %s not in Object %s.' %
- (key, str(obj)))
- raise KeyError(
- 'Filter attrName {} not in the Object'.format(key))
+ # if not hasattr(obj, key):
+ # logger.warning('Filter attrName %s not in Object %s' %
+ # (key, str(obj)))
+ # raise KeyError(
+ # 'Filter attrName {} not in the Object'.format(key))
- if operation in ['eq', 'neq', 'gt', 'lt', 'gte', 'lte']:
- if len(values) != 1:
- raise KeyError(
- 'Filter operation one {} is only support one value.'.
- format(operation))
- elif operation in ['in', 'nin', 'cont', 'ncont']:
- if len(values) == 0:
- raise KeyError('Filter operation {} value is needed.'.
- format(operation))
- else:
- raise KeyError('Filter operation {} not support.'.format(operation))
+ # if operation in ['eq', 'neq', 'gt', 'lt', 'gte', 'lte']:
+ # if len(values) != 1:
+ # raise KeyError(
+ # 'Filter operation one {} is only support one value'.
+ # format(operation))
+ # elif operation in ['in', 'nin', 'cont', 'ncont']:
+ # if len(values) == 0:
+ # raise KeyError('Filter operation {} value is needed'.
+ # format(operation))
+ # else:
+ # raise KeyError('Filter operation {} not support'.format(operation))
ll = list()
if operation == 'eq':
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
+
+from o2common.helper.o2logging import configure_logger
+
+
+# make sure the logger is initialized at the earliest opportunity
+configure_logger()
def get_logger(name=None):
- CONFIG_FILE = os.environ.get(
- "LOGGING_CONFIG_FILE", "/etc/o2/log.yaml")
- if os.path.exists(CONFIG_FILE):
- with open(file=CONFIG_FILE, mode='r', encoding="utf-8") as file:
- config_yaml = yaml.load(stream=file, Loader=yaml.FullLoader)
- logging.config.dictConfig(config=config_yaml)
+ # CONFIG_FILE = os.environ.get(
+ # "LOGGING_CONFIG_FILE", "/etc/o2/log.yaml")
+ # if os.path.exists(CONFIG_FILE):
+ # with open(file=CONFIG_FILE, mode='r', encoding="utf-8") as file:
+ # config_yaml = yaml.load(stream=file, Loader=yaml.FullLoader)
+ # logging.config.dictConfig(config=config_yaml)
logger = logging.getLogger(name)
-
- # override logging level
+ # override root logger's logging level
LOGGING_CONFIG_LEVEL = os.environ.get("LOGGING_CONFIG_LEVEL", None)
if LOGGING_CONFIG_LEVEL:
logger.setLevel(LOGGING_CONFIG_LEVEL)
return logger
+
+
+def configure_logger():
+ CONFIG_FILE = os.environ.get(
+ "LOGGING_CONFIG_FILE", "/etc/o2/log.yaml")
+ if os.path.exists(CONFIG_FILE):
+ with open(file=CONFIG_FILE, mode='r', encoding="utf-8") as file:
+ config_yaml = yaml.load(stream=file, Loader=yaml.FullLoader)
+ logging.config.dictConfig(config=config_yaml)
--- /dev/null
+# Copyright (C) 2022 Wind River Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from flask_restx import fields
+import json
+
+from o2common.helper import o2logging
+logger = o2logging.get_logger(__name__)
+
+
+class Json2Dict(fields.Raw):
+
+ def format(self, value):
+ value2 = None
+ try:
+ value2 = json.loads(value) if value else None
+ except Exception as ex:
+ logger.warning(
+ f"Failed to loads json string: {value}, exception: {str(ex)}")
+ value2 = value
+ return value2
def get_result(self, ret: Tuple[int, List[Serializer]]):
count = ret[0]
- logger.info('List count: {}'.format(count))
+ logger.debug('List count: {}'.format(count))
ret_list = ret[1]
page_total = int(math.ceil(count/self.limit)
) if count > self.limit else 1
mask_val = ''
if 'all_fields' in kwargs:
all_fields_without_space = kwargs['all_fields'].replace(" ", "")
- logger.info('all_fields selector value is {}'.format(
+ logger.debug('all_fields selector value is {}'.format(
all_fields_without_space))
# all_fields = all_fields_without_space.lower()
# if 'true' == all_fields:
# See the License for the specific language governing permissions and
# limitations under the License.
-import re
+# import re
from sqlalchemy.sql.elements import ColumnElement
from o2common.views.route_exception import BadRequestException
def check_filter(obj: ColumnElement, filter_str: str):
if not filter_str:
return
- pattern = r'^(\((eq|neq|gt|lt|gte|lte){1},\w+,[\w -\.]+\)\;?|' +\
- r'\((in|nin|cont|ncont){1},\w*(,[\w -\.]*)*\)\;?)+'
- result = re.match(pattern, filter_str)
- logger.warning('filter: {} match result is {}'.format(filter_str, result))
- if not result:
- raise BadRequestException(
- 'filter value formater not correct.')
+ # pattern = r'^(\((eq|neq|gt|lt|gte|lte){1},\w+,[\w -\.]+\)\;?|' +\
+ # r'\((in|nin|cont|ncont){1},\w*(,[\w -\.]*)*\)\;?)+'
+ # result = re.match(pattern, filter_str)
+ # logger.debug('filter: {} match result is {}'.format(filter_str, result))
+ # if not result:
+ # raise BadRequestException(
+ # 'filter value format is invalid')
check_filter_attribute(obj, filter_str)
def check_filter_attribute(obj: ColumnElement, filter_str: str):
- filter_without_space = filter_str.replace(" ", "")
+ # filter_without_space = filter_str.replace(" ", "")
+ filter_without_space = filter_str.strip(' ()')
+ logger.debug(
+ f"filter_str: {filter_str}, stripped: {filter_without_space}")
items = filter_without_space.split(';')
for i in items:
- if '(' in i:
- i = i.replace("(", "")
- if ')' in i:
- i = i.replace(")", "")
+ # if '(' in i:
+ # i = i.replace("(", "")
+ # if ')' in i:
+ # i = i.replace(")", "")
filter_expr = i.split(',')
if len(filter_expr) < 3:
raise BadRequestException(
- 'Filter {} formater not correct.'.format(i))
+ 'ignore invalid filter {}'.format(i))
continue
- # filter_op = filter_expr[0]
- filter_key = filter_expr[1]
- # filter_vals = filter_expr[2:]
+ filter_op = filter_expr[0].strip()
+ filter_key = filter_expr[1].strip()
+ filter_vals = filter_expr[2:]
+ if filter_op in ["eq", "neq", "gt", "lt", "gte", "lte"]:
+ if len(filter_vals) != 1:
+ raise BadRequestException(
+ "Found {} values: {} while only single value"
+ " is allowed for operation {}".format(
+ len(filter_vals), filter_vals, filter_op)
+ )
+ elif filter_op not in ["in", "nin", "cont", "ncont"]:
+ raise BadRequestException(
+ 'Filter operation {} is invalid'.format(filter_op)
+ )
+ else:
+ pass
if not hasattr(obj, filter_key):
raise BadRequestException(
- 'Filter attrName {} not in the Object'.format(filter_key))
+ 'Filter attrName {} is invalid'.format(filter_key))
ResourceTypeEnum.OCLOUD, systems[0]) if systems else None
def getSubcloudList(self):
+ self.dcclient = self.getDcmanagerClient()
subs = self.dcclient.subcloud_manager.list_subclouds()
known_subs = [sub for sub in subs if sub.sync_status != 'unknown']
return known_subs
Column("name", String(255)),
Column("globalAssetId", String(255)),
Column("parentId", String(255)),
- Column("description", String(255)),
- Column("elements", Text())
- # Column("extensions", String(1024))
+ Column("description", String()),
+ Column("elements", Text()),
+ Column("extensions", String())
)
deploymentmanager = Table(
def __init__(self, resourceId: str, resourceTypeId: str,
resourcePoolId: str, name: str, parentId: str = '',
gAssetId: str = '', elements: str = '',
- description: str = '') -> None:
+ description: str = '', extensions: str = '') -> None:
super().__init__()
self.resourceId = resourceId
self.description = description
self.globalAssetId = gAssetId
self.resourcePoolId = resourcePoolId
self.elements = elements
- self.extensions = []
+ self.extensions = extensions
self.name = name
self.parentId = parentId
# pylint: disable=unused-argument
from __future__ import annotations
import uuid
-# import json
+import json
from o2ims.domain import commands, events
from o2ims.domain.stx_object import StxGenericModel
resourcepool_id = parent.resourcePoolId
parent_id = parent.resourceId
gAssetId = '' # TODO: global ID
- description = "%s : An Accelerator resource of the physical server"\
- % stxobj.name
+ # description = "%s : An Accelerator resource of the physical server"\
+ # % stxobj.name
+ content = json.loads(stxobj.content)
+ selected_keys = [
+ "name", "pdevice", "pciaddr", "pvendor_id", "pvendor",
+ "pclass_id", "pclass", "psvendor", "psdevice",
+ "sriov_totalvfs", "sriov_numvfs", "numa_node"
+ ]
+ filtered = dict(
+ filter(lambda item: item[0] in selected_keys, content.items()))
+ extensions = json.dumps(filtered)
+ description = ";".join([f"{k}:{v}" for k, v in filtered.items()])
resource = Resource(stxobj.id, resourcetype_id, resourcepool_id,
stxobj.name, parent_id, gAssetId, stxobj.content,
- description)
+ description, extensions)
resource.createtime = stxobj.createtime
resource.updatetime = stxobj.updatetime
resource.hash = stxobj.hash
# pylint: disable=unused-argument
from __future__ import annotations
import uuid
-# import json
+import json
from o2ims.domain import commands, events
from o2ims.domain.stx_object import StxGenericModel
resourcepool_id = parent.resourcePoolId
parent_id = parent.resourceId
gAssetId = '' # TODO: global ID
- description = "%s : A CPU resource of the physical server" % stxobj.name
+ # description = "%s : A CPU resource of the physical server" % stxobj.name
+ content = json.loads(stxobj.content)
+ selected_keys = [
+ "cpu", "core", "thread", "allocated_function", "numa_node",
+ "cpu_model", "cpu_family"
+ ]
+ filtered = dict(
+ filter(lambda item: item[0] in selected_keys, content.items()))
+ extensions = json.dumps(filtered)
+ description = ";".join([f"{k}:{v}" for k, v in filtered.items()])
resource = Resource(stxobj.id, resourcetype_id, resourcepool_id,
stxobj.name, parent_id, gAssetId, stxobj.content,
- description)
+ description, extensions)
resource.createtime = stxobj.createtime
resource.updatetime = stxobj.updatetime
resource.hash = stxobj.hash
# pylint: disable=unused-argument
from __future__ import annotations
import uuid
-# import json
+import json
from o2ims.domain import commands, events
from o2ims.domain.stx_object import StxGenericModel
resourcepool_id = parent.resourcePoolId
parent_id = parent.resourceId
gAssetId = '' # TODO: global ID
- description = "%s : An ethernet resource of the physical server"\
- % stxobj.name
+ # description = "%s : An ethernet resource of the physical server"\
+ # % stxobj.name
+ content = json.loads(stxobj.content)
+ selected_keys = [
+ "name", "namedisplay", "dev_id", "pdevice", "capabilities",
+ "type", "driver", "mac", "numa_node",
+ "pciaddr", "pclass", "psvendor", "psdevice",
+ "sriov_totalvfs", "sriov_numvfs", "dpdksupport",
+ "sriov_vf_driver", "sriov_vf_pdevice_id", "interface_uuid"
+ ]
+ filtered = dict(
+ filter(lambda item: item[0] in selected_keys, content.items()))
+ extensions = json.dumps(filtered)
+ description = ";".join([f"{k}:{v}" for k, v in filtered.items()])
resource = Resource(stxobj.id, resourcetype_id, resourcepool_id,
stxobj.name, parent_id, gAssetId, stxobj.content,
- description)
+ description, extensions)
resource.createtime = stxobj.createtime
resource.updatetime = stxobj.updatetime
resource.hash = stxobj.hash
# pylint: disable=unused-argument
from __future__ import annotations
import uuid
-# import json
+import json
from typing import Callable
from o2ims.domain import commands, events
resourcepool_id = parentid
parent_id = None # the root of the resource has no parent id
gAssetId = '' # TODO: global ID
- description = "%s : A physical server resource" % stxobj.name
+ # description = "%s : A physical server resource" % stxobj.name
+ content = json.loads(stxobj.content)
+ selected_keys = [
+ "hostname", "personality", "id", "mgmt_ip", "mgmt_mac",
+ "software_load", "capabilities",
+ "operational", "availability", "administrative",
+ "boot_device", "rootfs_device", "install_state", "subfunctions",
+ "clock_synchronization", "max_cpu_mhz_allowed"
+ ]
+ filtered = dict(
+ filter(lambda item: item[0] in selected_keys, content.items()))
+ extensions = json.dumps(filtered)
+ description = ";".join([f"{k}:{v}" for k, v in filtered.items()])
resource = Resource(stxobj.id, resourcetype_id, resourcepool_id,
stxobj.name, parent_id, gAssetId, stxobj.content,
- description)
+ description, extensions)
resource.createtime = stxobj.createtime
resource.updatetime = stxobj.updatetime
resource.hash = stxobj.hash
# pylint: disable=unused-argument
from __future__ import annotations
import uuid
-# import json
+import json
from o2ims.domain import commands, events
from o2ims.domain.stx_object import StxGenericModel
resourcepool_id = parent.resourcePoolId
parent_id = parent.resourceId
gAssetId = '' # TODO: global ID
- description = "%s : An interface resource of the physical server"\
- % stxobj.name
+ # description = "%s : An interface resource of the physical server"\
+ # % stxobj.name
+ content = json.loads(stxobj.content)
+ selected_keys = [
+ "ifname", "iftype", "imac", "vlan_id", "imtu",
+ "ifclass", "uses", "max_tx_rate",
+ "sriov_vf_driver", "sriov_numvfs", "ptp_role"
+ ]
+ filtered = dict(
+ filter(lambda item: item[0] in selected_keys, content.items()))
+ extensions = json.dumps(filtered)
+ description = ";".join([f"{k}:{v}" for k, v in filtered.items()])
resource = Resource(stxobj.id, resourcetype_id, resourcepool_id,
stxobj.name, parent_id, gAssetId, stxobj.content,
- description)
+ description, extensions)
resource.createtime = stxobj.createtime
resource.updatetime = stxobj.updatetime
resource.hash = stxobj.hash
# pylint: disable=unused-argument
from __future__ import annotations
import uuid
-# import json
+import json
from o2ims.domain import commands, events
from o2ims.domain.stx_object import StxGenericModel
resourcepool_id = parent.resourcePoolId
parent_id = parent.resourceId
gAssetId = '' # TODO: global ID
- description = "%s : A memory resource of the physical server"\
- % stxobj.name
+ # description = "%s : A memory resource of the physical server"\
+ # % stxobj.name
+ content = json.loads(stxobj.content)
+ selected_keys = [
+ "memtotal_mib", "memavail_mib", "vm_hugepages_use_1G",
+ "vm_hugepages_possible_1G", "hugepages_configured",
+ "vm_hugepages_avail_1G", "vm_hugepages_nr_1G",
+ "vm_hugepages_nr_4K", "vm_hugepages_nr_2M",
+ "vm_hugepages_possible_2M", "vm_hugepages_avail_2M",
+ "platform_reserved_mib", "numa_node"
+ ]
+ filtered = dict(
+ filter(lambda item: item[0] in selected_keys, content.items()))
+ extensions = json.dumps(filtered)
+ description = ";".join([f"{k}:{v}" for k, v in filtered.items()])
resource = Resource(stxobj.id, resourcetype_id, resourcepool_id,
stxobj.name, parent_id, gAssetId, stxobj.content,
- description)
+ description, extensions)
resource.createtime = stxobj.createtime
resource.updatetime = stxobj.updatetime
resource.hash = stxobj.hash
required=True,
description='Alarm Event Record ID'),
'resourceTypeId': fields.String,
+ 'resourceTypeID': fields.String(attribute='resourceTypeId'),
'resourceId': fields.String,
+ 'resourceID': fields.String(attribute='resourceId'),
+ 'alarmEventRecordID':
+ fields.String(attribute='alarmEventRecordId'),
'alarmDefinitionId': fields.String,
+ 'alarmDefinitionID': fields.String(attribute='alarmDefinitionId'),
+ 'probableCauseId': fields.String,
+ 'probableCauseID': fields.String(attribute='probableCauseId'),
'alarmRaisedTime': fields.String,
'perceivedSeverity': fields.String,
+ 'alarmChangedTime': fields.String,
+ 'alarmAcknowledgeTime': fields.String,
+ 'alarmAcknowledged': fields.Boolean,
+ 'extensions': fields.Raw(attribute='extensions'),
}
)
from flask_restx import fields
from o2ims.views.api_ns import api_ims_inventory as api_ims_inventory_v1
+from o2common.views.flask_restx_fields import Json2Dict
class OcloudDTO:
'parentId': fields.String,
'description': fields.String,
# 'elements': fields.String,
- 'extensions': fields.String
+ # 'extensions': fields.String
+ 'extensions': Json2Dict(attribute='extensions')
+ # 'extensions': fields.Raw(attribute='extensions')
},
mask='{resourceId,resourcePoolId,resourceTypeId,description,parentId}'
)
'parentId': fields.String,
'description': fields.String,
# 'elements': fields.String,
- 'extensions': fields.String
+ # 'extensions': fields.String
+ 'extensions': Json2Dict(attribute='extensions')
+ # 'extensions': fields.Raw(attribute='extensions')
}
if iteration_number:
resource_json_mapping['elements'] = fields.List(
# ---------- Resources ---------- #
@api_ims_inventory_v1.route("/v1/resourcePools/<resourcePoolID>/resources")
@api_ims_inventory_v1.param('resourcePoolID', 'ID of the resource pool')
+@api_ims_inventory_v1.response(404, 'Resource pool not found')
# @api_ims_inventory_v1.param('sort', 'sort by column name',
# _in='query')
# @api_ims_inventory_v1.param('per_page', 'The number of results per page ' +
if args.nextpage_opaque_marker is not None:
kwargs['page'] = args.nextpage_opaque_marker
kwargs['filter'] = args.filter if args.filter is not None else ''
-
ret = ocloud_view.resources(resourcePoolID, bus.uow, **kwargs)
+ if ret is None:
+ raise NotFoundException("Resources under {} doesn't exist".format(
+ resourcePoolID))
return link_header(request.full_path, ret)
@api_ims_inventory_v1.doc('Get resource')
@api_ims_inventory_v1.marshal_with(model)
def get(self, resourcePoolID, resourceID):
- result = ocloud_view.resource_one(resourceID, bus.uow)
- if result is not None:
- return result
- raise NotFoundException("Resource {} doesn't exist".format(
- resourceID))
+ result = ocloud_view.resource_one(resourceID, bus.uow, resourcePoolID)
+ if result is None:
+ raise NotFoundException("Resource {} doesn't exist".format(
+ resourceID))
+ return result
# ---------- DeploymentManagers ---------- #
def resources(resourcePoolId: str, uow: unit_of_work.AbstractUnitOfWork,
**kwargs):
+ with uow:
+ first = uow.resource_pools.get(resourcePoolId)
+ if first is None:
+ raise NotFoundException("ResourcePool {} doesn't exist".format(
+ resourcePoolId))
pagination = Pagination(**kwargs)
# filter key should be the same with database name
query_kwargs = pagination.get_pagination()
return pagination.get_result(ret)
-def resource_one(resourceId: str, uow: unit_of_work.AbstractUnitOfWork):
+def resource_one(resourceId: str,
+ uow: unit_of_work.AbstractUnitOfWork, resourcePoolId: str):
with uow:
- first = uow.resources.get(resourceId)
- return first.serialize() if first is not None else None
+        resourcePool = uow.resource_pools.get(resourcePoolId)
+        if resourcePool is None:
+ raise NotFoundException("ResourcePool {} doesn't exist".format(
+ resourcePoolId))
+
+ first = uow.resources.get(resourceId)
+ if first is None:
+ raise NotFoundException("Resource {} doesn't exist".format(
+ resourceId))
+ return first.serialize()
def deployment_managers(uow: unit_of_work.AbstractUnitOfWork, **kwargs):
--e git+https://opendev.org/starlingx/distcloud-client.git@master#egg=distributedcloud-client&subdirectory=distributedcloud-client
--e git+https://opendev.org/starlingx/config.git@master#egg=cgtsclient&subdirectory=sysinv/cgts-client/cgts-client
-# -e git+https://github.com/cloudify-incubator/cloudify-helm-plugin.git@master#egg=helmsdk&subdirectory=helm_sdk
--e git+https://opendev.org/starlingx/fault.git@master#egg=fmclient&subdirectory=python-fmclient/fmclient
+# -e git+https://opendev.org/starlingx/distcloud-client.git@master#egg=distributedcloud-client&subdirectory=distributedcloud-client
+# -e git+https://opendev.org/starlingx/config.git@master#egg=cgtsclient&subdirectory=sysinv/cgts-client/cgts-client
+# -e git+https://opendev.org/starlingx/fault.git@master#egg=fmclient&subdirectory=python-fmclient/fmclient
+
+-e git+https://opendev.org/starlingx/distcloud-client.git@r/stx.7.0#egg=distributedcloud-client&subdirectory=distributedcloud-client
+-e git+https://opendev.org/starlingx/config.git@r/stx.7.0#egg=cgtsclient&subdirectory=sysinv/cgts-client/cgts-client
+-e git+https://opendev.org/starlingx/fault.git@r/stx.7.0#egg=fmclient&subdirectory=python-fmclient/fmclient
# https://github.com/python-restx/flask-restx/issues/460
# Workaround for this issue
-werkzeug<=2.1.2
\ No newline at end of file
+werkzeug<=2.1.2
+
+pyOpenSSL
return_value.serialize.return_value = None
# Query return None
- resource_res = ocloud_view.resource_one(resource_id1, uow)
+ resource_res = ocloud_view.resource_one(
+ resource_id1, uow, resource_pool_id1)
assert resource_res is None
session.return_value.query.return_value.filter_by.return_value.first.\
"resourceId": resource_id1,
"resourcePoolId": resource_pool_id1
}
-
- resource_res = ocloud_view.resource_one(resource_id1, uow)
- assert str(resource_res.get("resourceId")) == resource_id1
+ resource_res = ocloud_view.resource_one(
+ resource_id1, uow, resource_pool_id1)
+    assert str(resource_res.get("resourceId")) == resource_id1
+    assert str(resource_res.get("resourcePoolId")) == resource_pool_id1
def test_view_deployment_managers(mock_uow):