Docs: Enable O2 DMS by exposing k8s API endpoint
author     Zhang Rong(Jon) <rong.zhang@windriver.com>
           Thu, 26 May 2022 08:20:01 +0000 (16:20 +0800)
committer  Zhang Rong(Jon) <rong.zhang@windriver.com>
           Fri, 10 Jun 2022 16:38:32 +0000 (00:38 +0800)
1. Update the API doc; add the default value of the profile
2. Add the k8s profile user guide; support getting the profile with an ETSI
   number, like sol0018
3. Remove the profile file download
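
For example (using the ${OAM_IP} and ${dmsID} variables described in the
user guide below), the Kubernetes profile of a deployment manager can now
be queried with:

  curl -s "http://${OAM_IP}:30205/o2ims_infrastructureInventory/v1/deploymentManagers/${dmsID}?profile=sol0018"

Without the 'profile' query parameter the 'default' profile is returned,
which does not include the profileData fields.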

Issue-ID: INF-264
Signed-off-by: Zhang Rong(Jon) <rong.zhang@windriver.com>
Change-Id: I3ceb88b29078f8b63baa0d8082b02999e4f80525

docs/api.rst
docs/user-guide.rst
o2dms/api/dms_dto.py
o2dms/api/dms_lcm_view.py
o2dms/api/dms_route.py
o2ims/domain/ocloud.py
o2ims/views/ocloud_dto.py
o2ims/views/ocloud_route.py
o2ims/views/ocloud_view.py
tests/unit/test_ocloud.py

diff --git a/docs/api.rst b/docs/api.rst
index 4763897..32c8f90 100644
@@ -1,9 +1,9 @@
 .. This work is licensed under a Creative Commons Attribution 4.0 International License.
 .. SPDX-License-Identifier: CC-BY-4.0
-.. Copyright (C) 2021 Wind River Systems, Inc.
+.. Copyright (C) 2021-2022 Wind River Systems, Inc.
 
 INF O2 Services API 1.0.0
-=============================
+=========================
 
 .. toctree::
     :maxdepth: 3
@@ -877,6 +877,7 @@ Parameters
     :header: "Name", "Located in", "Required", "Type", "Format", "Properties", "Description"
     :widths: 20, 15, 10, 10, 10, 20, 30
 
+        profile | query | No | string |  |  | DMS profile: value supports "sol0018"
         deploymentManagerID | path | Yes | string |  |  | ID of the deployment manager
 
 
@@ -914,6 +915,14 @@ Type: :ref:`DeploymentManagerGetDto <d_e936cc219a004ab92ac027b2690bdd5e>`
         "deploymentManagerId": "somestring",
         "description": "somestring",
         "name": "somestring",
+        "profileData": {
+            "admin_client_cert": "somestring",
+            "admin_client_key": "somestring",
+            "admin_user": "somestring",
+            "cluster_api_endpoint": "somestring",
+            "cluster_ca_cert": "somestring"
+        },
+        "profileName": "somestring",
         "supportedLocations": "somestring"
     }
 
@@ -1319,7 +1328,7 @@ Responses
 Success
 
 
-Type: array of :ref:`DeploymentManagerGetDto <d_e936cc219a004ab92ac027b2690bdd5e>`
+Type: array of :ref:`DeploymentManagerListDto <d_b50b514bc3afc99684dcf3a7c2fc8b60>`
 
 
 **Example:**
@@ -1334,6 +1343,10 @@ Type: array of :ref:`DeploymentManagerGetDto <d_e936cc219a004ab92ac027b2690bdd5e
             "deploymentManagerId": "somestring",
             "description": "somestring",
             "name": "somestring",
+            "profileSupportList": [
+                "somestring",
+                "somestring"
+            ],
             "supportedLocations": "somestring"
         },
         {
@@ -1343,6 +1356,10 @@ Type: array of :ref:`DeploymentManagerGetDto <d_e936cc219a004ab92ac027b2690bdd5e
             "deploymentManagerId": "somestring",
             "description": "somestring",
             "name": "somestring",
+            "profileSupportList": [
+                "somestring",
+                "somestring"
+            ],
             "supportedLocations": "somestring"
         }
     ]
@@ -1828,6 +1845,43 @@ DeploymentManagerGetDto Model Structure
         deploymentManagerId | Yes | string |  |  | Deployment manager ID
         description | No | string |  |  | 
         name | No | string |  |  | 
+        profileData | No | :ref:`DeploymentManagerGetDtoProfile <d_51fed249638cf054f9cc4c7832c7cbe4>` |  |  | 
+        profileName | No | string |  |  | 
+        supportedLocations | No | string |  |  | 
+
+.. _d_51fed249638cf054f9cc4c7832c7cbe4:
+
+DeploymentManagerGetDtoProfile Model Structure
+----------------------------------------------
+
+.. csv-table::
+    :delim: |
+    :header: "Name", "Required", "Type", "Format", "Properties", "Description"
+    :widths: 20, 10, 15, 15, 30, 25
+
+        admin_client_cert | No | string |  |  | 
+        admin_client_key | No | string |  |  | 
+        admin_user | No | string |  |  | 
+        cluster_api_endpoint | No | string |  |  | 
+        cluster_ca_cert | No | string |  |  | 
+
+.. _d_b50b514bc3afc99684dcf3a7c2fc8b60:
+
+DeploymentManagerListDto Model Structure
+----------------------------------------
+
+.. csv-table::
+    :delim: |
+    :header: "Name", "Required", "Type", "Format", "Properties", "Description"
+    :widths: 20, 10, 15, 15, 30, 25
+
+        capabilities | No | string |  |  | 
+        capacity | No | string |  |  | 
+        deploymentManagementServiceEndpoint | No | string |  |  | 
+        deploymentManagerId | Yes | string |  |  | Deployment manager ID
+        description | No | string |  |  | 
+        name | No | string |  |  | 
+        profileSupportList | No | array of string |  |  | Profile support list, use default for the return endpoint
         supportedLocations | No | string |  |  | 
 
 .. _d_086ee84f2c2cf010478bfc73a87b5e80:
diff --git a/docs/user-guide.rst b/docs/user-guide.rst
index ca4a19f..6101c7f 100644
@@ -1,12 +1,12 @@
 .. This work is licensed under a Creative Commons Attribution 4.0 International License.
 .. SPDX-License-Identifier: CC-BY-4.0
-.. Copyright (C) 2021 Wind River Systems, Inc.
+.. Copyright (C) 2021-2022 Wind River Systems, Inc.
 
 INF O2 Service User Guide
 =========================
 
-This guide will introduce the process that make INF O2 interface work with
-SMO.
+This guide introduces the process that makes the INF O2 interface work
+with the SMO.
 
 -  Assume you have an O2 service with INF platform environment
 
@@ -32,8 +32,9 @@ SMO.
 
    -  Resource pool
 
-      One INF platform have one resource pool, all the resources that belong
-      to this INF platform will be organized into this resource pool
+      One INF platform has one resource pool; all the resources that
+      belong to this INF platform are organized into this resource
+      pool
 
       Get the resource pool information through this interface
 
@@ -145,7 +146,8 @@ SMO.
    We need to do some preparation to make the helm repo work and include
    our firewall chart inside of the repository.
 
-      Get the DMS Id in the INF O2 service, and set it into bash environment
+      Get the DMS Id from the INF O2 service, and set it into the bash
+      environment
 
       .. code:: bash
 
@@ -260,3 +262,143 @@ SMO.
          echo ${NfDeploymentId} # Check the exported deployment id
 
          curl --location --request DELETE "http://${OAM_IP}:30205/o2dms/v1/${dmsId}/O2dms_DeploymentLifecycle/NfDeployment/${NfDeploymentId}"
+
+-  Use Kubernetes Control Client through O2 DMS profile
+
+   Assume you have the kubectl command tool installed in your Linux
+   environment.
+
+   Also install the ‘jq’ command for your Linux bash terminal. If you
+   use Ubuntu, you can follow the commands below to install it.
+
+   .. code:: bash
+
+      # install the 'jq' command
+      sudo apt-get install -y jq
+
+      # install 'kubectl' command
+      sudo apt-get install -y apt-transport-https
+      echo "deb http://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial main" | \
+      sudo tee -a /etc/apt/sources.list.d/kubernetes.list
+      gpg --keyserver keyserver.ubuntu.com --recv-keys 836F4BEB
+      gpg --export --armor 836F4BEB | sudo apt-key add -
+      sudo apt-get update
+      sudo apt-get install -y kubectl
+
+   We need to get the Kubernetes profile to set up the kubectl command
+   tool.
+
+   Get the DMS Id from the INF O2 service, and set it into the bash
+   environment.
+
+   .. code:: bash
+
+      # Get all DMS ID, and print them with command
+      dmsIDs=$(curl -s -X 'GET' \
+        "http://${OAM_IP}:30205/o2ims_infrastructureInventory/v1/deploymentManagers" \
+        -H 'accept: application/json' | jq --raw-output '.[]["deploymentManagerId"]')
+      for i in $dmsIDs;do echo ${i};done;
+
+      # Choose one DMS and set it into the bash environment; here the first one is used
+      export dmsID=$(curl -s -X 'GET' \
+        "http://${OAM_IP}:30205/o2ims_infrastructureInventory/v1/deploymentManagers" \
+        -H 'accept: application/json' | jq --raw-output '.[0]["deploymentManagerId"]')
+
+      echo ${dmsID} # check the exported DMS Id
+
+   The ‘kubectl’ profile needs the cluster name; here it is assumed to
+   be “o2dmsk8s1”.
+
+   It also needs the server endpoint address, the username and its
+   credentials, and, for an environment with Certificate Authority
+   validation, the CA data to be set up.
+
+   .. code:: bash
+
+      CLUSTER_NAME="o2dmsk8s1" # set the cluster name
+
+      K8S_SERVER=$(curl -s -X 'GET' \
+        "http://${OAM_IP}:30205/o2ims_infrastructureInventory/v1/deploymentManagers/${dmsID}?profile=sol0018" \
+        -H 'accept: application/json' | jq --raw-output '.["profileData"]["cluster_api_endpoint"]')
+      K8S_CA_DATA=$(curl -s -X 'GET' \
+        "http://${OAM_IP}:30205/o2ims_infrastructureInventory/v1/deploymentManagers/${dmsID}?profile=sol0018" \
+        -H 'accept: application/json' | jq --raw-output '.["profileData"]["cluster_ca_cert"]')
+
+      K8S_USER_NAME=$(curl -s -X 'GET' \
+        "http://${OAM_IP}:30205/o2ims_infrastructureInventory/v1/deploymentManagers/${dmsID}?profile=sol0018" \
+        -H 'accept: application/json' | jq --raw-output '.["profileData"]["admin_user"]')
+      K8S_USER_CLIENT_CERT_DATA=$(curl -s -X 'GET' \
+        "http://${OAM_IP}:30205/o2ims_infrastructureInventory/v1/deploymentManagers/${dmsID}?profile=sol0018" \
+        -H 'accept: application/json' | jq --raw-output '.["profileData"]["admin_client_cert"]')
+      K8S_USER_CLIENT_KEY_DATA=$(curl -s -X 'GET' \
+        "http://${OAM_IP}:30205/o2ims_infrastructureInventory/v1/deploymentManagers/${dmsID}?profile=sol0018" \
+        -H 'accept: application/json' | jq --raw-output '.["profileData"]["admin_client_key"]')
+
+
+      # If you do not want to set up the CA data, you can execute the following command to skip the TLS verification
+      # kubectl config set-cluster ${CLUSTER_NAME} --server=${K8S_SERVER} --insecure-skip-tls-verify
+
+      kubectl config set-cluster ${CLUSTER_NAME} --server=${K8S_SERVER}
+      kubectl config set clusters.${CLUSTER_NAME}.certificate-authority-data ${K8S_CA_DATA}
+
+      kubectl config set-credentials ${K8S_USER_NAME}
+      kubectl config set users.${K8S_USER_NAME}.client-certificate-data ${K8S_USER_CLIENT_CERT_DATA}
+      kubectl config set users.${K8S_USER_NAME}.client-key-data ${K8S_USER_CLIENT_KEY_DATA}
+
+      # set the context and use it
+      kubectl config set-context ${K8S_USER_NAME}@${CLUSTER_NAME} --cluster=${CLUSTER_NAME} --user ${K8S_USER_NAME}
+      kubectl config use-context ${K8S_USER_NAME}@${CLUSTER_NAME}
+
+      kubectl get ns # check the command working with this context
+
+
+   Now you can use “kubectl”, which means the Kubernetes client is set
+   up successfully. However, it uses the default admin user, so it is
+   recommended that you create an account for yourself.
+
+   Create a new user and service account for K8S with the
+   “cluster-admin” role, and set the token of this user in the bash
+   environment.
+
+   .. code:: bash
+
+      USER="admin-user"
+      NAMESPACE="kube-system"
+
+      cat <<EOF > admin-login.yaml
+      apiVersion: v1
+      kind: ServiceAccount
+      metadata:
+        name: ${USER}
+        namespace: kube-system
+      ---
+      apiVersion: rbac.authorization.k8s.io/v1
+      kind: ClusterRoleBinding
+      metadata:
+        name: ${USER}
+      roleRef:
+        apiGroup: rbac.authorization.k8s.io
+        kind: ClusterRole
+        name: cluster-admin
+      subjects:
+      - kind: ServiceAccount
+        name: ${USER}
+        namespace: kube-system
+      EOF
+
+      kubectl apply -f admin-login.yaml
+      TOKEN_DATA=$(kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep ${USER} | awk '{print $1}') | grep "token:" | awk '{print $2}')
+      echo $TOKEN_DATA
+
+   Set the new user in ‘kubectl’ to replace the original user, and set
+   the default namespace in the context.
+
+   .. code:: bash
+
+      NAMESPACE=default
+      TOKEN_DATA=<TOKEN_DATA from INF>
+
+      USER="admin-user"
+      CLUSTER_NAME="o2dmsk8s1"
+
+      kubectl config set-credentials ${USER} --token=$TOKEN_DATA
+      kubectl config set-context ${USER}@inf-cluster --cluster=${CLUSTER_NAME} --user ${USER} --namespace=${NAMESPACE}
+      kubectl config use-context ${USER}@inf-cluster
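+
+   As a quick check (reusing the NAMESPACE value set above), you can
+   confirm that the new context and token-based user work:
+
+   .. code:: bash
+
+      kubectl config current-context # should print admin-user@inf-cluster
+
+      kubectl get pods -n ${NAMESPACE} # list pods with the new user's token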
diff --git a/o2dms/api/dms_dto.py b/o2dms/api/dms_dto.py
index ae699f4..d6d3918 100644
@@ -19,15 +19,15 @@ logger = o2logging.get_logger(__name__)
 
 
 class DmsDTO:
-    profile = api_dms_lcm_v1.model("DMSGetDtoProfile", {
-        'cluster_api_endpoint': fields.String(
-            attributes='cluster_api_endpoint'),
-        'cluster_ca_cert': fields.String(attributes='cluster_ca_cert'),
-        'admin_user': fields.String(attributes='admin_user'),
-        'admin_client_cert': fields.String(attributes='admin_client_cert'),
-        'admin_client_key': fields.String(attributes='admin_client_key'),
-        'kube_config_file': fields.String(attributes='kube_config_file')
-    })
+    profile = api_dms_lcm_v1.model("DMSGetDtoProfile", {
+        'cluster_api_endpoint': fields.String(
+            attributes='cluster_api_endpoint'),
+        'cluster_ca_cert': fields.String(attributes='cluster_ca_cert'),
+        'admin_user': fields.String(attributes='admin_user'),
+        'admin_client_cert': fields.String(attributes='admin_client_cert'),
+        'admin_client_key': fields.String(attributes='admin_client_key'),
+        # 'kube_config_file': fields.String(attributes='kube_config_file')
+    })
 
     dms_get = api_dms_lcm_v1.model(
         "DmsGetDto",
@@ -40,7 +40,8 @@ class DmsDTO:
             'supportedLocations': fields.String,
             'capabilities': fields.String,
             'capacity': fields.String,
-            'profile': fields.Nested(profile, False, True),
+            # 'profileName': fields.String,
+            # 'profileData': fields.Nested(profile, False, True),
         }
     )
 
diff --git a/o2dms/api/dms_lcm_view.py b/o2dms/api/dms_lcm_view.py
index 208636a..c9abac9 100644
@@ -35,73 +35,46 @@ def deployment_managers(uow: unit_of_work.AbstractUnitOfWork):
 
 
 def deployment_manager_one(deploymentManagerId: str,
-                           uow: unit_of_work.AbstractUnitOfWork,
-                           profile: str = 'params'):
+                           uow: unit_of_work.AbstractUnitOfWork):
+
     # with uow:
     #     res = uow.session.execute(select(deploymentmanager).where(
     #         deploymentmanager.c.deploymentManagerId == deploymentManagerId))
     #     first = res.first()
-    # return None if first is None else dict(first)
-    # with uow:
-    # first = uow.deployment_managers.get(deploymentManagerId)
-    # return first.serialize() if first is not None else None
+    #     return None if first is None else dict(first)
     with uow:
         first = uow.deployment_managers.get(deploymentManagerId)
-        if first is None:
-            return first
-        result = first.serialize()
+        return first.serialize() if first is not None else None
+
+    # profile = profile.lower()
+    # with uow:
+    #     first = uow.deployment_managers.get(deploymentManagerId)
+    #     if first is None:
+    #         return first
+    #     result = first.serialize()
+
+    # profile_data = result.pop("profile", None)
+    # result['profileName'] = 'default'
 
-    if "params" == profile:
-        pass
-    elif "file" == profile and result.hasattr("profile"):
-        p = result.pop("profile", None)
-        result["profile"] = _gen_kube_config(deploymentManagerId, p)
-    else:
-        result.pop("profile", None)
+    # if "sol0018" == profile:
+    #     result['profileName'] = profile
+    #     result['profileData'] = profile_data
+    # elif "file" == profile and result.hasattr("profile"):
+    # p = result.pop("profile", None)
+    # result["profile"] = _gen_kube_config(deploymentManagerId, p)
 
-    return result
 
 
 def _gen_kube_config(dmId: str, kubeconfig: dict) -> dict:
 
-    # KUBECONFIG environment variable
-    # reference:
-    # https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/
-    data = {
-        'apiVersion': 'v1',
-        'clusters': [
-            {
-                'cluster': {
-                    'server':
-                    kubeconfig.pop('cluster_api_endpoint', None),
-                    'certificate-authority-data':
-                    kubeconfig.pop('cluster_ca_cert', None),
-                },
-                'name': 'inf-cluster'
-            }],
-        'contexts': [
-            {
-                'context': {
-                    'cluster': 'inf-cluster',
-                    'user': 'kubernetes-admin'
-                },
-                'name': 'kubernetes-admin@inf-cluster'
-            }
-        ],
-        'current-context': 'kubernetes-admin@inf-cluster',
-        'kind': 'Config',
-        'preferences': {},
-        'users': [
-            {
-                'name': kubeconfig.pop('admin_user', None),
-                'user': {
-                    'client-certificate-data':
-                    kubeconfig.pop('admin_client_cert', None),
-                    'client-key-data':
-                    kubeconfig.pop('admin_client_key', None),
-                }
-            }]
-    }
+    data = config.gen_k8s_config_dict(
+        kubeconfig.pop('cluster_api_endpoint', None),
+        kubeconfig.pop('cluster_ca_cert', None),
+        kubeconfig.pop('admin_user', None),
+        kubeconfig.pop('admin_client_cert', None),
+        kubeconfig.pop('admin_client_key', None),
+    )
 
     # Generate a random key for tmp kube config file
     letters = string.ascii_uppercase
diff --git a/o2dms/api/dms_route.py b/o2dms/api/dms_route.py
index c92bdc1..032ebe9 100644
@@ -13,9 +13,9 @@
 #  limitations under the License.
 
 # from flask import jsonify
-from os.path import exists
-from flask import send_file
-from flask_restx import Resource, reqparse
+from os.path import exists
+from flask import send_file
+from flask_restx import Resource
 
 from o2dms.api.dms_dto import DmsDTO
 from o2dms.api import dms_lcm_view
@@ -33,8 +33,8 @@ def configure_api_route():
 # ----------  DeploymentManagers ---------- #
 @api_dms_lcm_v1.route("/<deploymentManagerID>")
 @api_dms_lcm_v1.param('deploymentManagerID', 'ID of the deployment manager')
-@api_dms_lcm_v1.param('profile', 'DMS profile',
-                      location='args')
+# @api_dms_lcm_v1.param('profile', 'DMS profile: value supports "sol0018"',
+#                       _in='query', default='sol0018')
 @api_dms_lcm_v1.response(404, 'Deployment manager not found')
 class DmsGetRouter(Resource):
 
@@ -48,31 +48,33 @@ class DmsGetRouter(Resource):
         ))
         bus = MessageBus.get_instance()
 
-        parser = reqparse.RequestParser()
-        parser.add_argument('profile', location='args')
-        args = parser.parse_args()
+        # parser = reqparse.RequestParser()
+        # parser.add_argument('profile', location='args')
+        # args = parser.parse_args()
 
+        # result = dms_lcm_view.deployment_manager_one(
+        #     deploymentManagerID, bus.uow, args.profile)
         result = dms_lcm_view.deployment_manager_one(
-            deploymentManagerID, bus.uow, args.profile)
+            deploymentManagerID, bus.uow)
         if result is not None:
             return result
         api_dms_lcm_v1.abort(404, "Deployment manager {} doesn't exist".format(
             deploymentManagerID))
 
 
-@api_dms_lcm_v1.route("/<deploymentManagerID>/download/<filename>")
-@api_dms_lcm_v1.param('deploymentManagerID',
-                      'ID of the deployment manager')
-@api_dms_lcm_v1.param('filename',
-                      'profile filename')
-@api_dms_lcm_v1.response(404, 'profile not found')
-class DeploymentManagerGetFileRouter(Resource):
-    def get(self, deploymentManagerID, filename):
-        path = "/tmp/kubeconfig_" + filename
+@api_dms_lcm_v1.route("/<deploymentManagerID>/download/<filename>")
+@api_dms_lcm_v1.param('deploymentManagerID',
+                      'ID of the deployment manager')
+@api_dms_lcm_v1.param('filename',
+                      'profile filename')
+@api_dms_lcm_v1.response(404, 'profile not found')
+class DeploymentManagerGetFileRouter(Resource):
+    def get(self, deploymentManagerID, filename):
+        path = "/tmp/kubeconfig_" + filename
 
-        if exists(path):
-            return send_file(path, as_attachment=True)
-        api_dms_lcm_v1.abort(
-            404,
-            "Deployment manager {}'s Kube config file doesn't exist".
-            format(deploymentManagerID))
+        if exists(path):
+            return send_file(path, as_attachment=True)
+        api_dms_lcm_v1.abort(
+            404,
+            "Deployment manager {}'s Kube config file doesn't exist".
+            format(deploymentManagerID))
diff --git a/o2ims/domain/ocloud.py b/o2ims/domain/ocloud.py
index 8547f5d..996d82d 100644
@@ -46,6 +46,7 @@ class DeploymentManager(AgRoot, Serializer):
 
         if 'profile' in d and d['profile'] != '':
             d['profile'] = json.loads(d['profile'])
+        d['profileSupportList'] = ['default', 'sol0018']
 
         return d
 
diff --git a/o2ims/views/ocloud_dto.py b/o2ims/views/ocloud_dto.py
index c7489ae..dacfcf1 100644
@@ -144,6 +144,10 @@ class DeploymentManagerDTO:
             'supportedLocations': fields.String,
             'capabilities': fields.String,
             'capacity': fields.String,
+            'profileSupportList': fields.List(
+                fields.String,
+                description='Profile support list, use default for the '
+                            'return endpoint'),
         }
     )
 
@@ -154,7 +158,7 @@ class DeploymentManagerDTO:
         'admin_user': fields.String(attributes='admin_user'),
         'admin_client_cert': fields.String(attributes='admin_client_cert'),
         'admin_client_key': fields.String(attributes='admin_client_key'),
-        'kube_config_file': fields.String(attributes='kube_config_file')
+        'kube_config_file': fields.String(attributes='kube_config_file')
     })
 
     deployment_manager_get = api_ims_inventory_v1.model(
@@ -169,7 +173,8 @@ class DeploymentManagerDTO:
             'supportedLocations': fields.String,
             'capabilities': fields.String,
             'capacity': fields.String,
-            'profile': fields.Nested(profile, False, True),
+            'profileName': fields.String,
+            'profileData': fields.Nested(profile, False, True),
         }
     )
 
diff --git a/o2ims/views/ocloud_route.py b/o2ims/views/ocloud_route.py
index 6aa6b2e..3490072 100644
@@ -168,8 +168,8 @@ class DeploymentManagersListRouter(Resource):
 @api_ims_inventory_v1.route("/deploymentManagers/<deploymentManagerID>")
 @api_ims_inventory_v1.param('deploymentManagerID',
                             'ID of the deployment manager')
-@api_ims_inventory_v1.param('profile', 'DMS profile',
-                            location='args')
+@api_ims_inventory_v1.param('profile', 'DMS profile: value supports "sol0018"',
+                            _in='query')
 @api_ims_inventory_v1.response(404, 'Deployment manager not found')
 class DeploymentManagerGetRouter(Resource):
 
@@ -181,8 +181,11 @@ class DeploymentManagerGetRouter(Resource):
         parser = reqparse.RequestParser()
         parser.add_argument('profile', location='args')
         args = parser.parse_args()
+        profile = (
+            args.profile if args.profile is not None and args.profile != ''
+            else 'default')
         result = ocloud_view.deployment_manager_one(
-            deploymentManagerID, bus.uow, args.profile)
+            deploymentManagerID, bus.uow, profile)
         if result is not None:
             return result
         api_ims_inventory_v1.abort(
diff --git a/o2ims/views/ocloud_view.py b/o2ims/views/ocloud_view.py
index 7213fc4..e129b83 100644
@@ -105,64 +105,40 @@ def deployment_managers(uow: unit_of_work.AbstractUnitOfWork):
 
 def deployment_manager_one(deploymentManagerId: str,
                            uow: unit_of_work.AbstractUnitOfWork,
-                           profile: str = 'params'):
+                           profile: str = 'default'):
+    profile = profile.lower()
     with uow:
         first = uow.deployment_managers.get(deploymentManagerId)
         if first is None:
             return first
         result = first.serialize()
+        if result is None:
+            return None
 
-    if "params" == profile:
-        pass
-    elif "file" == profile and result.hasattr("profile"):
-        p = result.pop("profile", None)
-        result["profile"] = _gen_kube_config(deploymentManagerId, p)
-    else:
-        result.pop("profile", None)
+    profile_data = result.pop("profile", None)
+    result['profileName'] = 'default'
+
+    if "sol0018" == profile:
+        result['profileName'] = profile
+        result['deploymentManagementServiceEndpoint'] = \
+            profile_data['cluster_api_endpoint']
+        result['profileData'] = profile_data
+    # elif "file" == profile and result.hasattr("profile"):
+        # p = result.pop("profile", None)
+        # result["profile"] = _gen_kube_config(deploymentManagerId, p)
 
     return result
 
 
 def _gen_kube_config(dmId: str, kubeconfig: dict) -> dict:
 
-    # KUBECONFIG environment variable
-    # reference:
-    # https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/
-    data = {
-        'apiVersion': 'v1',
-        'clusters': [
-            {
-                'cluster': {
-                    'server':
-                    kubeconfig.pop('cluster_api_endpoint', None),
-                    'certificate-authority-data':
-                    kubeconfig.pop('cluster_ca_cert', None),
-                },
-                'name': 'inf-cluster'
-            }],
-        'contexts': [
-            {
-                'context': {
-                    'cluster': 'inf-cluster',
-                    'user': 'kubernetes-admin'
-                },
-                'name': 'kubernetes-admin@inf-cluster'
-            }
-        ],
-        'current-context': 'kubernetes-admin@inf-cluster',
-        'kind': 'Config',
-        'preferences': {},
-        'users': [
-            {
-                'name': kubeconfig.pop('admin_user', None),
-                'user': {
-                    'client-certificate-data':
-                    kubeconfig.pop('admin_client_cert', None),
-                    'client-key-data':
-                    kubeconfig.pop('admin_client_key', None),
-                }
-            }]
-    }
+    data = config.gen_k8s_config_dict(
+        kubeconfig.pop('cluster_api_endpoint', None),
+        kubeconfig.pop('cluster_ca_cert', None),
+        kubeconfig.pop('admin_user', None),
+        kubeconfig.pop('admin_client_cert', None),
+        kubeconfig.pop('admin_client_key', None),
+    )
 
     # Generate a random key for tmp kube config file
     letters = string.ascii_uppercase
diff --git a/tests/unit/test_ocloud.py b/tests/unit/test_ocloud.py
index 271f5c5..1ae4f5e 100644
@@ -14,6 +14,7 @@
 
 import uuid
 from unittest.mock import MagicMock
+from o2dms.domain import dms
 
 from o2ims.domain import ocloud, subscription_obj, configuration_obj
 from o2ims.domain import resource_type as rt
@@ -271,15 +272,36 @@ def test_view_deployment_manager_one(mock_uow):
         deployment_manager_id1, uow)
     assert deployment_manager_res is None
 
+    dms_endpoint = "http://o2:30205/o2dms/v1/uuid"
     session.return_value.query.return_value.filter_by.return_value.first.\
         return_value.serialize.return_value = {
             "deploymentManagerId": deployment_manager_id1,
+            "deploymentManagementServiceEndpoint": dms_endpoint,
+            "profile": {}
         }
 
+    # profile default
     deployment_manager_res = ocloud_view.deployment_manager_one(
         deployment_manager_id1, uow)
     assert str(deployment_manager_res.get(
         "deploymentManagerId")) == deployment_manager_id1
+    assert str(deployment_manager_res.get(
+        'deploymentManagementServiceEndpoint')) == dms_endpoint
+    assert deployment_manager_res.get('profile') is None
+
+    # profile sol0018
+    profileName = 'sol0018'
+    cluster_endpoint = "https://test_k8s:6443"
+    session.return_value.query.return_value.filter_by.return_value.first.\
+        return_value.serialize.return_value['profile'] = {
+            "cluster_api_endpoint": cluster_endpoint
+        }
+    deployment_manager_res = ocloud_view.deployment_manager_one(
+        deployment_manager_id1, uow, profile=profileName)
+    assert str(deployment_manager_res.get(
+        'deploymentManagementServiceEndpoint')) == cluster_endpoint
+    assert str(deployment_manager_res.get(
+        "profileName")) == profileName
 
 
 def test_view_subscriptions(mock_uow):