Adding a1 policy manager and a1 sdnc vth 10/4110/2
author Chen, Jackie <jv246a@att.com>
Mon, 15 Jun 2020 18:28:29 +0000 (14:28 -0400)
committer Chen, Jackie <jv246a@att.com>
Mon, 15 Jun 2020 18:42:30 +0000 (14:42 -0400)
included otf-helm with this commit; used for OTF database deployments
removed the mock server from both VTHs

Change-Id: Ic1ca76eb89da3d79b100331ae1e11fb98c91113a
Signed-off-by: Chen, Jackie <jv246a@att.com>
90 files changed:
a1-policy-manager-vth/.environ [new file with mode: 0644]
a1-policy-manager-vth/Jenkinsfile [new file with mode: 0644]
a1-policy-manager-vth/app/__init__.py [new file with mode: 0644]
a1-policy-manager-vth/app/errors/__init__.py [new file with mode: 0644]
a1-policy-manager-vth/app/errors/bad_request_exception.py [new file with mode: 0644]
a1-policy-manager-vth/app/helpers/__init__.py [new file with mode: 0644]
a1-policy-manager-vth/app/helpers/action_helper.py [new file with mode: 0644]
a1-policy-manager-vth/app/helpers/error_helper.py [new file with mode: 0644]
a1-policy-manager-vth/app/helpers/policy_helper.py [new file with mode: 0644]
a1-policy-manager-vth/app/helpers/response_helper.py [new file with mode: 0644]
a1-policy-manager-vth/app/helpers/ric_helper.py [new file with mode: 0644]
a1-policy-manager-vth/app/helpers/service_helper.py [new file with mode: 0644]
a1-policy-manager-vth/app/helpers/time_helper.py [new file with mode: 0644]
a1-policy-manager-vth/app/models/__init__.py [new file with mode: 0644]
a1-policy-manager-vth/app/routes/__init__.py [new file with mode: 0644]
a1-policy-manager-vth/app/routes/errors.py [new file with mode: 0644]
a1-policy-manager-vth/app/routes/info.py [new file with mode: 0644]
a1-policy-manager-vth/app/routes/policy.py [new file with mode: 0644]
a1-policy-manager-vth/app/routes/ric.py [new file with mode: 0644]
a1-policy-manager-vth/app/routes/service.py [new file with mode: 0644]
a1-policy-manager-vth/config.ini [new file with mode: 0644]
a1-policy-manager-vth/doc/a1-documentation.docx [new file with mode: 0644]
a1-policy-manager-vth/docker/Dockerfile [new file with mode: 0644]
a1-policy-manager-vth/docker/container-tag.yaml [new file with mode: 0644]
a1-policy-manager-vth/helm/a1-policy-manager-vth/.helmignore [new file with mode: 0644]
a1-policy-manager-vth/helm/a1-policy-manager-vth/Chart.yaml [new file with mode: 0644]
a1-policy-manager-vth/helm/a1-policy-manager-vth/templates/deployment.yaml [new file with mode: 0644]
a1-policy-manager-vth/helm/a1-policy-manager-vth/templates/secret.yaml [new file with mode: 0644]
a1-policy-manager-vth/helm/a1-policy-manager-vth/templates/service.yaml [new file with mode: 0644]
a1-policy-manager-vth/helm/a1-policy-manager-vth/values.yaml [new file with mode: 0644]
a1-policy-manager-vth/pip-requirements.txt [new file with mode: 0644]
a1-policy-manager-vth/run.py [new file with mode: 0644]
a1-sdnc-vth/.environ [new file with mode: 0644]
a1-sdnc-vth/Jenkinsfile [new file with mode: 0644]
a1-sdnc-vth/app/__init__.py [new file with mode: 0644]
a1-sdnc-vth/app/errors/__init__.py [new file with mode: 0644]
a1-sdnc-vth/app/errors/bad_request_exception.py [new file with mode: 0644]
a1-sdnc-vth/app/helpers/__init__.py [new file with mode: 0644]
a1-sdnc-vth/app/helpers/action_helper.py [new file with mode: 0644]
a1-sdnc-vth/app/helpers/error_helper.py [new file with mode: 0644]
a1-sdnc-vth/app/helpers/policy_helper.py [new file with mode: 0644]
a1-sdnc-vth/app/helpers/response_helper.py [new file with mode: 0644]
a1-sdnc-vth/app/helpers/ric_helper.py [new file with mode: 0644]
a1-sdnc-vth/app/helpers/service_helper.py [new file with mode: 0644]
a1-sdnc-vth/app/helpers/time_helper.py [new file with mode: 0644]
a1-sdnc-vth/app/models/__init__.py [new file with mode: 0644]
a1-sdnc-vth/app/routes/__init__.py [new file with mode: 0644]
a1-sdnc-vth/app/routes/errors.py [new file with mode: 0644]
a1-sdnc-vth/app/routes/info.py [new file with mode: 0644]
a1-sdnc-vth/app/routes/policy.py [new file with mode: 0644]
a1-sdnc-vth/app/routes/ric.py [new file with mode: 0644]
a1-sdnc-vth/app/routes/service.py [new file with mode: 0644]
a1-sdnc-vth/config.ini [new file with mode: 0644]
a1-sdnc-vth/doc/a1-documentation.docx [new file with mode: 0644]
a1-sdnc-vth/docker/Dockerfile [new file with mode: 0644]
a1-sdnc-vth/docker/container-tag.yaml [new file with mode: 0644]
a1-sdnc-vth/helm/a1-sdnc-vth/.helmignore [new file with mode: 0644]
a1-sdnc-vth/helm/a1-sdnc-vth/Chart.yaml [new file with mode: 0644]
a1-sdnc-vth/helm/a1-sdnc-vth/templates/deployment.yaml [new file with mode: 0644]
a1-sdnc-vth/helm/a1-sdnc-vth/templates/secret.yaml [new file with mode: 0644]
a1-sdnc-vth/helm/a1-sdnc-vth/templates/service.yaml [new file with mode: 0644]
a1-sdnc-vth/helm/a1-sdnc-vth/values.yaml [new file with mode: 0644]
a1-sdnc-vth/pip-requirements.txt [new file with mode: 0644]
a1-sdnc-vth/run.py [new file with mode: 0644]
otf-helm/.gitignore [new file with mode: 0644]
otf-helm/deploy.sh [new file with mode: 0644]
otf-helm/otf/.helmignore [new file with mode: 0644]
otf-helm/otf/Chart.yaml [new file with mode: 0644]
otf-helm/otf/charts/databases/charts/mongodb/Chart.yaml [new file with mode: 0644]
otf-helm/otf/charts/databases/charts/mongodb/scripts/groups.json [new file with mode: 0644]
otf-helm/otf/charts/databases/charts/mongodb/scripts/init_db.sh [new file with mode: 0644]
otf-helm/otf/charts/databases/charts/mongodb/scripts/users.json [new file with mode: 0644]
otf-helm/otf/charts/databases/charts/mongodb/templates/configmap.yaml [new file with mode: 0644]
otf-helm/otf/charts/databases/charts/mongodb/templates/deployment.yaml [new file with mode: 0644]
otf-helm/otf/charts/databases/charts/mongodb/templates/ingress.yaml [new file with mode: 0644]
otf-helm/otf/charts/databases/charts/mongodb/templates/secret.yaml [new file with mode: 0644]
otf-helm/otf/charts/databases/charts/mongodb/templates/service.yaml [new file with mode: 0644]
otf-helm/otf/charts/databases/charts/mongodb/values.yaml [new file with mode: 0644]
otf-helm/otf/charts/databases/charts/mysqldb/Chart.yaml [new file with mode: 0644]
otf-helm/otf/charts/databases/charts/mysqldb/scripts/init_db.sh [new file with mode: 0644]
otf-helm/otf/charts/databases/charts/mysqldb/scripts/mysql_engine_7.10.0.sql [new file with mode: 0644]
otf-helm/otf/charts/databases/charts/mysqldb/scripts/mysql_identity_7.10.0.sql [new file with mode: 0644]
otf-helm/otf/charts/databases/charts/mysqldb/templates/configmap.yaml [new file with mode: 0644]
otf-helm/otf/charts/databases/charts/mysqldb/templates/deployment.yaml [new file with mode: 0644]
otf-helm/otf/charts/databases/charts/mysqldb/templates/ingress.yaml [new file with mode: 0644]
otf-helm/otf/charts/databases/charts/mysqldb/templates/secret.yaml [new file with mode: 0644]
otf-helm/otf/charts/databases/charts/mysqldb/templates/service.yaml [new file with mode: 0644]
otf-helm/otf/charts/databases/charts/mysqldb/values.yaml [new file with mode: 0644]
otf-helm/otf/values/development.yaml [new file with mode: 0644]
otf-helm/readme.md [new file with mode: 0644]

diff --git a/a1-policy-manager-vth/.environ b/a1-policy-manager-vth/.environ
new file mode 100644 (file)
index 0000000..0b3fa87
--- /dev/null
@@ -0,0 +1,9 @@
+# If using dotenv, rename this file to .env and set the environment variables below\r
+USER=String\r
+PW=String\r
+AUTH=Boolean\r
+PROXY=Boolean\r
+HTTP=String\r
+HTTPS=String\r
+API_URL=String\r
+API_PORT=Int\r
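
For reference, a filled-in .env might look like the sketch below (all values hypothetical; AUTH and PROXY are read as booleans and API_PORT as an integer, per the types above):

    USER=admin
    PW=secret
    AUTH=False
    PROXY=False
    HTTP=http://proxy.example.com:8080
    HTTPS=https://proxy.example.com:8080
    API_URL=http://localhost
    API_PORT=8080
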
diff --git a/a1-policy-manager-vth/Jenkinsfile b/a1-policy-manager-vth/Jenkinsfile
new file mode 100644 (file)
index 0000000..3244ca2
--- /dev/null
@@ -0,0 +1,158 @@
+#!/usr/bin/env groovy\r
+\r
+/*  Copyright (c) 2019 AT&T Intellectual Property.                             #\r
+#                                                                              #\r
+#   Licensed under the Apache License, Version 2.0 (the "License");            #\r
+#   you may not use this file except in compliance with the License.           #\r
+#   You may obtain a copy of the License at                                    #\r
+#                                                                              #\r
+#       http://www.apache.org/licenses/LICENSE-2.0                             #\r
+#                                                                              #\r
+#   Unless required by applicable law or agreed to in writing, software        #\r
+#   distributed under the License is distributed on an "AS IS" BASIS,          #\r
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #\r
+#   See the License for the specific language governing permissions and        #\r
+#   limitations under the License.                                             #\r
+##############################################################################*/\r
+\r
+\r
+properties([[$class: 'ParametersDefinitionProperty', parameterDefinitions: [\r
+    [$class: 'hudson.model.StringParameterDefinition', name: 'PHASE', defaultValue: "BUILD"],\r
+    [$class: 'hudson.model.StringParameterDefinition', name: 'ENV', defaultValue: "dev"],\r
+    [$class: 'hudson.model.StringParameterDefinition', name: 'MECHID', defaultValue: "m13591_otf_dev"],\r
+    [$class: 'hudson.model.StringParameterDefinition', name: 'KUBE_CONFIG', defaultValue: "kubeConfig-dev"],\r
+    [$class: 'hudson.model.StringParameterDefinition', name: 'TILLER_NAMESPACE', defaultValue: "com-att-ecomp-otf-dev"]\r
+]]])\r
+\r
+\r
+    echo "Build branch: ${env.BRANCH_NAME}"\r
+\r
+    node("docker"){\r
+      stage 'Checkout'\r
+        checkout scm\r
+        PHASES=PHASE.tokenize( '_' );\r
+      echo "PHASES : " + PHASES\r
+\r
+\r
+      ARTIFACT_ID="a1-policy-manager-vth";\r
+      VERSION="0.0.1-SNAPSHOT";\r
+      NAMESPACE="com.att.ecomp.otf" //TODO change back to org-otf-oran when done testing\r
+        DOCKER_REGISTRY="dockercentral.it.att.com:5100"\r
+\r
+        if( ENV.equalsIgnoreCase("dev") ){\r
+          IMAGE_NAME=DOCKER_REGISTRY + "/" + NAMESPACE + ".dev" + "/" + ARTIFACT_ID +  ":" + VERSION\r
+\r
+        }\r
+      if( ENV.equalsIgnoreCase("prod") || ENV.equalsIgnoreCase("prod-dr")){\r
+        IMAGE_NAME=DOCKER_REGISTRY + "/" + NAMESPACE + ".prod" + "/" + ARTIFACT_ID +  ":" + VERSION\r
+\r
+      }\r
+\r
+      if( ENV.equalsIgnoreCase("st") ){\r
+        IMAGE_NAME=DOCKER_REGISTRY + "/" + NAMESPACE + ".st" + "/" + ARTIFACT_ID +  ":" + VERSION\r
+\r
+      }\r
+\r
+      echo "Artifact: " + IMAGE_NAME\r
+\r
+        withEnv(["PATH=${env.PATH}:${env.WORKSPACE}/linux-amd64", "HELM_HOME=${env.WORKSPACE}"]) {\r
+\r
+          echo "PATH=${env.PATH}"\r
+            echo "HELM_HOME=${env.HELM_HOME}"\r
+\r
+            if (PHASES.contains("BUILD")){\r
+              dir("./a1-policy-manager-vth"){\r
+\r
+                stage 'Publish Artifact'\r
+\r
+                  withCredentials([usernamePassword(credentialsId: MECHID, usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {\r
+\r
+                    echo "Artifact: " + IMAGE_NAME\r
+\r
+                      sh """\r
+                      docker login $DOCKER_REGISTRY --username $USERNAME --password $PASSWORD\r
+                      docker build -t $IMAGE_NAME .\r
+                      docker push $IMAGE_NAME\r
+                      """\r
+                  }\r
+              }\r
+\r
+            }\r
+\r
+          if (PHASES.contains("DEPLOY") || PHASES.contains("UNDEPLOY")) {\r
+\r
+            stage 'Init Helm'\r
+\r
+              //check if helm exists if not install\r
+              if(fileExists('linux-amd64/helm')){\r
+                sh """\r
+                  echo "helm is already installed"\r
+                  """\r
+              }\r
+              else{\r
+                //download helm\r
+                sh """\r
+                  echo "installing helm"\r
+                  wget  https://storage.googleapis.com/kubernetes-helm/helm-v2.14.3-linux-amd64.tar.gz\r
+                  tar -xf helm-v2.14.3-linux-amd64.tar.gz\r
+                  rm helm-v2.14.3-linux-amd64.tar.gz\r
+                  """\r
+              }\r
+\r
+            withCredentials([file(credentialsId: KUBE_CONFIG, variable: 'KUBECONFIG')]) {\r
+\r
+              dir('a1-policy-manager-vth/helm'){\r
+                //check if charts are valid, and then perform dry run, if successful then upgrade/install charts\r
+\r
+                if (PHASES.contains("UNDEPLOY") ) {\r
+                  stage 'Undeploy'\r
+\r
+                    sh """\r
+                    helm delete --tiller-namespace=$TILLER_NAMESPACE --purge $ARTIFACT_ID\r
+                    """\r
+                }\r
+\r
+                //NOTE Double quotes are used below so Groovy interpolates variables like ARTIFACT_ID and TILLER_NAMESPACE\r
+                if (PHASES.contains("DEPLOY") ){\r
+                  stage 'Deploy'\r
+                    withCredentials([usernamePassword(credentialsId: MECHID, usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {\r
+\r
+                      sh """\r
+                        echo "Validate Yaml"\r
+                        helm lint $ARTIFACT_ID\r
+\r
+                        echo "View Helm Templates"\r
+                        helm template $ARTIFACT_ID \\r
+                        --set appName=$ARTIFACT_ID \\r
+                        --set version=$VERSION  \\r
+                        --set env=$ENV \\r
+                        --set image=$IMAGE_NAME \\r
+                        --set namespace=$TILLER_NAMESPACE\r
+\r
+                        echo "Perform Dry Run Of Install"\r
+                        helm upgrade --tiller-namespace=$TILLER_NAMESPACE --install --dry-run $ARTIFACT_ID $ARTIFACT_ID \\r
+                        --set appName=$ARTIFACT_ID \\r
+                        --set version=$VERSION  \\r
+                        --set env=$ENV \\r
+                        --set image=$IMAGE_NAME \\r
+                        --set namespace=$TILLER_NAMESPACE\r
+\r
+\r
+                        echo "Helm Install/Upgrade"\r
+                        helm upgrade --tiller-namespace=$TILLER_NAMESPACE --install $ARTIFACT_ID $ARTIFACT_ID \\r
+                        --set appName=$ARTIFACT_ID \\r
+                        --set version=$VERSION  \\r
+                        --set env=$ENV \\r
+                        --set image=$IMAGE_NAME \\r
+                        --set namespace=$TILLER_NAMESPACE\r
+\r
+                        """\r
+                    }\r
+                }\r
+\r
+              }\r
+            }\r
+          }\r
+\r
+        }\r
+    }\r
diff --git a/a1-policy-manager-vth/app/__init__.py b/a1-policy-manager-vth/app/__init__.py
new file mode 100644 (file)
index 0000000..14b5496
--- /dev/null
@@ -0,0 +1,11 @@
+"""\r
+    Module Info:\r
+    Anything imported into this file is available to outside modules.\r
+    Import everything using star; methods or anything that should not be\r
+    used by outside modules should not be imported in the nested\r
+    __init__ files.\r
+"""\r
+from .routes import *\r
+from .errors import *\r
+from .models import *\r
+from .helpers import *\r
diff --git a/a1-policy-manager-vth/app/errors/__init__.py b/a1-policy-manager-vth/app/errors/__init__.py
new file mode 100644 (file)
index 0000000..b491f42
--- /dev/null
@@ -0,0 +1,6 @@
+"""\r
+    Module Info:\r
+    Anything imported into this file is available to outside modules.\r
+    Only import methods that are used by outside modules\r
+"""\r
+from .bad_request_exception import BadRequestException\r
diff --git a/a1-policy-manager-vth/app/errors/bad_request_exception.py b/a1-policy-manager-vth/app/errors/bad_request_exception.py
new file mode 100644 (file)
index 0000000..a3e3d22
--- /dev/null
@@ -0,0 +1,21 @@
+"""\r
+Args:\r
+Returns:\r
+Examples:\r
+"""\r
+class BadRequestException(Exception):\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    def __init__(self, status_code=406, message="Not Acceptable Response"):\r
+        cases = {\r
+            401:"Unauthorized",\r
+            403:"Forbidden",\r
+            404:"Not Found",\r
+            423:"Not Operational"\r
+            }\r
+        super().__init__(cases.get(status_code, message))\r
+        self.status_code = status_code\r
+        self.message = message\r
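
A minimal usage sketch (id value hypothetical): for a mapped status code, the base Exception text comes from the cases dict, while .message keeps the caller-supplied detail.

    from app.errors.bad_request_exception import BadRequestException

    try:
        raise BadRequestException(404, "policy abc not found")
    except BadRequestException as err:
        # str(err) is "Not Found" (from the cases mapping);
        # err.message is still "policy abc not found"
        print(err.status_code, str(err), err.message)
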
diff --git a/a1-policy-manager-vth/app/helpers/__init__.py b/a1-policy-manager-vth/app/helpers/__init__.py
new file mode 100644 (file)
index 0000000..3313af8
--- /dev/null
@@ -0,0 +1,12 @@
+"""\r
+    Module Info:\r
+    Anything imported into this file is available to outside modules.\r
+    Only import methods that are used by outside modules\r
+"""\r
+from .error_helper import *\r
+from .response_helper import *\r
+from .time_helper import *\r
+from .policy_helper import *\r
+from .service_helper import *\r
+from .ric_helper import *\r
+from .action_helper import *\r
diff --git a/a1-policy-manager-vth/app/helpers/action_helper.py b/a1-policy-manager-vth/app/helpers/action_helper.py
new file mode 100644 (file)
index 0000000..f952e76
--- /dev/null
@@ -0,0 +1,54 @@
+import json\r
+import ast\r
+from app.helpers import response_helper as ResponseHelper\r
+from flask import current_app\r
+from app.errors.bad_request_exception import BadRequestException\r
+import requests\r
+\r
+def execute_action(request, response_dict, config):\r
+    headers = ResponseHelper.create_headers()\r
+    request_data = request.json\r
+    action_request = request_data.get("action").lower()\r
+    method = request_data.get("method").upper()\r
+    creds = ResponseHelper.get_credentials(request_data, config)\r
+\r
+    proxies = ResponseHelper.get_proxies(config)\r
+    action = "services/keepalive" if action_request == "keepalive" else action_request\r
+    url = ResponseHelper.create_url(config=config, uri_path="/"+action)\r
+#    ret_url = request.args.get('retURL')\r
+\r
+\r
+    json_req = ast.literal_eval(request_data["action_data"]["jsonBody"])\r
+    query_params = ast.literal_eval(request_data["action_data"]["query"])\r
+    current_app.logger.info("Requesting Url: {}, params: {}, body: {}, auth: {}, proxies: {}".format(url, query_params, json_req, creds, proxies))\r
+    try:\r
+        if(method == "GET"):\r
+            res = requests.get(url, proxies=proxies, auth=creds, headers=headers, params=query_params, json=json_req)\r
+        elif(method == "POST"):\r
+            res = requests.post(url, proxies=proxies, auth=creds, headers=headers, params=query_params, json=json_req)\r
+        elif(method == "PUT"):\r
+            res = requests.put(url, proxies=proxies, auth=creds, headers=headers, params=query_params, json=json_req)\r
+        elif(method == "DELETE"):\r
+            res = requests.delete(url, proxies=proxies, auth=creds, headers=headers, params=query_params, json=json_req)\r
+        else: \r
+            raise BadRequestException(406, "Method Not Supported")\r
+        response = {\r
+                "status_code":res.status_code,\r
+                "result": res.json()\r
+                }\r
+    except(json.decoder.JSONDecodeError):\r
+        response = {\r
+                "status_code":res.status_code,\r
+                "result": res.reason\r
+                }\r
+    except requests.exceptions.RequestException:\r
+        response = {\r
+                "status_code":504,\r
+                "result": "Something Happened"\r
+                }\r
+    finally:\r
+        response_dict['vthResponse']['resultData'] = response\r
+ #       if ret_url is not None:\r
+ #           ResponseHelper.sendCallback(ret_url,response_dict)\r
+ #           return '',200\r
+        return response_dict\r
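
Because execute_action parses action_data.query and action_data.jsonBody with ast.literal_eval, both must be Python-literal strings rather than nested JSON objects. A hedged sketch of a body the /handle_action route would accept (values hypothetical):

    {
        "action": "policies",
        "method": "GET",
        "auth": {"username": "user", "password": "pw"},
        "action_data": {
            "query": "{'ric': 'ric1'}",
            "jsonBody": "{}"
        }
    }
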
diff --git a/a1-policy-manager-vth/app/helpers/error_helper.py b/a1-policy-manager-vth/app/helpers/error_helper.py
new file mode 100644 (file)
index 0000000..b34cedf
--- /dev/null
@@ -0,0 +1,51 @@
+from flask import current_app\r
+import datetime\r
+"""\r
+Args:\r
+Returns:\r
+Examples:\r
+"""\r
+\r
+def error_dic(error, status_code, response_message="Something went wrong, vth encountered an error"):\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    message = [str(x) for x in error.args]\r
+    error_log={\r
+            "error":{\r
+                "type": error.__class__.__name__,\r
+                "message": message\r
+                }\r
+            }\r
+    response_data = {\r
+        "vthResponse": {\r
+            "testDurationMS": 0,\r
+            'dateTimeUTC': str(datetime.datetime.now()),\r
+            "abstractMessage": "Failed",\r
+            "error":response_message,\r
+            "status_code": status_code,\r
+            "resultData": {}\r
+        }\r
+    }\r
+    current_app.logger.error(error_log)\r
+    return response_data\r
+\r
+def error_dic2(error, status_code=500):\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    message = [str(x) for x in error.args]\r
+    response = {\r
+        "status_code" : status_code,\r
+        "success": False,\r
+        "error":{\r
+            "type": error.__class__.__name__,\r
+            "message": message\r
+            }\r
+        }\r
+    return response\r
+\r
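
For illustration, error_dic wraps any exception in the standard vthResponse envelope; a ValueError with status 500 would produce roughly the following (timestamp hypothetical):

    {
        "vthResponse": {
            "testDurationMS": 0,
            "dateTimeUTC": "2020-06-15 18:28:29.000000",
            "abstractMessage": "Failed",
            "error": "Something went wrong, vth encountered an error",
            "status_code": 500,
            "resultData": {}
        }
    }
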
diff --git a/a1-policy-manager-vth/app/helpers/policy_helper.py b/a1-policy-manager-vth/app/helpers/policy_helper.py
new file mode 100644 (file)
index 0000000..ea4fedc
--- /dev/null
@@ -0,0 +1,163 @@
+from app.helpers import response_helper as ResponseHelper\r
+from flask import current_app\r
+from app.errors.bad_request_exception import BadRequestException\r
+import requests\r
+\r
+def get_policy_using_get(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    if 'id' not in json_data: raise BadRequestException(406, "Request is missing id")\r
+    param = {'id': json_data['id']}\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    url = ResponseHelper.create_url(config=config, uri_path="/policy")\r
+    res = requests.get(url, auth=creds, params=param)\r
+    response = {\r
+            "status_code":res.status_code,\r
+            "result": res.json()\r
+            }\r
+    response_dict['vthResponse']['resultData'] = response\r
+\r
+    return response_dict\r
+def put_policy_using_put(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    required = {'id', 'jsonBody', 'ric', 'service'}\r
+    param_keys = {'id', 'ric', 'service'}\r
+    optional = {"type"}\r
+    data_keys = param_keys.copy()\r
+    keys = set(json_data.keys())\r
+    if not required <= keys:\r
+        raise BadRequestException(406, "Request is missing required values {}".format(required))\r
+    if optional <= keys: data_keys.update(optional)\r
+    param = {}\r
+    body = {}\r
+    for key in data_keys:\r
+        param[key] = json_data[key]\r
+    body['jsonBody'] = json_data['jsonBody']\r
+\r
+    url = ResponseHelper.create_url(config=config, uri_path="/policy")\r
+    res = requests.put(url, auth=creds, params=param, json=body)\r
+    response = {\r
+            "status_code":res.status_code,\r
+            "result": res.json()\r
+            }\r
+    response_dict['vthResponse']['resultData'] = response\r
+    return response_dict\r
+def delete_policy_using_delete(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    keys = set(json_data.keys())\r
+    required = {'id'}\r
+    if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required))\r
+    param = {'id': json_data['id']}\r
+\r
+    url = ResponseHelper.create_url(config=config, uri_path="/policy")\r
+    res = requests.delete(url, auth=creds, params=param)\r
+    response = {\r
+            "status_code":res.status_code,\r
+            "result": res.json()\r
+            }\r
+    response_dict['vthResponse']['resultData'] = response\r
+    return response_dict\r
+\r
+def get_policy_ids_using_get(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    param = {\r
+            "ric":json_data["ric"] if "ric" in json_data else "",\r
+            "service":json_data["service"] if "service" in json_data else "",\r
+            "type":json_data["type"] if "type" in json_data else ""\r
+            }\r
+\r
+    url = ResponseHelper.create_url(config=config, uri_path="/policy_ids")\r
+    res = requests.get(url, auth=creds, params=param)\r
+    response = {\r
+            "status_code":res.status_code,\r
+            "result": res.json()\r
+            }\r
+    response_dict['vthResponse']['resultData'] = response\r
+    return response_dict\r
+\r
+def get_policy_schema_using_get(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    #username = config['auth']['username'] if 'username' not in json_data else json_data['username']\r
+    #password = config['auth']['password'] if 'password' not in json_data else json_data['password']\r
+    #creds = (username, password)\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    keys = set(json_data.keys())\r
+    required = {'id'}\r
+    if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required))\r
+    param = {'id': json_data['id']}\r
+\r
+    url = ResponseHelper.create_url(config=config, uri_path="/policy_schema")\r
+    res = requests.get(url, auth=creds, params=param)\r
+    response = {\r
+            "status_code":res.status_code,\r
+            "result": res.json()\r
+            }\r
+    response_dict['vthResponse']['resultData'] = response\r
+    return response_dict\r
+def get_policy_schemas_using_get(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    #username = config['auth']['username'] if 'username' not in json_data else json_data['username']\r
+    #password = config['auth']['password'] if 'password' not in json_data else json_data['password']\r
+    #creds = (username, password)\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    param = {\r
+            "ric":json_data['ric'] if 'ric' in json_data else ""\r
+            }\r
+    #api_response = requests.put(url, credentials=creds, params=param)\r
+\r
+    url = ResponseHelper.create_url(config=config, uri_path="/policy_schemas")\r
+    res = requests.get(url, auth=creds, params=param)\r
+    response = {\r
+            "status_code":res.status_code,\r
+            "result": res.json()\r
+            }\r
+    response_dict['vthResponse']['resultData'] = response\r
+    return response_dict\r
+def get_policy_status_using_get(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    #username = config['auth']['username'] if 'username' not in json_data else json_data['username']\r
+    #password = config['auth']['password'] if 'password' not in json_data else json_data['password']\r
+    #creds = (username, password)\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    keys = set(json_data.keys())\r
+    required = {'id'}\r
+    if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required))\r
+    param = {\r
+            "id":json_data["id"]\r
+            }\r
+\r
+    response_dict['vthResponse']['resultData'] = param\r
+    #api_response = requests.get(url, credentials=creds, params=param)\r
+    return response_dict\r
+def get_policy_types_using_get(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    param = {\r
+            'ric': json_data['ric'] if 'ric' in json_data else ""\r
+            }\r
+\r
+    url = ResponseHelper.create_url(config=config, uri_path="/a1-p/policytypes")\r
+    res = requests.get(url, auth=creds, params=param)\r
+    response = {\r
+            "status_code":res.status_code,\r
+            "result": res.json()\r
+            }\r
+    response_dict['vthResponse']['resultData'] = response\r
+    return response_dict\r
+\r
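
Note that these policy helpers read their fields from the top level of the JSON request, while the enclosing route_check (see response_helper.py below) separately requires action, method, auth, and action_data. A hypothetical body satisfying both for PUT /policy:

    {
        "action": "policy",
        "method": "PUT",
        "auth": {"username": "user", "password": "pw"},
        "action_data": {"query": "{}", "jsonBody": "{}"},
        "id": "policy-1",
        "ric": "ric1",
        "service": "service1",
        "jsonBody": {"scope": {"ueId": "ue1"}}
    }
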
diff --git a/a1-policy-manager-vth/app/helpers/response_helper.py b/a1-policy-manager-vth/app/helpers/response_helper.py
new file mode 100644 (file)
index 0000000..2058347
--- /dev/null
@@ -0,0 +1,221 @@
+import ast\r
+import requests\r
+from configparser import ConfigParser\r
+import os\r
+import datetime\r
+import json\r
+from flask import request, jsonify, current_app\r
+from app.helpers.time_helper import unix_time_millis\r
+from app.errors.bad_request_exception import BadRequestException\r
+import requests\r
+\r
+"""\r
+    Module Info:\r
+"""\r
+def create_headers(enable_cache=True, content_type="application/json", connection="Keep-Alive"):\r
+    headers = {'Cache-Control':'no-cache, no-store, must-revalidate', "Pragma":"no-cache", "Expires":"0"} if not enable_cache else {}\r
+    headers['content-type'] = content_type\r
+    headers['connection'] = connection\r
+    return headers\r
+def create_url(config=None, uri_path = "/", url_string=None):\r
+    return config['api']['base_url'] +":"+ config['api']['port']+uri_path if url_string is None else url_string\r
+\r
+def valid_string_json(string, response_message="Invalid json string in query or jsonBody, format requires quoted json object e.g. \"{'key':'value', 'key2':{'innerKey':'innerValue'}}\""):\r
+    try:\r
+        string_to_dict = ast.literal_eval(string)\r
+    except(Exception):\r
+        raise BadRequestException(406, response_message)\r
+    return True\r
+def route_check(config=None, get_function=None, post_function=None, put_function=None, delete_function=None):\r
+    """\r
+     Info:\r
+        Since all routes perform the same pre-check and share a similar skeleton, this function factors out the pre-check for code reuse\r
+     Arguments (**kwargs): pass in the handler function(s) for each HTTP method; the method must be allowed by the route decorator\r
+        get_function => type: function\r
+        post_function => type: function\r
+        put_function => type: function\r
+        delete_function => type: function\r
+    Returns:\r
+        returns the return of the function call, typically a jsonified response.\r
+        you can capture the response in a var and execute logic, or just return the function call/response\r
+    E.G.:\r
+        response = route_check(post_function = handle_post)\r
+        return route_check(get_function = handle_get, post_function = handle_post)\r
+    """\r
+    if not request.is_json: raise BadRequestException(406, "Invalid Json Request")\r
+\r
+\r
+    response_dict = vth_response_dic()\r
+    start_time = unix_time_millis(datetime.datetime.now())\r
+    status_code = 200\r
+    ret_url = request.args.get('retURL')\r
+\r
+    query = ""\r
+    json_body = ""\r
+    request_data = request.json\r
+    json_keys = set(request_data)\r
+    action_request = request_data.get("action").lower()\r
+    valid_actions = {"policies", "policy", "policy_ids", "policy_schema", "policy_schemas", "policy_status", "policy_types", "ric", "rics", "service", "services", "keepalive", "status" }\r
+    required_keys = {"action", "method", "auth", "action_data"}\r
+\r
+    #check for valid action and json request contains required keys\r
+    if not required_keys <= json_keys: raise BadRequestException(406, "Json request is missing required keys {}".format(required_keys))\r
+    if not action_request in valid_actions: raise BadRequestException(406, "Action is not supported {}".format(action_request))\r
+    #check request's action_data key contains required keys\r
+    if 'query' not in request.json['action_data']: raise BadRequestException(406, "action_data must contain query and jsonBody ")\r
+    if 'jsonBody' not in request.json['action_data']: raise BadRequestException(406, "action_data must contain query and jsonBody")\r
+\r
+    query = request.json['action_data']['query'] if 'query' in request.json['action_data'] else ""\r
+    json_body = request.json['action_data']['jsonBody'] if 'jsonBody' in request.json['action_data'] else ""\r
+\r
+\r
+    if valid_string_json(query) and valid_string_json(json_body):\r
+        if(request.method == 'GET'):\r
+            response_dict = get_function(request, response_dict, config)\r
+        elif(request.method == 'POST'):\r
+            response_dict = post_function(request, response_dict, config)\r
+        elif(request.method == 'PUT'):\r
+            response_dict = put_function(request, response_dict, config)\r
+        elif(request.method == 'DELETE'):\r
+            response_dict = delete_function(request, response_dict, config)\r
+    else:\r
+        raise BadRequestException(406, "Invalid JSON Strings")\r
+    end_time = unix_time_millis(datetime.datetime.now())\r
+    response_dict['vthResponse']['testDurationMS'] = end_time-start_time\r
+    if ret_url is not None:\r
+        sendCallback(ret_url,response_dict)\r
+        return '',200\r
+    return jsonify(response_dict), status_code\r
+\r
+def get_proxies(config):\r
+    proxy_enabled = config.getboolean('resource', 'proxy_enabled')\r
+    req_proxies = {\r
+        'http': None,\r
+        'https': None\r
+    }\r
+    if not proxy_enabled:\r
+        return None\r
+    else:\r
+        req_proxies['http'] = config['resource']['http_proxy']         \r
+        req_proxies['https'] = config['resource']['https_proxy']\r
+        return req_proxies\r
+def get_credentials(json_data, config):\r
+    auth_enabled = config.getboolean('auth', 'creds_enabled')\r
+    if not auth_enabled:\r
+        return None\r
+    else:\r
+        username = config['auth']['username'] if 'username' not in json_data['auth'] else json_data['auth']['username']\r
+        password = config['auth']['password'] if 'password' not in json_data['auth'] else json_data['auth']['password']\r
+        return (username, password)\r
+def vth_response_dic():\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    response_data = {\r
+        "vthResponse": {\r
+            "testDurationMS": "",\r
+            'dateTimeUTC': str(datetime.datetime.now()),\r
+            "abstractMessage": "Success",\r
+            "resultData": {}\r
+        }\r
+    }\r
+    return response_data\r
+\r
+def sendCallback(url, data):\r
+    try:\r
+        if type(data) is not dict:\r
+            data = {"msg": data}\r
+        current_app.logger.info("sending callback")\r
+        requests.post(url, json=data)\r
+    except Exception as e:\r
+        current_app.logger.info(e)\r
+    return\r
+\r
+def get_request_data(request):\r
+    if not request.is_json:\r
+        raise ValueError("request must be json")\r
+    requestData = request.get_json()\r
+    return requestData\r
+\r
+\r
+def valid_json(data):\r
+\r
+    try:\r
+        _ = json.loads(data)\r
+    except ValueError as e:\r
+        return False\r
+    return True\r
+def get_config(config_file_name):\r
+    config = ConfigParser(os.environ)\r
+    config.read(config_file_name)\r
+    return config\r
+\r
+def validate_request(request_data, isPublish=True):\r
+    return  # NOTE: validation is currently disabled by this early return; remove it to enable the checks below\r
+    missing_params = []\r
+\r
+    if 'topic_name' not in request_data:\r
+        missing_params.append("topic_name")\r
+    if isPublish:\r
+        if 'data' not in request_data:\r
+            missing_params.append('data')\r
+    else:\r
+        if 'consumer_group' not in request_data:\r
+            missing_params.append('consumer_group')\r
+        if 'consumer_id' not in request_data:\r
+            missing_params.append('consumer_id')\r
+\r
+    if missing_params:\r
+        err_msg = '{} request requires the following: '.format('publish' if isPublish else 'subscribe')\r
+        err_msg += ','.join(missing_params)\r
+        raise KeyError(err_msg)\r
+\r
+\r
+def build_url(config, request_data, is_publish=True):\r
+    if is_publish:\r
+        base_path = config['resource']['base_address'] + config['resource']['publish']\r
+        topic_name = request_data['topic_name']\r
+        publish_address = base_path.format(topic_name=topic_name)\r
+        return publish_address\r
+\r
+    base_path = config['resource']['base_address'] + config['resource']['subscribe']\r
+    topic_name = request_data['topic_name']\r
+    consumer_group = request_data['consumer_group']\r
+    consumer_id = request_data['consumer_id']\r
+    subscribe_address = base_path.format(topic_name=topic_name, consumer_group=consumer_group, consumer_id=consumer_id)\r
+    if ('timeout' in request_data):\r
+        subscribe_address = (subscribe_address + '?timeout={}').format(request_data['timeout'])\r
+    return subscribe_address\r
+\r
+\r
+def send_request(url, config, is_subscribe_request=False, payload=None):\r
+    # setup default values\r
+    auth_enabled = config.getboolean('auth', 'auth_enabled')\r
+    proxy_enabled = config.getboolean('resource', 'proxy_enabled')\r
+    username = ''\r
+    password = ''\r
+    req_proxies = {\r
+        'http': None,\r
+        'https': None\r
+    }\r
+    # place proxy and authentication information\r
+    if auth_enabled:\r
+        username = config['auth']['username']\r
+        password = config['auth']['password']\r
+    if proxy_enabled:\r
+        req_proxies['http'] = config['resource']['http_proxy']\r
+        req_proxies['https'] = config['resource']['https_proxy']\r
+\r
+    # for subscribe request\r
+    if is_subscribe_request:\r
+        return requests.get(url,\r
+                            auth=(username, password) if auth_enabled else None,\r
+                            proxies=req_proxies if proxy_enabled else None)\r
+    # for publish request\r
+    req_headers = {'Content-type': 'application/json'}\r
+    return requests.post(url,\r
+                         json=payload,\r
+                         auth=(username, password) if auth_enabled else None,\r
+                         proxies=req_proxies if proxy_enabled else None,\r
+                         headers=req_headers)\r
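
The helpers above read config.ini, whose contents are not shown in this diff; from the keys referenced in the code, a hedged sketch of the expected layout (placeholder values; %(USER)s and %(PW)s resolve from the environment because get_config passes os.environ to ConfigParser as defaults; build_url/send_request additionally expect base_address, publish, and subscribe under [resource]):

    [api]
    base_url = http://localhost
    port = 8080

    [auth]
    creds_enabled = false
    auth_enabled = false
    username = %(USER)s
    password = %(PW)s

    [resource]
    proxy_enabled = false
    http_proxy = http://proxy.example.com:8080
    https_proxy = https://proxy.example.com:8080
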
diff --git a/a1-policy-manager-vth/app/helpers/ric_helper.py b/a1-policy-manager-vth/app/helpers/ric_helper.py
new file mode 100644 (file)
index 0000000..47d55c2
--- /dev/null
@@ -0,0 +1,37 @@
+from app.helpers import response_helper as ResponseHelper\r
+from flask import current_app\r
+from app.errors.bad_request_exception import BadRequestException\r
+\r
+def get_ric_using_get(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    #username = config['auth']['username'] if 'username' not in json_data else json_data['username']\r
+    #password = config['auth']['password'] if 'password' not in json_data else json_data['password']\r
+    #creds = (username, password)\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    keys = set(json_data.keys())\r
+    required = {'managedElementId'}\r
+    if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required))\r
+\r
+    param = {\r
+            'managedElementId': json_data['managedElementId']\r
+            }\r
+\r
+    response_dict['vthResponse']['resultData'] = param\r
+    #api_response = requests.get(url, credentials=creds, params=param)\r
+    return response_dict\r
+def get_rics_using_get(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    #username = config['auth']['username'] if 'username' not in json_data else json_data['username']\r
+    #password = config['auth']['password'] if 'password' not in json_data else json_data['password']\r
+    #creds = (username, password)\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    current_app.logger.info("creds: {}".format(creds))\r
+    param = {\r
+            "policyType": json_data["policyType"] if "policyType" in json_data else ""\r
+            }\r
+\r
+    response_dict['vthResponse']['resultData'] = param\r
+    #api_response = requests.get(url, credentials=creds, params=param)\r
+    return response_dict\r
diff --git a/a1-policy-manager-vth/app/helpers/service_helper.py b/a1-policy-manager-vth/app/helpers/service_helper.py
new file mode 100644 (file)
index 0000000..16d9b92
--- /dev/null
@@ -0,0 +1,78 @@
+\r
+from app.helpers import response_helper as ResponseHelper\r
+from flask import current_app\r
+from app.errors.bad_request_exception import BadRequestException\r
+\r
+def get_services_using_get(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    #username = config['auth']['username'] if 'username' not in json_data else json_data['username']\r
+    #password = config['auth']['password'] if 'password' not in json_data else json_data['password']\r
+    #creds = (username, password)\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    param = {\r
+            'name': json_data['name'] if 'name' in json_data else ""\r
+            }\r
+\r
+    response_dict['vthResponse']['resultData'] = param\r
+    #api_response = requests.get(url, credentials=creds, params=param)\r
+    return response_dict\r
+def delete_services_using_delete(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    #username = config['auth']['username'] if 'username' not in json_data else json_data['username']\r
+    #password = config['auth']['password'] if 'password' not in json_data else json_data['password']\r
+    #creds = (username, password)\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    keys = set(json_data.keys())\r
+    required = {'name'}\r
+    if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required))\r
+\r
+    param = {\r
+            'name': json_data['name']\r
+            }\r
+\r
+    response_dict['vthResponse']['resultData'] = param\r
+    #api_response = requests.get(url, credentials=creds, params=param)\r
+    return response_dict\r
+def put_service_using_put(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    #username = config['auth']['username'] if 'username' not in json_data else json_data['username']\r
+    #password = config['auth']['password'] if 'password' not in json_data else json_data['password']\r
+    #creds = (username, password)\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    keys = set(json_data.keys())\r
+    required = {'registrationInfo'}\r
+    if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required))\r
+\r
+    param = {\r
+            'registrationInfo': json_data['registrationInfo']\r
+            }\r
+\r
+    response_dict['vthResponse']['resultData'] = param\r
+    #api_response = requests.get(url, credentials=creds, params=param)\r
+    return response_dict\r
+\r
+def keep_alive_service_using_put(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    #username = config['auth']['username'] if 'username' not in json_data else json_data['username']\r
+    #password = config['auth']['password'] if 'password' not in json_data else json_data['password']\r
+    #creds = (username, password)\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    keys = set(json_data.keys())\r
+    required = {'name'}\r
+    if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required))\r
+\r
+    param = {\r
+            'name': json_data['name']\r
+            }\r
+\r
+    response_dict['vthResponse']['resultData'] = param\r
+    #api_response = requests.get(url, credentials=creds, params=param)\r
+    return response_dict\r
diff --git a/a1-policy-manager-vth/app/helpers/time_helper.py b/a1-policy-manager-vth/app/helpers/time_helper.py
new file mode 100644 (file)
index 0000000..b882d0b
--- /dev/null
@@ -0,0 +1,24 @@
+"""\r
+    Module Info:\r
+"""\r
+import datetime\r
+\r
+def unix_time_millis(d_time):\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    epoch = datetime.datetime.utcfromtimestamp(0)\r
+    return (d_time - epoch).total_seconds() * 1000.0\r
+\r
+def timed_function(func):\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    start_time = unix_time_millis(datetime.datetime.now())\r
+    func()\r
+    end_time = unix_time_millis(datetime.datetime.now())\r
+    return end_time - start_time\r
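
A quick usage sketch of the two helpers (note that unix_time_millis subtracts a naive UTC epoch from whatever datetime it is given, so passing naive datetime.now() yields a local-time-based value):

    import datetime
    import time
    from app.helpers.time_helper import unix_time_millis, timed_function

    now_ms = unix_time_millis(datetime.datetime.now())  # milliseconds as a float
    elapsed = timed_function(lambda: time.sleep(1))     # wall-clock ms of the call, ~1000.0
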
diff --git a/a1-policy-manager-vth/app/models/__init__.py b/a1-policy-manager-vth/app/models/__init__.py
new file mode 100644 (file)
index 0000000..52319a0
--- /dev/null
@@ -0,0 +1,6 @@
+\r
+"""\r
+    Module Info:\r
+    Anything imported into this file is available to outside modules.\r
+    Only import methods that are used by outside modules\r
+"""\r
diff --git a/a1-policy-manager-vth/app/routes/__init__.py b/a1-policy-manager-vth/app/routes/__init__.py
new file mode 100644 (file)
index 0000000..89419e1
--- /dev/null
@@ -0,0 +1,19 @@
+"""\r
+    Module Info:\r
+    Anything imported to this file will be available to outside modules.\r
+    Routes need to be exported to be usable; if removed, routes will not be found and the\r
+    response will be a 500.\r
+    ROUTES order matters: because ROUTES is a global used by all the other modules,\r
+    it needs to be defined above them all\r
+"""\r
+from flask import Blueprint\r
+from app.helpers.response_helper import get_config\r
+\r
+ROUTES = Blueprint('routes', __name__)\r
+config = get_config("config.ini")\r
+\r
+from .policy import *\r
+from .ric import *\r
+from .service import *\r
+from .info import *\r
+from .errors import ERRORS\r
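
run.py (not shown here) presumably wires these blueprints into the Flask app; a minimal sketch under that assumption (port hypothetical; since errors.py uses app_errorhandler, registering ERRORS installs its handlers application-wide):

    from flask import Flask
    from app.routes import ROUTES, ERRORS

    app = Flask(__name__)
    app.register_blueprint(ROUTES)  # /, /health, /status, /policy, /handle_action, ...
    app.register_blueprint(ERRORS)  # app-wide BadRequestException / Exception handlers

    if __name__ == "__main__":
        app.run(host="0.0.0.0", port=8080)
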
diff --git a/a1-policy-manager-vth/app/routes/errors.py b/a1-policy-manager-vth/app/routes/errors.py
new file mode 100644 (file)
index 0000000..43e1ec1
--- /dev/null
@@ -0,0 +1,33 @@
+"""\r
+Module Info:\r
+"""\r
+from flask import jsonify, current_app, Blueprint\r
+from app.helpers.error_helper import error_dic\r
+from app.errors.bad_request_exception import BadRequestException\r
+import traceback\r
+\r
+ERRORS = Blueprint('errors', __name__)\r
+\r
+@ERRORS.app_errorhandler(BadRequestException)\r
+def handle_bad_request(error):\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    current_app.logger.info(error)\r
+    response = error_dic(error, error.status_code, error.message)\r
+    print(traceback.format_exc())\r
+    return jsonify(response), error.status_code\r
+\r
+@ERRORS.app_errorhandler(Exception)\r
+def handle_error(error):\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    status_code = 500\r
+    response = error_dic(error, status_code)\r
+    print(traceback.format_exc())\r
+    return jsonify(response), status_code\r
diff --git a/a1-policy-manager-vth/app/routes/info.py b/a1-policy-manager-vth/app/routes/info.py
new file mode 100644 (file)
index 0000000..7090cf3
--- /dev/null
@@ -0,0 +1,76 @@
+"""\r
+Args:\r
+Returns:\r
+Examples:\r
+"""\r
+import json\r
+import datetime\r
+from flask import current_app, jsonify, request\r
+import time\r
+import requests\r
+from app.errors.bad_request_exception import BadRequestException\r
+from app.helpers.time_helper import unix_time_millis, timed_function\r
+from app.helpers.response_helper import vth_response_dic\r
+from app.helpers import response_helper as ResponseHelper\r
+from app.helpers import action_helper as Info\r
+from . import config, ROUTES\r
+\r
+\r
+@ROUTES.route("/handle_action", methods=['POST'])\r
+def handle_action_request():\r
+    return ResponseHelper.route_check(config=config, post_function = Info.execute_action)\r
+\r
+\r
+@ROUTES.route("/", methods=['GET'])\r
+def get_base():\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    current_app.logger.info(request.method)\r
+    response = vth_response_dic()\r
+    data = current_app.url_map\r
+    rules = []\r
+    methods_list = []\r
+    for rule in data.iter_rules():\r
+        ma = {rule.rule:[]}\r
+        for val in rule.methods:\r
+            if (val != "OPTIONS") and (val !="HEAD"):\r
+                #print(val)\r
+                ma[rule.rule].append(val)\r
+        rules.append(ma)\r
+\r
+    #    methods_set.add(rule.methods)\r
+        #print(rule.methods)\r
+    #print(rules)\r
+    response["vthResponse"]["resultData"] = rules\r
+    #current_app.logger.info(current_app.url_map)\r
+    current_app.logger.debug("hit health point")\r
+    return jsonify(response)\r
+\r
+@ROUTES.route("/health", methods=['GET'])\r
+def get_health():\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    current_app.logger.debug("hit health point")\r
+    return "UP"\r
+\r
+@ROUTES.route("/status", methods=['GET'])\r
+def get_status():\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    suma = lambda: time.sleep(1)\r
+    #current_app.logger.info(current_app.url_map)\r
+    current_app.logger.info(unix_time_millis(datetime.datetime.now()))\r
+    current_app.logger.info(timed_function(suma))\r
+    current_app.logger.debug("some stuff")\r
+    #raise Exception("some error")\r
+    raise BadRequestException()  # NOTE: always raises here, so the return below is unreachable\r
+    return "Running"\r
diff --git a/a1-policy-manager-vth/app/routes/policy.py b/a1-policy-manager-vth/app/routes/policy.py
new file mode 100644 (file)
index 0000000..588397f
--- /dev/null
@@ -0,0 +1,225 @@
+\r
+import datetime\r
+import json\r
+import logging\r
+from logging import FileHandler\r
+import os\r
+\r
+import requests\r
+from flask import Flask, request, jsonify, current_app\r
+from . import config, ROUTES\r
+from app.helpers import policy_helper as Policy\r
+from app.helpers import response_helper as ResponseHelper\r
+from app.errors.bad_request_exception import BadRequestException\r
+\r
+\r
+\r
+def sendCallback(url, data):\r
+    try:\r
+        if type(data) is not dict:\r
+            data = {"msg": data}\r
+        current_app.logger.info("sending callback")\r
+        requests.post(url, json=data)\r
+    except Exception as e:\r
+        current_app.logger.info(e)\r
+    return\r
+\r
+def unix_time_millis(dt):\r
+    epoch = datetime.datetime.utcfromtimestamp(0)\r
+    return (dt - epoch).total_seconds() * 1000.0\r
+\r
+\r
+def route_check2(get_function=None, post_function=None, put_function=None, delete_function=None):\r
+    """\r
+     Info:\r
+        Since all routes perform the same pre-check and share a similar skeleton, this function factors out the pre-check for code reuse\r
+     Arguments (**kwargs): pass in the handler function(s) for each HTTP method; the method must be allowed by the route decorator\r
+        get_function => type: function\r
+        post_function => type: function\r
+        put_function => type: function\r
+        delete_function => type: function\r
+    Returns:\r
+        returns the return of the function call, typically a jsonified response.\r
+        you can capture the response in a var and execute logic, or just return the function call/response\r
+    E.G.:\r
+        response = route_check2(post_function = handle_post)\r
+        return route_check2(get_function = handle_get, post_function = handle_post)\r
+    """\r
+    response_dict = ResponseHelper.vth_response_dic()\r
+    start_time = unix_time_millis(datetime.datetime.now())\r
+    status_code = 200\r
+    if request.is_json and ResponseHelper.valid_json(request.data):\r
+        if(request.method == 'GET'):\r
+            response_dict = get_function(request, response_dict, config)\r
+        elif(request.method == 'POST'):\r
+            response_dict = post_function(request, response_dict, config)\r
+        elif(request.method == 'PUT'):\r
+            response_dict = put_function(request, response_dict, config)\r
+        elif(request.method == 'DELETE'):\r
+            response_dict = delete_function(request, response_dict, config)\r
+    else:\r
+        raise BadRequestException(406, "Invalid Json")\r
+    end_time = unix_time_millis(datetime.datetime.now())\r
+    response_dict['vthResponse']['testDurationMS'] = end_time-start_time\r
+    return jsonify(response_dict), status_code\r
+\r
+\r
+@ROUTES.route("/policies", methods=['GET'])\r
+def policies():\r
+    pass\r
+\r
+@ROUTES.route("/policy", methods=['GET', 'PUT', 'DELETE'])\r
+def handle_policy():\r
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_using_get, put_function = Policy.put_policy_using_put, delete_function=Policy.delete_policy_using_delete)\r
+    \r
+\r
+@ROUTES.route("/policy_ids", methods=['GET'])\r
+def handle_policy_ids():\r
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_ids_using_get)\r
+\r
+@ROUTES.route("/policy_schemas", methods=['GET'])\r
+def handle_policy_schemas():\r
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_schemas_using_get)\r
+\r
+@ROUTES.route("/policy_schema", methods=['GET'])\r
+def handle_policy_schema():\r
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_schema_using_get)\r
+\r
+@ROUTES.route("/policy_status", methods=['GET'])\r
+def handle_policy_status():\r
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_status_using_get)\r
+\r
+@ROUTES.route("/policy_types", methods=['GET'])\r
+def handle_policy_types():\r
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_types_using_get)\r
+\r
+\r
+@ROUTES.route("/", methods=['POST'])\r
+def executeRicRequest():\r
+    response_data = {\r
+        'vthResponse': {\r
+            'testDuration': '',\r
+            'dateTimeUTC': str(datetime.datetime.now()),\r
+            'abstractMessage': '',\r
+            'resultData': {}\r
+        }\r
+    }\r
+\r
+    startTime = unix_time_millis(datetime.datetime.now())\r
+    ret_url = request.args.get('retURL')\r
+    try:\r
+        if not request.is_json:\r
+            raise ValueError("request must be json")\r
+\r
+        requestData = request.get_json()\r
+\r
+        current_app.logger.info("A1 requestData: " + str(requestData))\r
+\r
+        action = requestData['action'].lower()\r
+        _check_incoming_request(requestData)\r
+\r
+        os.environ['NO_PROXY'] = '127.0.0.1'  # TODO: for testing with the mock server; remove in the final version\r
+        with open('config.json') as configFile:\r
+            config = json.load(configFile)\r
+\r
+        baseAddress = config['base_address']\r
+        if action == 'health_check' or action == 'list_policy':\r
+            res = requests.get(baseAddress + config['actions_path'][action])\r
+            response_data['vthResponse']['resultData']['statusCode'] = res.status_code\r
+            if action == 'health_check':\r
+                response_data['vthResponse']['resultData']['resultOutput'] = res.text\r
+            else:\r
+                response_data['vthResponse']['resultData']['resultOutput'] = res.json()\r
+        elif action == 'list_policy_instance':\r
+            res = requests.get(baseAddress + config['actions_path'][action]\r
+                               .format(policy_type_id=requestData['policy_type_id']))\r
+            response_data['vthResponse']['resultData']['statusCode'] = res.status_code\r
+            response_data['vthResponse']['resultData']['resultOutput'] = res.json()\r
+        elif action == 'get_policy_instance_status':\r
+            res = requests.get(baseAddress + config['actions_path'][action]\r
+                               .format(policy_type_id=requestData['policy_type_id'],\r
+                                       policy_instance_id=requestData['policy_instance_id']))\r
+            response_data['vthResponse']['resultData']['statusCode'] = res.status_code\r
+            response_data['vthResponse']['resultData']['resultOutput'] = res.json()\r
+        elif action == 'edit_policy' or action == 'edit_policy_instance':\r
+            res = _send_edit_request(requestData, config)\r
+            response_data['vthResponse']['resultData']['statusCode'] = res.status_code\r
+            if requestData['request_type'].lower() == 'get' and res.status_code == 200:\r
+                response_data['vthResponse']['resultData']['resultOutput'] = res.json()\r
+            else:\r
+                response_data['vthResponse']['resultData']['resultOutput'] = res.text\r
+\r
+    except Exception as ex:\r
+        endTime = unix_time_millis(datetime.datetime.now())\r
+        totalTime = endTime - startTime\r
+        response_data['vthResponse']['testDuration'] = totalTime\r
+        response_data['vthResponse']['abstractMessage'] = str(ex)\r
+        return jsonify(response_data)\r
+\r
+    endTime = unix_time_millis(datetime.datetime.now())\r
+    totalTime = endTime - startTime\r
+\r
+    response_data['vthResponse']['testDuration'] = totalTime\r
+\r
+    if ret_url is not None:\r
+        sendCallback(ret_url, response_data)\r
+        return '', 200\r
+\r
+    return jsonify(response_data), 200\r
+\r
+\r
+def _send_edit_request(request_data, config):\r
+    baseAddress = config['base_address']\r
+    path = ''\r
+    action = request_data['action']\r
+    policy_type_id = request_data['policy_type_id']\r
+    request_type = request_data['request_type']\r
+    if action == "edit_policy":\r
+        path = baseAddress + config['actions_path'][action].format(policy_type_id=policy_type_id)\r
+    if action == 'edit_policy_instance':\r
+        instance_id = request_data['policy_instance_id']\r
+        path = baseAddress + config['actions_path'][action].format(policy_type_id=policy_type_id,\r
+                                                                   policy_instance_id=instance_id)\r
+    if request_type == 'get':\r
+        return requests.get(path)\r
+    if request_type == 'put':\r
+        payload = request_data['payload']\r
+        return requests.put(path, json=payload)  # send the payload as a JSON body\r
+    if request_type == 'delete':\r
+        return requests.delete(path)\r
+\r
+\r
+def _check_incoming_request(requestData):  # check if the request is valid\r
+    if 'action' not in requestData:\r
+        raise KeyError('no action was specified')\r
+\r
+    action = requestData['action'].lower()\r
+    edit_actions = ['edit_policy', 'edit_policy_instance']\r
+    requires_policy_id = ['edit_policy', 'list_policy_instance',\r
+                          'edit_policy_instance', 'get_policy_instance_status']\r
+    requires_policy_instance_id = ['edit_policy_instance', 'get_policy_instance_status']\r
+    possible_actions = ['health_check', 'list_policy', 'edit_policy', 'list_policy_instance',\r
+                        'edit_policy_instance', 'get_policy_instance_status']\r
+    possible_request_type = ['get', 'put', 'delete']\r
+\r
+    if action not in possible_actions:\r
+        raise KeyError("invalid action")\r
+    if action in edit_actions:  # request type is required\r
+        if 'request_type' not in requestData:\r
+            raise KeyError('this action: ' + action + ' requires a request type')\r
+        if requestData['request_type'] not in possible_request_type:\r
+            raise KeyError('this request_type: ' + requestData['request_type'] + ' is not valid')\r
+        if requestData['request_type'] == 'put' and 'payload' not in requestData:\r
+            raise KeyError('put request requires a payload')\r
+    if action in requires_policy_id:\r
+        if 'policy_type_id' not in requestData:\r
+            raise KeyError('this action: ' + action + ' requires a policy_type_id')\r
+    if action in requires_policy_instance_id:\r
+        if 'policy_instance_id' not in requestData:\r
+            raise KeyError('this action: ' + action + ' requires a policy_instance_id')\r
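For reference, a minimal client-side sketch of a call to this endpoint. The host, port, and URL prefix are assumptions based on run.py below; all payload values are placeholders, and the key rules follow _check_incoming_request above:

    import requests

    VTH_URL = "http://localhost:6000/otf/vth/oran/a1/v1/"  # assumed local deployment

    payload = {
        "action": "edit_policy_instance",    # must be one of possible_actions
        "request_type": "put",               # edit actions require a request_type
        "policy_type_id": "1000",            # required for edit/list/status actions
        "policy_instance_id": "instance-1",  # required for instance-level actions
        "payload": {"threshold": 5},         # put requests require a payload
    }

    res = requests.post(VTH_URL, json=payload)
    print(res.json()["vthResponse"]["resultData"])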
diff --git a/a1-policy-manager-vth/app/routes/ric.py b/a1-policy-manager-vth/app/routes/ric.py
new file mode 100644 (file)
index 0000000..8441ac6
--- /dev/null
@@ -0,0 +1,12 @@
+\r
+from app.helpers import response_helper as ResponseHelper\r
+from app.helpers import ric_helper as Ric\r
+from . import config, ROUTES\r
+\r
+@ROUTES.route("/ric", methods=['GET'])\r
+def handle_ric():\r
+    return ResponseHelper.route_check(config=config, get_function=Ric.get_ric_using_get)\r
+\r
+@ROUTES.route("/rics", methods=['GET'])\r
+def handle_rics():\r
+    return ResponseHelper.route_check(config=config, get_function=Ric.get_rics_using_get)\r
diff --git a/a1-policy-manager-vth/app/routes/service.py b/a1-policy-manager-vth/app/routes/service.py
new file mode 100644 (file)
index 0000000..e06bf94
--- /dev/null
@@ -0,0 +1,16 @@
+from app.helpers import response_helper as ResponseHelper\r
+from app.helpers import service_helper as Service\r
+from . import config, ROUTES\r
+\r
+@ROUTES.route("/services", methods=['GET', 'DELETE'])\r
+def handle_services():\r
+    return ResponseHelper.route_check(config=config, get_function=Service.get_services_using_get, delete_function=Service.delete_services_using_delete)\r
+\r
+\r
+@ROUTES.route("/service", methods=['PUT'])\r
+def handle_service():\r
+    return ResponseHelper.route_check(config=config, put_function=Service.put_service_using_put)\r
+\r
+@ROUTES.route("/services/keepalive", methods=['PUT'])\r
+def handle_services_keepalive():\r
+    return ResponseHelper.route_check(config=config, put_function=Service.keep_alive_service_using_put)\r
diff --git a/a1-policy-manager-vth/config.ini b/a1-policy-manager-vth/config.ini
new file mode 100644 (file)
index 0000000..e9bc817
--- /dev/null
@@ -0,0 +1,14 @@
+[auth]\r
+creds_enabled = %(USE_CRED)s\r
+username = %(USER)s\r
+password = %(PW)s\r
+[api]\r
+base_url = %(API_URL)s\r
+port = %(API_PORT)s\r
+[resource]\r
+proxy_enabled = %(USE_PROXY)s\r
+https_proxy = %(HTTPS)s\r
+http_proxy = %(HTTP)s\r
+base_address = %(API_URL)s\r
+publish = /{topic_name}\r
+subscribe = /{topic_name}/{consumer_group}/{consumer_id}\r
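The %(VAR)s placeholders above are resolved from process environment variables, because the VTHs build their parser as ConfigParser(os.environ) (see get_config in the response_helper modules in this change). A minimal sketch of that resolution, with assumed values:

    import os
    from configparser import ConfigParser

    # Assumed environment, mirroring the placeholders in config.ini.
    os.environ.update({"USE_CRED": "true", "USER": "demo", "PW": "secret",
                       "API_URL": "http://localhost", "API_PORT": "3000",
                       "USE_PROXY": "false", "HTTPS": "", "HTTP": ""})

    config = ConfigParser(os.environ)  # env vars become interpolation defaults
    config.read("config.ini")
    print(config["auth"]["username"])                  # -> demo
    print(config.getboolean("auth", "creds_enabled"))  # -> True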
diff --git a/a1-policy-manager-vth/doc/a1-documentation.docx b/a1-policy-manager-vth/doc/a1-documentation.docx
new file mode 100644 (file)
index 0000000..dada0b5
Binary files /dev/null and b/a1-policy-manager-vth/doc/a1-documentation.docx differ
diff --git a/a1-policy-manager-vth/docker/Dockerfile b/a1-policy-manager-vth/docker/Dockerfile
new file mode 100644 (file)
index 0000000..960368c
--- /dev/null
@@ -0,0 +1,18 @@
+FROM python:3.7.4\r
+\r
+RUN python --version\r
+\r
+ADD pip-requirements.txt pip-requirements.txt\r
+ADD app app\r
+ADD config.ini config.ini\r
+ADD run.py run.py\r
+\r
+RUN mkdir -p /otf/logs\r
+\r
+RUN python -m pip install --proxy http://one.proxy.att.com:8080 -r pip-requirements.txt\r
+\r
+ENV USER=default_user\r
+ENV PW=default_pass\r
+\r
+\r
+ENTRYPOINT ["python", "run.py"]\r
diff --git a/a1-policy-manager-vth/docker/container-tag.yaml b/a1-policy-manager-vth/docker/container-tag.yaml
new file mode 100644 (file)
index 0000000..ee078db
--- /dev/null
@@ -0,0 +1,15 @@
+---
+#   Copyright (c) 2019 AT&T Intellectual Property.
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+tag: 0.0.1
diff --git a/a1-policy-manager-vth/helm/a1-policy-manager-vth/.helmignore b/a1-policy-manager-vth/helm/a1-policy-manager-vth/.helmignore
new file mode 100644 (file)
index 0000000..daebc7d
--- /dev/null
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.\r
+# This supports shell glob matching, relative path matching, and\r
+# negation (prefixed with !). Only one pattern per line.\r
+.DS_Store\r
+# Common VCS dirs\r
+.git/\r
+.gitignore\r
+.bzr/\r
+.bzrignore\r
+.hg/\r
+.hgignore\r
+.svn/\r
+# Common backup files\r
+*.swp\r
+*.bak\r
+*.tmp\r
+*~\r
+# Various IDEs\r
+.project\r
+.idea/\r
+*.tmproj\r
diff --git a/a1-policy-manager-vth/helm/a1-policy-manager-vth/Chart.yaml b/a1-policy-manager-vth/helm/a1-policy-manager-vth/Chart.yaml
new file mode 100644 (file)
index 0000000..66e0b29
--- /dev/null
@@ -0,0 +1,5 @@
+apiVersion: v1\r
+appVersion: "1.0"\r
+description: A Helm chart for the A1 Policy Manager Virtual Test Head\r
+name: a1-policy-manager-vth\r
+version: 0.0.1\r
diff --git a/a1-policy-manager-vth/helm/a1-policy-manager-vth/templates/deployment.yaml b/a1-policy-manager-vth/helm/a1-policy-manager-vth/templates/deployment.yaml
new file mode 100644 (file)
index 0000000..89d053d
--- /dev/null
@@ -0,0 +1,137 @@
+apiVersion: extensions/v1beta1\r
+kind: Deployment\r
+metadata:\r
+  name: {{ .Values.appName}}\r
+  namespace: {{.Values.namespace}}\r
+  labels:\r
+    app: {{ .Values.appName}}\r
+    version: {{.Values.version}}\r
+spec:\r
+  revisionHistoryLimit: 1\r
+  minReadySeconds: 10\r
+  strategy:\r
+  # indicate which strategy we want for rolling update\r
+    type: RollingUpdate\r
+    rollingUpdate:\r
+      maxSurge: 0\r
+      maxUnavailable: 1\r
+  replicas: {{ .Values.replicas}}\r
+  selector:\r
+    matchLabels:\r
+      app: {{ .Values.appName}}\r
+      version: {{.Values.version}}\r
+  template:\r
+    metadata:\r
+      labels:\r
+        app: {{ .Values.appName}}\r
+        version: {{.Values.version}}\r
+    spec:\r
+      serviceAccount: default\r
+      volumes:\r
+      - name: {{ .Values.appName}}-cert-volume\r
+        secret:\r
+          secretName: {{.Values.sharedCert}}\r
+          optional: true\r
+          items:\r
+          - key: PEM_CERT\r
+            path: otf.pem\r
+          - key: PEM_KEY\r
+            path: privateKey.pem\r
+#      {{ if or (eq .Values.env "st") (eq .Values.env "prod-dr")}} TODO UNCOMMENT WHEN PUSHING TO ORAN\r
+#      {{else}}\r
+#      - name: logging-pvc\r
+#        persistentVolumeClaim:\r
+#          {{if eq .Values.env "prod"}}\r
+#          claimName: {{ .Values.pvc.prod | quote }}\r
+#          {{ else }}\r
+#          claimName: {{ .Values.pvc.dev | quote }}\r
+#          {{ end }}\r
+#      {{end}}\r
+      containers:\r
+      - name: {{ .Values.appName}}\r
+        image: {{ .Values.image}}\r
+        imagePullPolicy: Always\r
+        ports:\r
+        - name: http\r
+          containerPort: 6000\r
+          # nodePort is set on the Service template, not on the container port\r
+          protocol: TCP\r
+#        {{ if eq .Values.env "st"}} TODO UNCOMMENT FOR ORAN?\r
+#        resources:\r
+#          limits:\r
+#            memory: "512Mi"\r
+#            cpu: "500m"\r
+#          requests:\r
+#            memory: "256Mi"\r
+#            cpu: "100m"\r
+#        {{else}}\r
+#        resources:\r
+#          limits:\r
+#            memory: "1Gi"\r
+#            cpu: "1"\r
+#          requests:\r
+#            memory: "1Gi"\r
+#            cpu: "1"\r
+#        {{end}}\r
+        env:\r
+        - name: NAMESPACE\r
+          value: {{.Values.namespace}}\r
+        - name: APP_NAME\r
+          value: {{ .Values.appName}}\r
+        - name: APP_VERSION\r
+          value: {{.Values.version}}\r
+        - name: USE_CRED\r
+          value: {{.Values.auth.enabled | quote }}\r
+        - name: USER\r
+          valueFrom:\r
+            secretKeyRef:\r
+              name: {{ .Values.appName}}\r
+              key: api_user\r
+              optional: true\r
+        - name: PW\r
+          valueFrom:\r
+            secretKeyRef:\r
+              name: {{ .Values.appName}}\r
+              key: api_pass\r
+              optional: true\r
+        - name: USE_PROXY\r
+          value: {{.Values.proxy.enabled | quote }}\r
+        - name: HTTPS\r
+          value: {{.Values.proxy.https | quote }}\r
+        - name: HTTP\r
+          value: {{.Values.proxy.http | quote }}\r
+        - name: API_URL\r
+          value: {{.Values.api.base_url}}\r
+        - name: API_PORT\r
+          value: {{.Values.api.port | quote }}\r
+        volumeMounts:\r
+        - name: {{.Values.appName}}-cert-volume\r
+          mountPath: /opt/cert\r
+#        {{ if or (eq .Values.env "st") (eq .Values.env "prod-dr")}}\r
+#        {{else}}\r
+#        - name: logging-pvc\r
+#          mountPath: "/otf/logs"\r
+#        {{end}}\r
+        livenessProbe:\r
+          httpGet:\r
+            path: {{.Values.health}}\r
+            port: http\r
+            scheme: HTTP\r
+            httpHeaders:\r
+              - name: X-Custom-Header\r
+                value: Alive\r
+          initialDelaySeconds: 30\r
+          timeoutSeconds: 30\r
+          periodSeconds: 30\r
+        readinessProbe:\r
+          httpGet:\r
+            path: {{.Values.health}}\r
+            port: http\r
+            scheme: HTTP\r
+            httpHeaders:\r
+              - name: X-Custom-Header\r
+                value: Ready\r
+          initialDelaySeconds: 30\r
+          timeoutSeconds: 30\r
+          periodSeconds: 30\r
+      restartPolicy: Always\r
diff --git a/a1-policy-manager-vth/helm/a1-policy-manager-vth/templates/secret.yaml b/a1-policy-manager-vth/helm/a1-policy-manager-vth/templates/secret.yaml
new file mode 100644 (file)
index 0000000..1aabe3d
--- /dev/null
@@ -0,0 +1,9 @@
+\r
+apiVersion: v1\r
+kind: Secret\r
+metadata:\r
+  name: {{ .Values.appName}}\r
+type: Opaque\r
+data:\r
+  api_user: {{ .Values.auth.user | b64enc }}\r
+  api_pass: {{ .Values.auth.pw | b64enc }}\r
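For a quick sanity check of the rendered secret, Helm's b64enc filter matches Python's standard base64 encoding (the sample values mirror the defaults in values.yaml below):

    import base64

    # b64enc on auth.user / auth.pw is plain base64:
    print(base64.b64encode(b"user").decode())  # -> dXNlcg==
    print(base64.b64encode(b"pw").decode())    # -> cHc=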
diff --git a/a1-policy-manager-vth/helm/a1-policy-manager-vth/templates/service.yaml b/a1-policy-manager-vth/helm/a1-policy-manager-vth/templates/service.yaml
new file mode 100644 (file)
index 0000000..291f9fc
--- /dev/null
@@ -0,0 +1,18 @@
+apiVersion: v1\r
+kind: Service\r
+metadata:\r
+  name: {{ .Values.appName }}\r
+  namespace: {{ .Values.namespace}}\r
+  labels:\r
+    app: {{ .Values.appName }}\r
+    version: {{ .Values.version}}\r
+spec:\r
+  type: NodePort\r
+  ports:\r
+  - name: http\r
+    port: 6000\r
+    protocol: TCP\r
+    nodePort: {{ .Values.nodePort}}\r
+  selector:\r
+    app: {{ .Values.appName }}\r
+    version: {{ .Values.version}}\r
diff --git a/a1-policy-manager-vth/helm/a1-policy-manager-vth/values.yaml b/a1-policy-manager-vth/helm/a1-policy-manager-vth/values.yaml
new file mode 100644 (file)
index 0000000..5c3e5a0
--- /dev/null
@@ -0,0 +1,23 @@
+appName: a1-policy-manager-vth\r
+env: dev\r
+version: 0.0.1-SNAPSHOT\r
+image: dockercentral.it.att.com:5100/com.att.ecomp.otf.dev/a1-policy-manager-vth:0.0.1-SNAPSHOT\r
+namespace: com-att-ecomp-otf-dev #org-oran-otf\r
+nodePort: 32330\r
+replicas: 1\r
+health : /otf/vth/oran/a1/v1/health\r
+sharedCert: otf-cert-secret-builder\r
+pvc:\r
+  dev: org-oran-otf-dev-logs-pv\r
+  prod: org-oran-otf-prod-logs-pv\r
+auth:\r
+  enabled: true\r
+  user: user\r
+  pw: pw\r
+proxy:\r
+  enabled: false\r
+  http: \r
+  https: \r
+api:\r
+  base_url: http://njcdtl08rg9907.itservices.sbc.com\r
+  port: 3000\r
diff --git a/a1-policy-manager-vth/pip-requirements.txt b/a1-policy-manager-vth/pip-requirements.txt
new file mode 100644 (file)
index 0000000..d25f478
--- /dev/null
@@ -0,0 +1,4 @@
+flask\r
+flask-cors\r
+requests\r
+configparser\r
diff --git a/a1-policy-manager-vth/run.py b/a1-policy-manager-vth/run.py
new file mode 100644 (file)
index 0000000..613ff4f
--- /dev/null
@@ -0,0 +1,52 @@
+"""\r
+#   Copyright (c) 2019 AT&T Intellectual Property.                             #\r
+#                                                                              #\r
+#   Licensed under the Apache License, Version 2.0 (the "License");            #\r
+#   you may not use this file except in compliance with the License.           #\r
+#   You may obtain a copy of the License at                                    #\r
+#                                                                              #\r
+#       http://www.apache.org/licenses/LICENSE-2.0                             #\r
+#                                                                              #\r
+#   Unless required by applicable law or agreed to in writing, software        #\r
+#   distributed under the License is distributed on an "AS IS" BASIS,          #\r
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #\r
+#   See the License for the specific language governing permissions and        #\r
+#   limitations under the License.                                             #\r
+################################################################################\r
+# File name: run.py                                                            #\r
+# Description: vth for A1 service                                              #\r
+# Date created: 04/22/2020                                                     #\r
+# Last modified: 04/30/2020                                                    #\r
+# Python Version: 3.7.4                                                        #\r
+# Author: Raul Gomez (rg9907)                                                 #\r
+# Email: rg9907@att.com                                                        #\r
+################################################################################\r
+"""\r
+import logging\r
+from logging import FileHandler\r
+from flask import Flask\r
+from flask.logging import create_logger\r
+from app.routes import ROUTES, ERRORS\r
+#from dotenv import load_dotenv\r
+\r
+#load dev env vars\r
+#load_dotenv()\r
+\r
+APP = Flask(__name__)\r
+LOG = create_logger(APP)\r
+\r
+# Werkzeug logs every request by default; set the level to WARNING to silence it.\r
+logging.getLogger("werkzeug").setLevel(logging.DEBUG)\r
+#logging.getLogger("werkzeug").setLevel(logging.WARNING)\r
+APP.register_blueprint(ERRORS)\r
+APP.register_blueprint(ROUTES, url_prefix="/otf/vth/oran/a1/v1")\r
+\r
+if __name__ == '__main__':\r
+    LOG_HANDLER = FileHandler('a1-policy-manager.log', mode='a')\r
+    LOG_HANDLER.setLevel(logging.INFO)\r
+    LOG.setLevel(logging.INFO)\r
+    LOG.addHandler(LOG_HANDLER)\r
+    #context = ('opt/cert/otf.pem', 'opt/cert/privateKey.pem')\r
+    # app.run(debug = False, host = '0.0.0.0', port = 5000, ssl_context = context)\r
+    APP.run(debug=False, host='0.0.0.0', port=6000)\r
+    #APP.run(debug=False, host='0.0.0.0', port=6000, ssl_context = context)\r
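A quick smoke test once run.py (or the container) is up, assuming the default port and the URL prefix registered above:

    import requests

    BASE = "http://localhost:6000/otf/vth/oran/a1/v1"  # port/prefix from run.py above
    print(requests.get(BASE + "/health").text)  # expected: "UP"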
diff --git a/a1-sdnc-vth/.environ b/a1-sdnc-vth/.environ
new file mode 100644 (file)
index 0000000..0b3fa87
--- /dev/null
@@ -0,0 +1,9 @@
+# If using dotenv, rename this file to .env and set the environment variables below\r
+USER=String\r
+PW=String\r
+AUTH=Boolean\r
+PROXY=Boolean\r
+HTTP=String\r
+HTTPS=String\r
+API_URL=String\r
+API_PORT=Int\r
diff --git a/a1-sdnc-vth/Jenkinsfile b/a1-sdnc-vth/Jenkinsfile
new file mode 100644 (file)
index 0000000..a9bfbaa
--- /dev/null
@@ -0,0 +1,158 @@
+#!/usr/bin/env groovy\r
+\r
+/*  Copyright (c) 2019 AT&T Intellectual Property.                             #\r
+#                                                                              #\r
+#   Licensed under the Apache License, Version 2.0 (the "License");            #\r
+#   you may not use this file except in compliance with the License.           #\r
+#   You may obtain a copy of the License at                                    #\r
+#                                                                              #\r
+#       http://www.apache.org/licenses/LICENSE-2.0                             #\r
+#                                                                              #\r
+#   Unless required by applicable law or agreed to in writing, software        #\r
+#   distributed under the License is distributed on an "AS IS" BASIS,          #\r
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #\r
+#   See the License for the specific language governing permissions and        #\r
+#   limitations under the License.                                             #\r
+##############################################################################*/\r
+\r
+\r
+properties([[$class: 'ParametersDefinitionProperty', parameterDefinitions: [\r
+    [$class: 'hudson.model.StringParameterDefinition', name: 'PHASE', defaultValue: "BUILD"],\r
+    [$class: 'hudson.model.StringParameterDefinition', name: 'ENV', defaultValue: "dev"],\r
+    [$class: 'hudson.model.StringParameterDefinition', name: 'MECHID', defaultValue: "m13591_otf_dev"],\r
+    [$class: 'hudson.model.StringParameterDefinition', name: 'KUBE_CONFIG', defaultValue: "kubeConfig-dev"],\r
+    [$class: 'hudson.model.StringParameterDefinition', name: 'TILLER_NAMESPACE', defaultValue: "com-att-ecomp-otf-dev"]\r
+]]])\r
+\r
+\r
+    echo "Build branch: ${env.BRANCH_NAME}"\r
+\r
+    node("docker"){\r
+      stage 'Checkout'\r
+        checkout scm\r
+        PHASES=PHASE.tokenize( '_' );\r
+      echo "PHASES : " + PHASES\r
+\r
+\r
+        ARTIFACT_ID="a1-sdnc-vth";\r
+      VERSION="0.0.1-SNAPSHOT";\r
+      NAMESPACE="com.att.ecomp.otf" //TODO change back to org-otf-oran when done testing\r
+        DOCKER_REGISTRY="dockercentral.it.att.com:5100"\r
+\r
+        if( ENV.equalsIgnoreCase("dev") ){\r
+          IMAGE_NAME=DOCKER_REGISTRY + "/" + NAMESPACE + ".dev" + "/" + ARTIFACT_ID +  ":" + VERSION\r
+\r
+        }\r
+      if( ENV.equalsIgnoreCase("prod") || ENV.equalsIgnoreCase("prod-dr")){\r
+        IMAGE_NAME=DOCKER_REGISTRY + "/" + NAMESPACE + ".prod" + "/" + ARTIFACT_ID +  ":" + VERSION\r
+\r
+      }\r
+\r
+      if( ENV.equalsIgnoreCase("st") ){\r
+        IMAGE_NAME=DOCKER_REGISTRY + "/" + NAMESPACE + ".st" + "/" + ARTIFACT_ID +  ":" + VERSION\r
+\r
+      }\r
+\r
+      echo "Artifact: " + IMAGE_NAME\r
+\r
+        withEnv(["PATH=${env.PATH}:${env.WORKSPACE}/linux-amd64", "HELM_HOME=${env.WORKSPACE}"]) {\r
+\r
+          echo "PATH=${env.PATH}"\r
+            echo "HELM_HOME=${env.HELM_HOME}"\r
+\r
+            if (PHASES.contains("BUILD")){\r
+              dir("./a1-sdnc-vth"){\r
+                stage 'Publish Artifact'\r
+\r
+                  withCredentials([usernamePassword(credentialsId: MECHID, usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {\r
+\r
+                    echo "Artifact: " + IMAGE_NAME\r
+\r
+                      sh """\r
+                      docker login $DOCKER_REGISTRY --username $USERNAME --password $PASSWORD\r
+                      docker build -t $IMAGE_NAME .\r
+                      docker push $IMAGE_NAME\r
+                      """\r
+                  }\r
+              }\r
+\r
+\r
+            }\r
+\r
+          if (PHASES.contains("DEPLOY") || PHASES.contains("UNDEPLOY")) {\r
+\r
+            stage 'Init Helm'\r
+\r
+              //check if helm exists if not install\r
+              if(fileExists('linux-amd64/helm')){\r
+                sh """\r
+                  echo "helm is already installed"\r
+                  """\r
+              }\r
+              else{\r
+                //download helm\r
+                sh """\r
+                  echo "installing helm"\r
+                  wget  https://storage.googleapis.com/kubernetes-helm/helm-v2.14.3-linux-amd64.tar.gz\r
+                  tar -xf helm-v2.14.3-linux-amd64.tar.gz\r
+                  rm helm-v2.14.3-linux-amd64.tar.gz\r
+                  """\r
+              }\r
+\r
+            withCredentials([file(credentialsId: KUBE_CONFIG, variable: 'KUBECONFIG')]) {\r
+\r
+              dir('a1-sdnc-vth/helm'){\r
+                //check if charts are valid, and then perform dry run, if successful then upgrade/install charts\r
+\r
+                if (PHASES.contains("UNDEPLOY") ) {\r
+                  stage 'Undeploy'\r
+\r
+                    sh """\r
+                    helm delete --tiller-namespace=$TILLER_NAMESPACE --purge $ARTIFACT_ID\r
+                    """\r
+                }\r
+\r
+                //NOTE Double quotes are used below to access groovy variables like artifact_id and tiller_namespace\r
+                if (PHASES.contains("DEPLOY") ){\r
+                  stage 'Deploy'\r
+                    withCredentials([usernamePassword(credentialsId: MECHID, usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {\r
+\r
+                      sh """\r
+                        echo "Validate Yaml"\r
+                        helm lint $ARTIFACT_ID\r
+\r
+                        echo "View Helm Templates"\r
+                        helm template $ARTIFACT_ID --set appName=$ARTIFACT_ID \\r
+                        --set version=$VERSION  \\r
+                        --set env=$ENV \\r
+                        --set image=$IMAGE_NAME \\r
+                        --set namespace=$TILLER_NAMESPACE\r
+\r
+                        echo "Perform Dry Run Of Install"\r
+                        helm upgrade --tiller-namespace=$TILLER_NAMESPACE --install --dry-run $ARTIFACT_ID $ARTIFACT_ID \\r
+                        --set appName=$ARTIFACT_ID \\r
+                        --set version=$VERSION  \\r
+                        --set env=$ENV \\r
+                        --set image=$IMAGE_NAME \\r
+                        --set namespace=$TILLER_NAMESPACE\r
+\r
+\r
+                        echo "Helm Install/Upgrade"\r
+                        helm upgrade --tiller-namespace=$TILLER_NAMESPACE --install $ARTIFACT_ID $ARTIFACT_ID \\r
+                        --set appName=$ARTIFACT_ID \\r
+                        --set version=$VERSION  \\r
+                        --set env=$ENV \\r
+                        --set image=$IMAGE_NAME \\r
+                        --set namespace=$TILLER_NAMESPACE\r
+\r
+                        """\r
+                    }\r
+                }\r
+\r
+              }\r
+            }\r
+          }\r
+\r
+        }\r
+    }\r
diff --git a/a1-sdnc-vth/app/__init__.py b/a1-sdnc-vth/app/__init__.py
new file mode 100644 (file)
index 0000000..14b5496
--- /dev/null
@@ -0,0 +1,11 @@
+"""\r
+    Module Info:\r
+    Anything imported to this file will be available to outside modules.\r
+    Import everything using star, methods or anything that should not be\r
+    used by the outside modules should not be imported on the nested\r
+    __init__ files.\r
+"""\r
+from .routes import *\r
+from .errors import *\r
+from .models import *\r
+from .helpers import *\r
diff --git a/a1-sdnc-vth/app/errors/__init__.py b/a1-sdnc-vth/app/errors/__init__.py
new file mode 100644 (file)
index 0000000..b491f42
--- /dev/null
@@ -0,0 +1,6 @@
+"""\r
+    Module Info:\r
+    Anything imported to this file will be available to outside modules.\r
+    Only import methods that are intended for use by outside modules\r
+"""\r
+from .bad_request_exception import BadRequestException\r
diff --git a/a1-sdnc-vth/app/errors/bad_request_exception.py b/a1-sdnc-vth/app/errors/bad_request_exception.py
new file mode 100644 (file)
index 0000000..a3e3d22
--- /dev/null
@@ -0,0 +1,21 @@
+"""\r
+Args:\r
+Returns:\r
+Examples:\r
+"""\r
+class BadRequestException(Exception):\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    def __init__(self, status_code=406, message="Not Acceptable Response"):\r
+        cases = {\r
+            401:"Unauthorized",\r
+            403:"Forbidden",\r
+            404:"Not Found",\r
+            423:"Not Operational"\r
+            }\r
+        super().__init__(cases.get(status_code, message))\r
+        self.status_code = status_code\r
+        self.message = message\r
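A small usage sketch: for a mapped status code the exception text comes from the cases table, while .message keeps the caller-supplied text (which the error handler in routes/errors.py reports):

    from app.errors.bad_request_exception import BadRequestException

    try:
        raise BadRequestException(404, "policy not found")
    except BadRequestException as ex:
        print(ex.status_code)  # -> 404
        print(str(ex))         # -> "Not Found" (mapped by the cases table)
        print(ex.message)      # -> "policy not found"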
diff --git a/a1-sdnc-vth/app/helpers/__init__.py b/a1-sdnc-vth/app/helpers/__init__.py
new file mode 100644 (file)
index 0000000..3313af8
--- /dev/null
@@ -0,0 +1,12 @@
+"""\r
+    Module Info:\r
+    Anything imported to this file will be available to outside modules.\r
+    Only import methods that are intended for use by outside modules\r
+"""\r
+from .error_helper import *\r
+from .response_helper import *\r
+from .time_helper import *\r
+from .policy_helper import *\r
+from .service_helper import *\r
+from .ric_helper import *\r
+from .action_helper import *\r
diff --git a/a1-sdnc-vth/app/helpers/action_helper.py b/a1-sdnc-vth/app/helpers/action_helper.py
new file mode 100644 (file)
index 0000000..a4f7a3a
--- /dev/null
@@ -0,0 +1,42 @@
+import json\r
+import ast\r
+from app.helpers import response_helper as ResponseHelper\r
+from flask import current_app, jsonify\r
+from app.errors.bad_request_exception import BadRequestException\r
+import requests\r
+\r
+\r
+def execute_action(request, response_dict, config):\r
+    headers = ResponseHelper.create_headers()\r
+    request_data = request.json\r
+    action_request = request_data.get("action").lower()\r
+    \r
+    creds = ResponseHelper.get_credentials(request_data, config)\r
+    proxies = ResponseHelper.get_proxies(config)\r
+    url = ResponseHelper.create_url(config=config, uri_path="/restconf/operations/A1-ADAPTER-API:"+action_request)\r
+#    ret_url = request.args.get('retURL')\r
+\r
+    json_req = ast.literal_eval(request_data["action_data"]["jsonBody"])\r
+    current_app.logger.info("Requesting Url: {}, body: {}, auth: {}, proxies: {}".format(url, json_req, creds, proxies))\r
+    try:\r
+        res = requests.post(url, proxies=proxies, auth=creds, headers=headers, json=json_req)\r
+        response = {\r
+                "status_code":res.status_code,\r
+                "result": res.json()\r
+                }\r
+    except json.decoder.JSONDecodeError:\r
+        response = {\r
+                "status_code":res.status_code,\r
+                "result": res.reason\r
+                }\r
+    except requests.exceptions.RequestException:\r
+        response = {\r
+                "status_code":504,\r
+                "result": "Something Happned"\r
+                }\r
+    finally:\r
+        response_dict['vthResponse']['resultData'] = response\r
+#        if ret_url is not None:\r
+#            ResponseHelper.sendCallback(ret_url,response_dict)\r
+#            return '',200\r
+        return response_dict\r
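Note that execute_action parses action_data["jsonBody"] with ast.literal_eval, so the caller must send it as a string containing a literal, not as a nested object. A sketch with an assumed A1 adapter body (the inner keys are placeholders):

    import ast

    # Hypothetical request fragment.
    action_data = {"jsonBody": "{'policy-type-id': 1000, 'policy-id': 'p1'}"}

    json_req = ast.literal_eval(action_data["jsonBody"])
    print(json_req["policy-type-id"])  # -> 1000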
diff --git a/a1-sdnc-vth/app/helpers/error_helper.py b/a1-sdnc-vth/app/helpers/error_helper.py
new file mode 100644 (file)
index 0000000..b34cedf
--- /dev/null
@@ -0,0 +1,51 @@
+from flask import current_app\r
+import datetime\r
+"""\r
+Args:\r
+Returns:\r
+Examples:\r
+"""\r
+\r
+def error_dic(error, status_code, response_message="Something went wrong, vth encountered an error"):\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    message = [str(x) for x in error.args]\r
+    error_log={\r
+            "error":{\r
+                "type": error.__class__.__name__,\r
+                "message": message\r
+                }\r
+            }\r
+    response_data = {\r
+        "vthResponse": {\r
+            "testDurationMS": 0,\r
+            'dateTimeUTC': str(datetime.datetime.utcnow()),\r
+            "abstractMessage": "Failed",\r
+            "error":response_message,\r
+            "status_code": status_code,\r
+            "resultData": {}\r
+        }\r
+    }\r
+    current_app.logger.error(error_log)\r
+    return response_data\r
+\r
+def error_dic2(error, status_code=500):\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    message = [str(x) for x in error.args]\r
+    response = {\r
+        "status_code" : status_code,\r
+        "success": False,\r
+        "error":{\r
+            "type": error.__class__.__name__,\r
+            "message": message\r
+            }\r
+        }\r
+    return response\r
+\r
diff --git a/a1-sdnc-vth/app/helpers/policy_helper.py b/a1-sdnc-vth/app/helpers/policy_helper.py
new file mode 100644 (file)
index 0000000..ea4fedc
--- /dev/null
@@ -0,0 +1,163 @@
+from app.helpers import response_helper as ResponseHelper\r
+from flask import current_app\r
+from app.errors.bad_request_exception import BadRequestException\r
+import requests\r
+\r
+def get_policy_using_get(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    if 'id' not in json_data: raise BadRequestException(406, "Request is missing id")\r
+    param = {'id': json_data['id']}\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    url = ResponseHelper.create_url(config=config, uri_path="/policy")\r
+    res = requests.get(url, auth=creds, params=param)\r
+    response = {\r
+            "status_code":res.status_code,\r
+            "result": res.json()\r
+            }\r
+    response_dict['vthResponse']['resultData'] = response\r
+\r
+    return response_dict\r
+def put_policy_using_put(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    required = {'id', 'jsonBody', 'ric', 'service'}\r
+    param_keys = {'id', 'ric', 'service'}\r
+    optional = {"type"}\r
+    data_keys = param_keys.copy()\r
+    keys = set(json_data.keys())\r
+    if not required <= keys:\r
+        raise BadRequestException(406, "Request is missing required values {}".format(required))\r
+    if optional <= keys: data_keys.update(optional)\r
+    param = {}\r
+    body = {}\r
+    for key in data_keys:\r
+        param[key] = json_data[key]\r
+    body['jsonBody'] = json_data['jsonBody']\r
+\r
+    url = ResponseHelper.create_url(config=config, uri_path="/policy")\r
+    res = requests.put(url, auth=creds, params=param, json=body)\r
+    response = {\r
+            "status_code":res.status_code,\r
+            "result": res.json()\r
+            }\r
+    response_dict['vthResponse']['resultData'] = response\r
+    return response_dict\r
+def delete_policy_using_delete(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    keys = set(json_data.keys())\r
+    required = {'id'}\r
+    if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required))\r
+    param = {'id': json_data['id']}\r
+\r
+    url = ResponseHelper.create_url(config=config, uri_path="/policy")\r
+    res = requests.delete(url, auth=creds, params=param)\r
+    response = {\r
+            "status_code":res.status_code,\r
+            "result": res.json()\r
+            }\r
+    response_dict['vthResponse']['resultData'] = response\r
+    return response_dict\r
+\r
+def get_policy_ids_using_get(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    param = {\r
+            "ric":json_data["ric"] if "ric" in json_data else "",\r
+            "service":json_data["service"] if "service" in json_data else "",\r
+            "type":json_data["type"] if "type" in json_data else ""\r
+            }\r
+\r
+    url = ResponseHelper.create_url(config=config, uri_path="/policy_ids")\r
+    res = requests.get(url, auth=creds, params=param)\r
+    response = {\r
+            "status_code":res.status_code,\r
+            "result": res.json()\r
+            }\r
+    response_dict['vthResponse']['resultData'] = response\r
+    return response_dict\r
+\r
+def get_policy_schema_using_get(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    #username = config['auth']['username'] if 'username' not in json_data else json_data['username']\r
+    #password = config['auth']['password'] if 'password' not in json_data else json_data['password']\r
+    #creds = (username, password)\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    keys = set(json_data.keys())\r
+    required = {'id'}\r
+    if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required))\r
+    param = {'id': json_data['id']}\r
+\r
+    url = ResponseHelper.create_url(config=config, uri_path="/policy_schema")\r
+    res = requests.get(url, auth=creds, params=param)\r
+    response = {\r
+            "status_code":res.status_code,\r
+            "result": res.json()\r
+            }\r
+    response_dict['vthResponse']['resultData'] = response\r
+    return response_dict\r
+def get_policy_schemas_using_get(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    #username = config['auth']['username'] if 'username' not in json_data else json_data['username']\r
+    #password = config['auth']['password'] if 'password' not in json_data else json_data['password']\r
+    #creds = (username, password)\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    param = {\r
+            "ric":json_data['ric'] if 'ric' in json_data else ""\r
+            }\r
+    #api_response = requests.put(url, credentials=creds, params=param)\r
+\r
+    url = ResponseHelper.create_url(config=config, uri_path="/policy_schemas")\r
+    res = requests.get(url, auth=creds, params=param)\r
+    response = {\r
+            "status_code":res.status_code,\r
+            "result": res.json()\r
+            }\r
+    response_dict['vthResponse']['resultData'] = response\r
+    return response_dict\r
+def get_policy_status_using_get(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    #username = config['auth']['username'] if 'username' not in json_data else json_data['username']\r
+    #password = config['auth']['password'] if 'password' not in json_data else json_data['password']\r
+    #creds = (username, password)\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    keys = set(json_data.keys())\r
+    required = {'id'}\r
+    if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required))\r
+    param = {\r
+            "id":json_data["id"]\r
+            }\r
+\r
+    response_dict['vthResponse']['resultData'] = param\r
+    #api_response = requests.get(url, credentials=creds, params=param)\r
+    return response_dict\r
+def get_policy_types_using_get(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    param = {\r
+            'ric': json_data['ric'] if 'ric' in json_data else ""\r
+            }\r
+\r
+    url = ResponseHelper.create_url(config=config, uri_path="/a1-p/policytypes")\r
+    res = requests.get(url, auth=creds, params=param)\r
+    response = {\r
+            "status_code":res.status_code,\r
+            "result": res.json()\r
+            }\r
+    response_dict['vthResponse']['resultData'] = response\r
+    return response_dict\r
+\r
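For illustration, the top-level fields put_policy_using_put reads, inside the envelope that route_check (response_helper.py, below) validates; all values here are placeholders:

    request_body = {
        "action": "puta1policy",
        "auth": {"username": "user", "password": "pw"},
        "action_data": {"query": "{}", "jsonBody": "{}"},  # must be literal strings
        "id": "policy-1",           # required
        "ric": "ric-1",             # required
        "service": "service-1",     # required
        "jsonBody": {"scope": {}},  # required; forwarded as the PUT body
        "type": "1000",             # optional; added to the query params when present
    }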
diff --git a/a1-sdnc-vth/app/helpers/response_helper.py b/a1-sdnc-vth/app/helpers/response_helper.py
new file mode 100644 (file)
index 0000000..833598f
--- /dev/null
@@ -0,0 +1,218 @@
+import ast\r
+import requests\r
+from configparser import ConfigParser\r
+import os\r
+import datetime\r
+import json\r
+from flask import request, jsonify, current_app\r
+from app.helpers.time_helper import unix_time_millis\r
+from app.errors.bad_request_exception import BadRequestException\r
+\r
+"""\r
+    Module Info:\r
+"""\r
+def create_headers(enable_cache=True, content_type="application/json", connection="Keep-Alive"):\r
+    headers = {'Cache-Control':'no-cache, no-store, must-revalidate', "Pragma":"no-cache", "Expires":"0"} if not enable_cache else {}\r
+    headers['content-type'] = content_type\r
+    headers['connection'] = connection\r
+    return headers\r
+def create_url(config=None, uri_path = "/", url_string=None):\r
+    return config['api']['base_url'] +":"+ config['api']['port']+uri_path if url_string is None else url_string\r
+\r
+def valid_string_json(string, response_message="Invalid json string in query or jsonBody, format requires quoted json object e.g. \"{'key':'value', 'key2':{'innerKey':'innerValue'}}\""):\r
+    try:\r
+        string_to_dict = ast.literal_eval(string)\r
+    except(Exception):\r
+        raise BadRequestException(406, response_message)\r
+    return True\r
+def route_check(config=None, get_function=None, post_function=None, put_function=None, delete_function=None):\r
+    """\r
+     Info:\r
+        Since all routes do the same pre-check and have a similar skeleton, this function just refactored the pre-check for code reuse\r
+     Arguments (**kwargs): pass the handler for each HTTP method; the method must also be allowed by the route decorator\r
+        get_function => type: function\r
+        post_function => type: function\r
+        put_function => type: function\r
+        delete_function => type: function\r
+    Returns:\r
+        returns the return of the function call, typically a jsonified response.\r
+        you can capture response in a var and execute logic or you can just return the function call/response \r
+    E.G.:\r
+        response = route_check(post_function = handle_post)\r
+        return route_check(get_function = handle_get, post_function = handle_post)\r
+    """\r
+    if not request.is_json: raise BadRequestException(406, "Invalid Json Request")\r
+\r
+    response_dict = vth_response_dic()\r
+    start_time = unix_time_millis(datetime.datetime.now())\r
+    status_code = 200\r
+    ret_url = request.args.get('retURL')\r
+\r
+    query = ""\r
+    json_body = ""\r
+    request_data = request.json\r
+    json_keys = set(request_data)\r
+    valid_actions = {"geta1policytype", "geta1policy", "puta1policy", "deletea1policy", "geta1policystatus"}\r
+    required_keys = {"action", "auth", "action_data"}\r
+\r
+    #check for required keys before reading them, then check for a valid action\r
+    if not required_keys <= json_keys: raise BadRequestException(406, "Json request is missing required keys {}".format(required_keys))\r
+    action_request = request_data.get("action").lower()\r
+    if not action_request in valid_actions: raise BadRequestException(406, "Action is not supported {}".format(action_request))\r
+    #check request's action_data key contains required keys\r
+    if 'query' not in request.json['action_data']: raise BadRequestException(406, "action_data must contain query and jsonBody")\r
+    if 'jsonBody' not in request.json['action_data']: raise BadRequestException(406, "action_data must contain query and jsonBody")\r
+\r
+    query = request.json['action_data']['query']\r
+    json_body = request.json['action_data']['jsonBody']\r
+\r
+    if valid_string_json(query) and valid_string_json(json_body):\r
+        if(request.method == 'GET'):\r
+            response_dict = get_function(request, response_dict, config)\r
+        elif(request.method == 'POST'):\r
+            response_dict = post_function(request, response_dict, config)\r
+        elif(request.method == 'PUT'):\r
+            response_dict = put_function(request, response_dict, config)\r
+        elif(request.method == 'DELETE'):\r
+            response_dict = delete_function(request, response_dict, config)\r
+    else:\r
+        raise BadRequestException(406, "Invalid JSON Strings")\r
+    end_time = unix_time_millis(datetime.datetime.now())\r
+    response_dict['vthResponse']['testDurationMS'] = end_time-start_time\r
+    if ret_url is not None:\r
+        sendCallback(ret_url,response_dict)\r
+        return '',200\r
+    return jsonify(response_dict), status_code\r
+\r
+def get_proxies(config):\r
+    proxy_enabled = config.getboolean('resource', 'proxy_enabled')\r
+    req_proxies = {\r
+        'http': None,\r
+        'https': None\r
+    }\r
+    if not proxy_enabled:\r
+        return None\r
+    else:\r
+        req_proxies['http'] = config['resource']['http_proxy']\r
+        req_proxies['https'] = config['resource']['https_proxy']\r
+        return req_proxies\r
+def get_credentials(json_data, config):\r
+    auth_enabled = config.getboolean('auth', 'creds_enabled')\r
+    if not auth_enabled:\r
+        return None\r
+    else:\r
+        username = config['auth']['username'] if 'username' not in json_data['auth'] else json_data['auth']['username']\r
+        password = config['auth']['password'] if 'password' not in json_data['auth'] else json_data['auth']['password']\r
+        return (username, password)\r
+def vth_response_dic():\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    response_data = {\r
+        "vthResponse": {\r
+            "testDurationMS": "",\r
+            'dateTimeUTC': str(datetime.datetime.utcnow()),\r
+            "abstractMessage": "Success",\r
+            "resultData": {}\r
+        }\r
+    }\r
+    return response_data\r
+#TODO: data is the payload posted to the callback URL, not necessarily this VTH's own json response\r
+def sendCallback(url, data):\r
+    try:\r
+        if type(data) is not dict:\r
+            data = {"msg": data}\r
+        current_app.logger.info("sending callback")\r
+        requests.post(url, json=data)\r
+    except Exception as e:\r
+        current_app.logger.info(e)\r
+    return\r
+\r
+def get_request_data(request):\r
+    if not request.is_json:\r
+        raise ValueError("request must be json")\r
+    requestData = request.get_json()\r
+    return requestData\r
+\r
+\r
+def valid_json(data):\r
+\r
+    try:\r
+        _ = json.loads(data)\r
+    except ValueError as e:\r
+        return False\r
+    return True\r
+def get_config(config_file_name):\r
+    config = ConfigParser(os.environ)\r
+    config.read(config_file_name)\r
+    return config\r
+\r
+def validate_request(request_data, isPublish=True):\r
+    return  # NOTE: validation is currently disabled; drop this return to enable the checks below\r
+    missing_params = []\r
+\r
+    if 'topic_name' not in request_data:\r
+        missing_params.append("topic_name")\r
+    if isPublish:\r
+        if 'data' not in request_data:\r
+            missing_params.append('data')\r
+    else:\r
+        if 'consumer_group' not in request_data:\r
+            missing_params.append('consumer_group')\r
+        if 'consumer_id' not in request_data:\r
+            missing_params.append('consumer_id')\r
+\r
+    if missing_params:\r
+        err_msg = '{} request requires the following: '.format('publish' if isPublish else 'subscribe')\r
+        err_msg += ','.join(missing_params)\r
+        raise KeyError(err_msg)\r
+\r
+\r
+def build_url(config, request_data, is_publish=True):\r
+    if is_publish:\r
+        base_path = config['resource']['base_address'] + config['resource']['publish']\r
+        topic_name = request_data['topic_name']\r
+        publish_address = base_path.format(topic_name=topic_name)\r
+        return publish_address\r
+\r
+    base_path = config['resource']['base_address'] + config['resource']['subscribe']\r
+    topic_name = request_data['topic_name']\r
+    consumer_group = request_data['consumer_group']\r
+    consumer_id = request_data['consumer_id']\r
+    subscribe_address = base_path.format(topic_name=topic_name, consumer_group=consumer_group, consumer_id=consumer_id)\r
+    if ('timeout' in request_data):\r
+        subscribe_address = (subscribe_address + '?timeout={}').format(request_data['timeout'])\r
+    return subscribe_address\r
+\r
+\r
+def send_request(url, config, is_subscribe_request=False, payload=None):\r
+    # setup default values\r
+    auth_enabled = config.getboolean('auth', 'creds_enabled')  # option name matches config.ini\r
+    proxy_enabled = config.getboolean('resource', 'proxy_enabled')\r
+    username = ''\r
+    password = ''\r
+    req_proxies = {\r
+        'http': None,\r
+        'https': None\r
+    }\r
+    # place proxy and authentication information\r
+    if auth_enabled:\r
+        username = config['auth']['username']\r
+        password = config['auth']['password']\r
+    if proxy_enabled:\r
+        req_proxies['http'] = config['resource']['http_proxy']\r
+        req_proxies['https'] = config['resource']['https_proxy']\r
+\r
+    # for subscribe request\r
+    if is_subscribe_request:\r
+        return requests.get(url,\r
+                            auth=(username, password) if auth_enabled else None,\r
+                            proxies=req_proxies if proxy_enabled else None)\r
+    # for publish request\r
+    req_headers = {'Content-type': 'application/json'}\r
+    return requests.post(url,\r
+                         json=payload,\r
+                         auth=(username, password) if auth_enabled else None,\r
+                         proxies=req_proxies if proxy_enabled else None,\r
+                         headers=req_headers)\r
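Putting it together, a minimal sketch of a call that passes route_check and reaches get_policy_using_get; the host, port, and prefix are assumptions, and the field values are placeholders:

    import requests

    BASE = "http://localhost:6000/otf/vth/oran/a1/v1"  # assumed prefix
    body = {
        "action": "geta1policy",                        # must be in valid_actions
        "auth": {"username": "user", "password": "pw"},
        "action_data": {"query": "{}", "jsonBody": "{}"},
        "id": "policy-1",                               # consumed by the policy helper
    }
    res = requests.get(BASE + "/policy", json=body)     # the VTH reads a JSON body even on GET
    print(res.json()["vthResponse"]["testDurationMS"])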
diff --git a/a1-sdnc-vth/app/helpers/ric_helper.py b/a1-sdnc-vth/app/helpers/ric_helper.py
new file mode 100644 (file)
index 0000000..47d55c2
--- /dev/null
@@ -0,0 +1,37 @@
+from app.helpers import response_helper as ResponseHelper\r
+from flask import current_app\r
+from app.errors.bad_request_exception import BadRequestException\r
+\r
+def get_ric_using_get(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    #username = config['auth']['username'] if 'username' not in json_data else json_data['username']\r
+    #password = config['auth']['password'] if 'password' not in json_data else json_data['password']\r
+    #creds = (username, password)\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    keys = set(json_data.keys())\r
+    required = {'managedElementId'}\r
+    if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required))\r
+\r
+    param = {\r
+            'managedElementId': json_data['managedElementId']\r
+            }\r
+\r
+    response_dict['vthResponse']['resultData'] = param\r
+    #api_response = requests.get(url, credentials=creds, params=param)\r
+    return response_dict\r
+def get_rics_using_get(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    #username = config['auth']['username'] if 'username' not in json_data else json_data['username']\r
+    #password = config['auth']['password'] if 'password' not in json_data else json_data['password']\r
+    #creds = (username, password)\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    current_app.logger.info("creds: {}".format(creds))\r
+    param = {\r
+            "policyType": json_data["policyType"] if "policyType" in json_data else ""\r
+            }\r
+\r
+    response_dict['vthResponse']['resultData'] = param\r
+    #api_response = requests.get(url, credentials=creds, params=param)\r
+    return response_dict\r
diff --git a/a1-sdnc-vth/app/helpers/service_helper.py b/a1-sdnc-vth/app/helpers/service_helper.py
new file mode 100644 (file)
index 0000000..16d9b92
--- /dev/null
@@ -0,0 +1,78 @@
+\r
+from app.helpers import response_helper as ResponseHelper\r
+from flask import current_app\r
+from app.errors.bad_request_exception import BadRequestException\r
+\r
+def get_services_using_get(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    #username = config['auth']['username'] if 'username' not in json_data else json_data['username']\r
+    #password = config['auth']['password'] if 'password' not in json_data else json_data['password']\r
+    #creds = (username, password)\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    param = {\r
+            'name': json_data['name'] if 'name' in json_data else ""\r
+            }\r
+\r
+    response_dict['vthResponse']['resultData'] = param\r
+    #api_response = requests.get(url, credentials=creds, params=param)\r
+    return response_dict\r
+def delete_services_using_delete(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    #username = config['auth']['username'] if 'username' not in json_data else json_data['username']\r
+    #password = config['auth']['password'] if 'password' not in json_data else json_data['password']\r
+    #creds = (username, password)\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    keys = set(json_data.keys())\r
+    required = {'name'}\r
+    if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required))\r
+\r
+    param = {\r
+            'name': json_data['name']\r
+            }\r
+\r
+    response_dict['vthResponse']['resultData'] = param\r
+    #api_response = requests.get(url, credentials=creds, params=param)\r
+    return response_dict\r
+def put_service_using_put(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    #username = config['auth']['username'] if 'username' not in json_data else json_data['username']\r
+    #password = config['auth']['password'] if 'password' not in json_data else json_data['password']\r
+    #creds = (username, password)\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    keys = set(json_data.keys())\r
+    required = {'registrationInfo'}\r
+    if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required))\r
+\r
+    param = {\r
+            'registrationInfo': json_data['registrationInfo']\r
+            }\r
+\r
+    response_dict['vthResponse']['resultData'] = param\r
+    #api_response = requests.get(url, credentials=creds, params=param)\r
+    return response_dict\r
+\r
+def keep_alive_service_using_put(request, response_dict, config):\r
+    json_data = request.get_json()\r
+    #username = config['auth']['username'] if 'username' not in json_data else json_data['username']\r
+    #password = config['auth']['password'] if 'password' not in json_data else json_data['password']\r
+    #creds = (username, password)\r
+    creds = ResponseHelper.get_credentials(json_data, config)\r
+    current_app.logger.info("creds: {}".format(creds))\r
+\r
+    keys = set(json_data.keys())\r
+    required = {'name'}\r
+    if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required))\r
+\r
+    param = {\r
+            'name': json_data['name']\r
+            }\r
+\r
+    response_dict['vthResponse']['resultData'] = param\r
+    #api_response = requests.get(url, credentials=creds, params=param)\r
+    return response_dict\r
diff --git a/a1-sdnc-vth/app/helpers/time_helper.py b/a1-sdnc-vth/app/helpers/time_helper.py
new file mode 100644 (file)
index 0000000..b882d0b
--- /dev/null
@@ -0,0 +1,24 @@
+"""\r
+    Module Info:\r
+"""\r
+import datetime\r
+\r
+def unix_time_millis(d_time):\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    epoch = datetime.datetime.utcfromtimestamp(0)\r
+    return (d_time - epoch).total_seconds() * 1000.0\r
+\r
+def timed_function(func):\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    start_time = unix_time_millis(datetime.datetime.now())\r
+    func()\r
+    end_time = unix_time_millis(datetime.datetime.now())\r
+    return end_time - start_time\r
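A one-line usage sketch: timed_function runs the callable once and returns the elapsed wall-clock time in milliseconds:

    import time
    from app.helpers.time_helper import timed_function

    elapsed_ms = timed_function(lambda: time.sleep(0.1))
    print(round(elapsed_ms))  # ~100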
diff --git a/a1-sdnc-vth/app/models/__init__.py b/a1-sdnc-vth/app/models/__init__.py
new file mode 100644 (file)
index 0000000..52319a0
--- /dev/null
@@ -0,0 +1,6 @@
+\r
+"""\r
+    Module Info:\r
+    Anything imported to this file will be available to outside modules.\r
+    Only import methods that are intended for use by outside modules\r
+"""\r
diff --git a/a1-sdnc-vth/app/routes/__init__.py b/a1-sdnc-vth/app/routes/__init__.py
new file mode 100644 (file)
index 0000000..89419e1
--- /dev/null
@@ -0,0 +1,19 @@
+"""\r
+    Module Info:\r
+    Anything imported to this file will be available to outside modules.\r
+    Routes need to be exported to be usable, if removed, routes will not be found and response\r
+    will be a 500.\r
+    ROUTE order matters, because ROUTE is like a global var used by all the other modules\r
+    it needs to be above them all\r
+"""\r
+from flask import Blueprint\r
+from app.helpers.response_helper import get_config\r
+\r
+ROUTES = Blueprint('routes', __name__)\r
+config = get_config("config.ini")\r
+\r
+from .policy import *\r
+from .ric import *\r
+from .service import *\r
+from .info import *\r
+from .errors import ERRORS\r
diff --git a/a1-sdnc-vth/app/routes/errors.py b/a1-sdnc-vth/app/routes/errors.py
new file mode 100644 (file)
index 0000000..43e1ec1
--- /dev/null
@@ -0,0 +1,33 @@
+"""\r
+Module Info:\r
+"""\r
+from flask import jsonify, current_app, Blueprint\r
+from app.helpers.error_helper import error_dic\r
+from app.errors.bad_request_exception import BadRequestException\r
+import traceback\r
+\r
+ERRORS = Blueprint('errors', __name__)\r
+\r
+@ERRORS.app_errorhandler(BadRequestException)\r
+def handle_bad_request(error):\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    current_app.logger.info(error)\r
+    response = error_dic(error, error.status_code, error.message)\r
+    current_app.logger.error(traceback.format_exc())\r
+    return jsonify(response), error.status_code\r
+\r
+@ERRORS.app_errorhandler(Exception)\r
+def handle_error(error):\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    status_code = 500\r
+    response = error_dic(error, status_code)\r
+    current_app.logger.error(traceback.format_exc())\r
+    return jsonify(response), status_code\r
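+# A minimal sketch of how these handlers fire: any route that raises\r
+# BadRequestException(406, "Invalid Json") returns the jsonified error_dic\r
+# payload with HTTP 406; any other uncaught exception returns it with 500.\r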
diff --git a/a1-sdnc-vth/app/routes/info.py b/a1-sdnc-vth/app/routes/info.py
new file mode 100644 (file)
index 0000000..7090cf3
--- /dev/null
@@ -0,0 +1,76 @@
+"""\r
+Args:\r
+Returns:\r
+Examples:\r
+"""\r
+import datetime\r
+import time\r
+\r
+from flask import current_app, jsonify, request\r
+\r
+from app.helpers.time_helper import unix_time_millis, timed_function\r
+from app.helpers.response_helper import vth_response_dic\r
+from app.helpers import response_helper as ResponseHelper\r
+from app.helpers import action_helper as Info\r
+from . import config, ROUTES\r
+\r
+\r
+@ROUTES.route("/handle_action", methods=['POST'])\r
+def handle_action_request():\r
+    return ResponseHelper.route_check(config=config, post_function = Info.execute_action)\r
+\r
+\r
+@ROUTES.route("/", methods=['GET'])\r
+def get_base():\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    current_app.logger.info(request.method)\r
+    response = vth_response_dic()\r
+    data = current_app.url_map\r
+    rules = []\r
+    methods_list = []\r
+    for rule in data.iter_rules():\r
+        ma = {rule.rule: []}\r
+        for val in rule.methods:\r
+            if (val != "OPTIONS") and (val != "HEAD"):\r
+                ma[rule.rule].append(val)\r
+        rules.append(ma)\r
+\r
+    response["vthResponse"]["resultData"] = rules\r
+    current_app.logger.debug("hit base route")\r
+    return jsonify(response)\r
+\r
+@ROUTES.route("/health", methods=['GET'])\r
+def get_health():\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    current_app.logger.debug("hit health point")\r
+    return "UP"\r
+\r
+@ROUTES.route("/status", methods=['GET'])\r
+def get_status():\r
+    """\r
+    Args:\r
+    Returns:\r
+    Examples:\r
+    """\r
+    suma = lambda: time.sleep(1)\r
+    #current_app.logger.info(current_app.url_map)\r
+    current_app.logger.info(unix_time_millis(datetime.datetime.now()))\r
+    current_app.logger.info(timed_function(suma))\r
+    current_app.logger.debug("some stuff")\r
+    #raise Exception("some error")\r
+    raise BadRequestException()\r
+    return "Running"\r
diff --git a/a1-sdnc-vth/app/routes/policy.py b/a1-sdnc-vth/app/routes/policy.py
new file mode 100644 (file)
index 0000000..588397f
--- /dev/null
@@ -0,0 +1,225 @@
+\r
+import datetime\r
+import json\r
+import os\r
+\r
+import requests\r
+from flask import current_app, jsonify, request\r
+from . import config, ROUTES\r
+from app.helpers import policy_helper as Policy\r
+from app.helpers import response_helper as ResponseHelper\r
+from app.helpers.time_helper import unix_time_millis\r
+from app.errors.bad_request_exception import BadRequestException\r
+\r
+\r
+\r
+def sendCallback(url, data):\r
+    try:\r
+        if not isinstance(data, dict):\r
+            data = {"msg": data}\r
+        current_app.logger.info("sending callback")\r
+        requests.post(url, json=data)\r
+    except Exception as e:\r
+        current_app.logger.info(e)\r
+\r
+\r
+def route_check2(get_function=None, post_function=None, put_function=None, delete_function=None):\r
+    """\r
+     Info:\r
+        Since all routes do the same pre-check and have a similar skeleton, this function just refactored the pre-check for code reuse\r
+     Arguments (**kwargs): pass in the specified key(s) and  method(s) that handle the type of method, method must be allowed by route decorator\r
+        get_function => type: function\r
+        put_function => type: function\r
+        delete_function => type: function\r
+    Returns:\r
+        returns the return of the function call, typically a jsonified response.\r
+        you can capture response in a var and execute logic or you can just return the function call/response \r
+    E.G.:\r
+        response = route_check(post_function = handle_post)\r
+        return route_check(get_function = handle_get, post_function = handle_post)\r
+    """\r
+    response_dict = ResponseHelper.vth_response_dic()\r
+    start_time = unix_time_millis(datetime.datetime.now())\r
+    status_code = 200\r
+    if request.is_json and ResponseHelper.valid_json(request.data):\r
+        if(request.method == 'GET'):\r
+            response_dict = get_function(request, response_dict, config)\r
+        elif(request.method == 'POST'):\r
+            response_dict = post_function(request, response_dict, config)\r
+        elif(request.method == 'PUT'):\r
+            response_dict = put_function(request, response_dict, config)\r
+        elif(request.method == 'DELETE'):\r
+            response_dict = delete_function(request, response_dict, config)\r
+    else:\r
+        raise BadRequestException(406, "Invalid Json")\r
+    end_time = unix_time_millis(datetime.datetime.now())\r
+    response_dict['vthResponse']['testDurationMS'] = end_time-start_time\r
+    return jsonify(response_dict), status_code\r
+\r
+\r
+@ROUTES.route("/policies", methods=['GET'])\r
+def policies():\r
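+    # TODO: not implemented; as written this returns None, which Flask turns into a 500.\r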
+    pass\r
+\r
+@ROUTES.route("/policy", methods=['GET', 'PUT', 'DELETE'])\r
+def handle_policy():\r
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_using_get, put_function = Policy.put_policy_using_put, delete_function=Policy.delete_policy_using_delete)\r
+    \r
+\r
+@ROUTES.route("/policy_ids", methods=['GET'])\r
+def handle_policy_ids():\r
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_ids_using_get)\r
+\r
+@ROUTES.route("/policy_schemas", methods=['GET'])\r
+def handle_policy_schemas():\r
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_schemas_using_get)\r
+\r
+@ROUTES.route("/policy_schema", methods=['GET'])\r
+def handle_policy_schema():\r
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_schema_using_get)\r
+\r
+@ROUTES.route("/policy_status", methods=['GET'])\r
+def handle_policy_status():\r
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_status_using_get)\r
+\r
+@ROUTES.route("/policy_types", methods=['GET'])\r
+def handle_policy_types():\r
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_types_using_get)\r
+\r
+\r
+@ROUTES.route("/", methods=['POST'])\r
+def executeRicRequest():\r
+    response_data = {\r
+        'vthResponse': {\r
+            'testDuration': '',\r
+            'dateTimeUTC': str(datetime.datetime.now()),\r
+            'abstractMessage': '',\r
+            'resultData': {}\r
+        }\r
+    }\r
+\r
+    startTime = unix_time_millis(datetime.datetime.now())\r
+    ret_url = request.args.get('retURL')\r
+    try:\r
+        if not request.is_json:\r
+            raise ValueError("request must be json")\r
+\r
+        requestData = request.get_json()\r
+\r
+        current_app.logger.info("A1 requestData:" + str(requestData))\r
+\r
+        _check_incoming_request(requestData)\r
+        action = requestData['action'].lower()\r
+\r
+        os.environ['NO_PROXY'] = '127.0.0.1'  # TODO: remove in the final version; only needed when testing with the mock server\r
+        with open('config.json') as configFile:\r
+            config = json.load(configFile)\r
+\r
+        baseAddress = config['base_address']\r
+        if action == 'health_check' or action == 'list_policy':\r
+            res = requests.get(baseAddress + config['actions_path'][action])\r
+            response_data['vthResponse']['resultData']['statusCode'] = res.status_code\r
+            if action == 'health_check':\r
+                response_data['vthResponse']['resultData']['resultOutput'] = res.text\r
+            else:\r
+                response_data['vthResponse']['resultData']['resultOutput'] = res.json()\r
+        elif action == 'list_policy_instance':\r
+            res = requests.get(baseAddress + config['actions_path'][action]\r
+                               .format(policy_type_id=requestData['policy_type_id']))\r
+            response_data['vthResponse']['resultData']['statusCode'] = res.status_code\r
+            response_data['vthResponse']['resultData']['resultOutput'] = res.json()\r
+        elif action == 'get_policy_instance_status':\r
+            res = requests.get(baseAddress + config['actions_path'][action]\r
+                               .format(policy_type_id=requestData['policy_type_id'],\r
+                                       policy_instance_id=requestData['policy_instance_id']))\r
+            response_data['vthResponse']['resultData']['statusCode'] = res.status_code\r
+            response_data['vthResponse']['resultData']['resultOutput'] = res.json()\r
+        elif action in ('edit_policy', 'edit_policy_instance'):\r
+            res = _send_edit_request(requestData, config)\r
+            response_data['vthResponse']['resultData']['statusCode'] = res.status_code\r
+            if requestData['request_type'].lower() == 'get' and res.status_code == 200:\r
+                response_data['vthResponse']['resultData']['resultOutput'] = res.json()\r
+            else:\r
+                response_data['vthResponse']['resultData']['resultOutput'] = res.text\r
+\r
+    except Exception as ex:\r
+        endTime = unix_time_millis(datetime.datetime.now())\r
+        totalTime = endTime - startTime\r
+        response_data['vthResponse']['testDuration'] = totalTime\r
+        response_data['vthResponse']['abstractMessage'] = str(ex)\r
+        return jsonify(response_data)\r
+\r
+    endTime = unix_time_millis(datetime.datetime.now())\r
+    totalTime = endTime - startTime\r
+\r
+    response_data['vthResponse']['testDuration'] = totalTime\r
+\r
+    if ret_url is not None:\r
+        sendCallback(ret_url, response_data)\r
+        return '', 200\r
+\r
+    return jsonify(response_data), 200\r
+\r
+\r
+def _send_edit_request(request_data, config):\r
+    baseAddress = config['base_address']\r
+    path = ''\r
+    action = request_data['action']\r
+    policy_type_id = request_data['policy_type_id']\r
+    request_type = request_data['request_type']\r
+    if action == "edit_policy":\r
+        path = baseAddress + config['actions_path'][action].format(policy_type_id=policy_type_id)\r
+    if action == 'edit_policy_instance':\r
+        instance_id = request_data['policy_instance_id']\r
+        path = baseAddress + config['actions_path'][action].format(policy_type_id=policy_type_id,\r
+                                                                   policy_instance_id=instance_id)\r
+    if request_type == 'get':\r
+        return requests.get(path)\r
+    if request_type == 'put':\r
+        payload = request_data['payload']\r
+        # A1 policy bodies are JSON; send as a JSON body rather than form data.\r
+        return requests.put(path, json=payload)\r
+    if request_type == 'delete':\r
+        return requests.delete(path)\r
+\r
+\r
+def _check_incoming_request(requestData):  # check if the request is valid\r
+    if 'action' not in requestData:\r
+        raise KeyError('no action was specified')\r
+\r
+    action = requestData['action'].lower()\r
+    edit_actions = ['edit_policy', 'edit_policy_instance']\r
+    requires_policy_id = ['edit_policy', 'list_policy_instance'\r
+        , 'edit_policy_instance', 'get_policy_instance_status']\r
+    requires_policy_instance_id = ['edit_policy_instance', 'get_policy_instance_status']\r
+    possible_actions = ['health_check', 'list_policy', 'edit_policy', 'list_policy_instance'\r
+        , 'edit_policy_instance', 'get_policy_instance_status']\r
+    possible_request_type = ['get', 'put', 'delete']\r
+\r
+    if action not in possible_actions:\r
+        raise KeyError("invalid action")\r
+    if action in edit_actions:  # request type is required\r
+        if 'request_type' not in requestData:\r
+            raise KeyError('this action: ' + action + ' requires a request type')\r
+        if requestData['request_type'] not in possible_request_type:\r
+            raise KeyError('this request_type: ' + requestData['request_type'] + ' is not valid')\r
+        if requestData['request_type'] == 'put' and 'payload' not in requestData:\r
+            raise KeyError('put request requires a payload')\r
+    if action in requires_policy_id:\r
+        if 'policy_type_id' not in requestData:\r
+            raise KeyError('this action: ' + action + ' requires a policy_type_id')\r
+    if action in requires_policy_instance_id:\r
+        if 'policy_instance_id' not in requestData:\r
+            raise KeyError('this action: ' + action + ' requires a policy_instance_id')\r
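+# A minimal sketch of a valid request body for executeRicRequest (derived from\r
+# _check_incoming_request above; field values are illustrative only):\r
+#\r
+#   POST /otf/vth/oran/a1/v1/?retURL=http://caller/callback\r
+#   {\r
+#       "action": "edit_policy_instance",\r
+#       "request_type": "put",\r
+#       "policy_type_id": 20000,\r
+#       "policy_instance_id": "tsapolicy145",\r
+#       "payload": {"threshold": 5}\r
+#   }\r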
diff --git a/a1-sdnc-vth/app/routes/ric.py b/a1-sdnc-vth/app/routes/ric.py
new file mode 100644 (file)
index 0000000..8441ac6
--- /dev/null
@@ -0,0 +1,12 @@
+\r
+from app.helpers import response_helper as ResponseHelper\r
+from app.helpers import ric_helper as Ric\r
+from . import config, ROUTES\r
+\r
+@ROUTES.route("/ric", methods=['GET'])\r
+def handle_ric():\r
+    return ResponseHelper.route_check(config=config, get_function=Ric.get_ric_using_get)\r
+\r
+@ROUTES.route("/rics", methods=['GET'])\r
+def handle_rics():\r
+    return ResponseHelper.route_check(config=config, get_function=Ric.get_rics_using_get)\r
diff --git a/a1-sdnc-vth/app/routes/service.py b/a1-sdnc-vth/app/routes/service.py
new file mode 100644 (file)
index 0000000..e06bf94
--- /dev/null
@@ -0,0 +1,16 @@
+from app.helpers import response_helper as ResponseHelper\r
+from app.helpers import service_helper as Service\r
+from . import config, ROUTES\r
+\r
+@ROUTES.route("/services", methods=['GET', 'DELETE'])\r
+def handle_services():\r
+    return ResponseHelper.route_check(config=config, get_function=Service.get_services_using_get, delete_function=Service.delete_services_using_delete)\r
+\r
+\r
+@ROUTES.route("/service", methods=['PUT'])\r
+def handle_service():\r
+    return ResponseHelper.route_check(config=config, put_function=Service.put_service_using_put)\r
+\r
+@ROUTES.route("/services/keepalive", methods=['PUT'])\r
+def handle_services_keepalive():\r
+    return ResponseHelper.route_check(config=config, put_function=Service.keep_alive_service_using_put)\r
diff --git a/a1-sdnc-vth/config.ini b/a1-sdnc-vth/config.ini
new file mode 100644 (file)
index 0000000..e9bc817
--- /dev/null
@@ -0,0 +1,14 @@
+[auth]\r
+creds_enabled= %(USE_CRED)s\r
+username = %(USER)s\r
+password = %(PW)s\r
+[api]\r
+base_url= %(API_URL)s\r
+port= %(API_PORT)s\r
+[resource]\r
+proxy_enabled = %(USE_PROXY)s\r
+https_proxy= %(HTTPS)s\r
+http_proxy= %(HTTP)s\r
+base_address = %(API_URL)s\r
+publish = /{topic_name}\r
+subscribe = /{topic_name}/{consumer_group}/{consumer_id}\r
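+# Note: the %(VAR)s tokens above are configparser interpolation keys; the\r
+# helper that loads this file (ResponseHelper.get_config) is expected to\r
+# supply them, e.g. from the container environment variables set in the helm\r
+# deployment (USE_CRED, USER, PW, USE_PROXY, HTTPS, HTTP, API_URL, API_PORT).\r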
diff --git a/a1-sdnc-vth/doc/a1-documentation.docx b/a1-sdnc-vth/doc/a1-documentation.docx
new file mode 100644 (file)
index 0000000..335e2a0
Binary files /dev/null and b/a1-sdnc-vth/doc/a1-documentation.docx differ
diff --git a/a1-sdnc-vth/docker/Dockerfile b/a1-sdnc-vth/docker/Dockerfile
new file mode 100644 (file)
index 0000000..960368c
--- /dev/null
@@ -0,0 +1,18 @@
+FROM python:3.7.4\r
+\r
+RUN python --version\r
+\r
+# Copy the dependency list first so source-only changes reuse the pip layer.\r
+COPY pip-requirements.txt pip-requirements.txt\r
+RUN python -m pip install --proxy http://one.proxy.att.com:8080 -r pip-requirements.txt\r
+\r
+COPY app app\r
+COPY config.ini config.ini\r
+COPY run.py run.py\r
+\r
+RUN mkdir -p /otf/logs\r
+\r
+ENV USER=default_user\r
+ENV PW=default_pass\r
+\r
+ENTRYPOINT ["python", "run.py"]\r
diff --git a/a1-sdnc-vth/docker/container-tag.yaml b/a1-sdnc-vth/docker/container-tag.yaml
new file mode 100644 (file)
index 0000000..ee078db
--- /dev/null
@@ -0,0 +1,15 @@
+---
+#   Copyright (c) 2019 AT&T Intellectual Property.
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+tag: 0.0.1
diff --git a/a1-sdnc-vth/helm/a1-sdnc-vth/.helmignore b/a1-sdnc-vth/helm/a1-sdnc-vth/.helmignore
new file mode 100644 (file)
index 0000000..daebc7d
--- /dev/null
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.\r
+# This supports shell glob matching, relative path matching, and\r
+# negation (prefixed with !). Only one pattern per line.\r
+.DS_Store\r
+# Common VCS dirs\r
+.git/\r
+.gitignore\r
+.bzr/\r
+.bzrignore\r
+.hg/\r
+.hgignore\r
+.svn/\r
+# Common backup files\r
+*.swp\r
+*.bak\r
+*.tmp\r
+*~\r
+# Various IDEs\r
+.project\r
+.idea/\r
+*.tmproj\r
diff --git a/a1-sdnc-vth/helm/a1-sdnc-vth/Chart.yaml b/a1-sdnc-vth/helm/a1-sdnc-vth/Chart.yaml
new file mode 100644 (file)
index 0000000..6c6bf9a
--- /dev/null
@@ -0,0 +1,5 @@
+apiVersion: v1\r
+appVersion: "1.0"\r
+description: A Helm chart for the a1 sdnc Virtual Test Head \r
+name: a1-sdnc-vth\r
+version: 0.0.1\r
diff --git a/a1-sdnc-vth/helm/a1-sdnc-vth/templates/deployment.yaml b/a1-sdnc-vth/helm/a1-sdnc-vth/templates/deployment.yaml
new file mode 100644 (file)
index 0000000..ec08ac3
--- /dev/null
@@ -0,0 +1,137 @@
+# extensions/v1beta1 Deployments were removed in Kubernetes 1.16; use apps/v1.\r
+apiVersion: apps/v1\r
+kind: Deployment\r
+metadata:\r
+  name: {{ .Values.appName}}\r
+  namespace: {{.Values.namespace}}\r
+  labels:\r
+    app: {{ .Values.appName}}\r
+    version: {{.Values.version}}\r
+spec:\r
+  revisionHistoryLimit: 1\r
+  minReadySeconds: 10\r
+  strategy:\r
+  # indicate which strategy we want for rolling update\r
+    type: RollingUpdate\r
+    rollingUpdate:\r
+      maxSurge: 0\r
+      maxUnavailable: 1\r
+  replicas: {{ .Values.replicas}}\r
+  selector:\r
+    matchLabels:\r
+      app: {{ .Values.appName}}\r
+      version: {{.Values.version}}\r
+  template:\r
+    metadata:\r
+      labels:\r
+        app: {{ .Values.appName}}\r
+        version: {{.Values.version}}\r
+    spec:\r
+      serviceAccount: default\r
+      volumes:\r
+      - name: {{ .Values.appName}}-cert-volume\r
+        secret:\r
+          secretName: {{.Values.sharedCert}}\r
+          optional: true\r
+          items:\r
+          - key: PEM_CERT\r
+            path: otf.pem\r
+          - key: PEM_KEY\r
+            path: privateKey.pem\r
+#      {{ if or (eq .Values.env "st") (eq .Values.env "prod-dr")}} TODO UNCOMMENT WHEN PUSHING TO ORAN\r
+#      {{else}}\r
+#      - name: logging-pvc\r
+#        persistentVolumeClaim:\r
+#          {{if eq .Values.env "prod"}}\r
+#          claimName: {{ .Values.pvc.prod | quote }}\r
+#          {{ else }}\r
+#          claimName: {{ .Values.pvc.dev | quote }}\r
+#          {{ end }}\r
+#      {{end}}\r
+      containers:\r
+      - name: {{ .Values.appName}}\r
+        image: {{ .Values.image}}\r
+        imagePullPolicy: Always\r
+        ports:\r
+        - name: http\r
+          containerPort: 6001\r
+          # nodePort is not a valid containerPort field; it is set in service.yaml\r
+          protocol: TCP\r
+#        {{ if eq .Values.env "st"}} TODO UNCOMMENT FOR ORAN?\r
+#        resources:\r
+#          limits:\r
+#            memory: "512Mi"\r
+#            cpu: "500m"\r
+#          requests:\r
+#            memory: "256Mi"\r
+#            cpu: "100m"\r
+#        {{else}}\r
+#        resources:\r
+#          limits:\r
+#            memory: "1Gi"\r
+#            cpu: "1"\r
+#          requests:\r
+#            memory: "1Gi"\r
+#            cpu: "1"\r
+#        {{end}}\r
+        env:\r
+        - name: NAMESPACE\r
+          value: {{.Values.namespace}}\r
+        - name: APP_NAME\r
+          value: {{ .Values.appName}}\r
+        - name: APP_VERSION\r
+          value: {{.Values.version}}\r
+        - name: USE_CRED\r
+          value: {{.Values.auth.enabled | quote }}\r
+        - name: USER\r
+          valueFrom:\r
+            secretKeyRef:\r
+              name: {{ .Values.appName}}\r
+              key: api_user\r
+              optional: true\r
+        - name: PW\r
+          valueFrom:\r
+            secretKeyRef:\r
+              name: {{ .Values.appName}}\r
+              key: api_pass\r
+              optional: true\r
+        - name: USE_PROXY\r
+          value: {{.Values.proxy.enabled | quote }}\r
+        - name: HTTPS\r
+          value: {{.Values.proxy.https | quote }}\r
+        - name: HTTP\r
+          value: {{.Values.proxy.http | quote }}\r
+        - name: API_URL\r
+          value: {{.Values.api.base_url}}\r
+        - name: API_PORT\r
+          value: {{.Values.api.port | quote }}\r
+        volumeMounts:\r
+        - name: {{.Values.appName}}-cert-volume\r
+          mountPath: /opt/cert\r
+#        {{ if or (eq .Values.env "st") (eq .Values.env "prod-dr")}}\r
+#        {{else}}\r
+#        - name: logging-pvc\r
+#          mountPath: "/otf/logs"\r
+#        {{end}}\r
+        livenessProbe:\r
+          httpGet:\r
+            path: {{.Values.health}}\r
+            port: http\r
+            scheme: HTTP\r
+            httpHeaders:\r
+              - name: X-Custom-Header\r
+                value: Alive\r
+          initialDelaySeconds: 30\r
+          timeoutSeconds: 30\r
+          periodSeconds: 30\r
+        readinessProbe:\r
+          httpGet:\r
+            path: {{.Values.health}}\r
+            port: http\r
+            scheme: HTTP\r
+            httpHeaders:\r
+              - name: X-Custom-Header\r
+                value: Ready\r
+          initialDelaySeconds: 30\r
+          timeoutSeconds: 30\r
+          periodSeconds: 30\r
+      restartPolicy: Always\r
diff --git a/a1-sdnc-vth/helm/a1-sdnc-vth/templates/secret.yaml b/a1-sdnc-vth/helm/a1-sdnc-vth/templates/secret.yaml
new file mode 100644 (file)
index 0000000..4a0aa24
--- /dev/null
@@ -0,0 +1,8 @@
+apiVersion: v1\r
+kind: Secret\r
+metadata:\r
+  name: {{ .Values.appName}}\r
+type: Opaque\r
+data:\r
+  api_user: {{ .Values.auth.user | b64enc }}\r
+  api_pass: {{ .Values.auth.pw | b64enc }}\r
diff --git a/a1-sdnc-vth/helm/a1-sdnc-vth/templates/service.yaml b/a1-sdnc-vth/helm/a1-sdnc-vth/templates/service.yaml
new file mode 100644 (file)
index 0000000..bcba0f4
--- /dev/null
@@ -0,0 +1,18 @@
+apiVersion: v1\r
+kind: Service\r
+metadata:\r
+  name: {{ .Values.appName }}\r
+  namespace: {{ .Values.namespace}}\r
+  labels:\r
+    app: {{ .Values.appName }}\r
+    version: {{ .Values.version}}\r
+spec:\r
+  type: NodePort\r
+  ports:\r
+  - name: http\r
+    port: 6001\r
+    protocol: TCP\r
+    nodePort: {{ .Values.nodePort}}\r
+  selector:\r
+    app: {{ .Values.appName }}\r
+    version: {{ .Values.version}}\r
diff --git a/a1-sdnc-vth/helm/a1-sdnc-vth/values.yaml b/a1-sdnc-vth/helm/a1-sdnc-vth/values.yaml
new file mode 100644 (file)
index 0000000..fac619d
--- /dev/null
@@ -0,0 +1,23 @@
+appName: a1-sdnc-vth\r
+env: dev\r
+version: 0.0.1-SNAPSHOT\r
+image: dockercentral.it.att.com:5100/com.att.ecomp.otf.dev/a1-sdnc-vth:0.0.1-SNAPSHOT\r
+namespace: com-att-ecomp-otf-dev #org-oran-otf\r
+nodePort: 32331\r
+replicas: 1\r
+health: /otf/vth/oran/a1/v1/health\r
+sharedCert: otf-cert-secret-builder\r
+pvc:\r
+  dev: org-oran-otf-dev-logs-pv\r
+  prod: org-oran-otf-prod-logs-pv\r
+auth:\r
+  enabled: true\r
+  user: user\r
+  pw: pw\r
+proxy:\r
+  enabled: false\r
+  http: \r
+  https: \r
+api:\r
+  base_url: http://njcdtl08rg9907.itservices.sbc.com\r
+  port: 3000\r
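+# A minimal install sketch (release name and overrides are illustrative):\r
+#   helm install a1-sdnc-vth ./helm/a1-sdnc-vth --set api.base_url=http://example-host --set auth.user=user --set auth.pw=pw\r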
diff --git a/a1-sdnc-vth/pip-requirements.txt b/a1-sdnc-vth/pip-requirements.txt
new file mode 100644 (file)
index 0000000..d25f478
--- /dev/null
@@ -0,0 +1,6 @@
+flask\r
+flask-cors\r
+requests\r
+configparser\r
diff --git a/a1-sdnc-vth/run.py b/a1-sdnc-vth/run.py
new file mode 100644 (file)
index 0000000..da0f1c7
--- /dev/null
@@ -0,0 +1,51 @@
+"""\r
+#   Copyright (c) 2019 AT&T Intellectual Property.                             #\r
+#                                                                              #\r
+#   Licensed under the Apache License, Version 2.0 (the "License");            #\r
+#   you may not use this file except in compliance with the License.           #\r
+#   You may obtain a copy of the License at                                    #\r
+#                                                                              #\r
+#       http://www.apache.org/licenses/LICENSE-2.0                             #\r
+#                                                                              #\r
+#   Unless required by applicable law or agreed to in writing, software        #\r
+#   distributed under the License is distributed on an "AS IS" BASIS,          #\r
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #\r
+#   See the License for the specific language governing permissions and        #\r
+#   limitations under the License.                                             #\r
+################################################################################\r
+# File name: run.py                                                            #\r
+# Description: vth for A1 service                                              #\r
+# Date created: 04/22/2020                                                     #\r
+# Last modified: 04/30/2020                                                    #\r
+# Python Version: 3.7.4                                                        #\r
+# Author: Raul Gomez (rg9907)                                                 #\r
+# Email: rg9907@att.com                                                        #\r
+################################################################################\r
+"""\r
+import logging\r
+from logging import FileHandler\r
+from flask import Flask\r
+from flask.logging import create_logger\r
+from app.routes import ROUTES, ERRORS\r
+#from dotenv import load_dotenv\r
+\r
+#load dev env vars\r
+#load_dotenv()\r
+APP = Flask(__name__)\r
+LOG = create_logger(APP)\r
+\r
+# Keep werkzeug's per-request log lines quiet; raise to DEBUG when debugging.\r
+logging.getLogger("werkzeug").setLevel(logging.WARNING)\r
+APP.register_blueprint(ERRORS)\r
+APP.register_blueprint(ROUTES, url_prefix="/otf/vth/oran/a1/v1")\r
+\r
+if __name__ == '__main__':\r
+    LOG_HANDLER = FileHandler('a1-sdnc-vth.log', mode='a')\r
+    LOG_HANDLER.setLevel(logging.INFO)\r
+    LOG.setLevel(logging.INFO)\r
+    LOG.addHandler(LOG_HANDLER)\r
+   # context = ('opt/cert/otf.pem', 'opt/cert/privateKey.pem')\r
+    # app.run(debug = False, host = '0.0.0.0', port = 5000, ssl_context = context)\r
+    APP.run(debug=False, host='0.0.0.0', port=6001)\r
diff --git a/otf-helm/.gitignore b/otf-helm/.gitignore
new file mode 100644 (file)
index 0000000..f92f978
--- /dev/null
@@ -0,0 +1 @@
+*.tgz\r
diff --git a/otf-helm/deploy.sh b/otf-helm/deploy.sh
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/otf-helm/otf/.helmignore b/otf-helm/otf/.helmignore
new file mode 100644 (file)
index 0000000..05d5aab
--- /dev/null
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.\r
+# This supports shell glob matching, relative path matching, and\r
+# negation (prefixed with !). Only one pattern per line.\r
+.DS_Store\r
+# Common VCS dirs\r
+.git/\r
+.gitignore\r
+.bzr/\r
+.bzrignore\r
+.hg/\r
+.hgignore\r
+.svn/\r
+# Common backup files\r
+*.swp\r
+*.bak\r
+*.tmp\r
+*~\r
+# Various IDEs\r
+.project\r
+.idea/\r
+*.tmproj\r
+.vscode/\r
diff --git a/otf-helm/otf/Chart.yaml b/otf-helm/otf/Chart.yaml
new file mode 100644 (file)
index 0000000..d1eda9c
--- /dev/null
@@ -0,0 +1,21 @@
+apiVersion: v2\r
+name: otf\r
+description: A Helm chart for OTF\r
+\r
+# A chart can be either an 'application' or a 'library' chart.\r
+#\r
+# Application charts are a collection of templates that can be packaged into versioned archives\r
+# to be deployed.\r
+#\r
+# Library charts provide useful utilities or functions for the chart developer. They're included as\r
+# a dependency of application charts to inject those utilities and functions into the rendering\r
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.\r
+type: application\r
+\r
+# This is the chart version. This version number should be incremented each time you make changes\r
+# to the chart and its templates, including the app version.\r
+version: 1.0.0\r
+\r
+# This is the version number of the application being deployed. This version number should be\r
+# incremented each time you make changes to the application.\r
+appVersion: Camille.2.1\r
diff --git a/otf-helm/otf/charts/databases/charts/mongodb/Chart.yaml b/otf-helm/otf/charts/databases/charts/mongodb/Chart.yaml
new file mode 100644 (file)
index 0000000..465f465
--- /dev/null
@@ -0,0 +1,20 @@
+apiVersion: v1\r
+name: mongodb\r
+version: 7.8.10\r
+appVersion: 4.2.6\r
+# The mongodb chart is deprecated and no longer maintained. For details on the deprecation, see the PROCESSES.md file.\r
+deprecated: true\r
+description: DEPRECATED NoSQL document-oriented database that stores JSON-like documents with dynamic schemas, simplifying the integration of data in content-driven applications.\r
+keywords:\r
+- mongodb\r
+- database\r
+- nosql\r
+- cluster\r
+- replicaset\r
+- replication\r
+home: https://mongodb.org\r
+icon: https://bitnami.com/assets/stacks/mongodb/img/mongodb-stack-220x234.png\r
+sources:\r
+- https://github.com/bitnami/bitnami-docker-mongodb\r
+maintainers: []\r
+engine: gotpl\r
diff --git a/otf-helm/otf/charts/databases/charts/mongodb/scripts/groups.json b/otf-helm/otf/charts/databases/charts/mongodb/scripts/groups.json
new file mode 100644 (file)
index 0000000..257d37b
--- /dev/null
@@ -0,0 +1,46 @@
+{\r
+    "_id" : {"$oid" : "5bdb2bdbd6b0d1f97953fbd7"},\r
+    "ownerId" : {"$oid" : "5b9bf50008a8133dc84c1496"},\r
+    "groupName" : "otf-public-dev",\r
+    "groupDescription" : "The OTF public group used in the dev environment.",\r
+    "parentGroupId" : null,\r
+    "members" : [\r
+        {\r
+            "roles" : [\r
+                "admin"\r
+            ],\r
+            "userId" : {"$oid" : "5b9bf50008a8133dc84c1496"}\r
+        }\r
+    ],\r
+    "roles" : [\r
+        {\r
+            "permissions" : [\r
+                "read",\r
+                "write",\r
+                "execute",\r
+                "delete",\r
+                "management"\r
+            ],\r
+            "roleName" : "admin"\r
+        },\r
+        {\r
+            "permissions" : [\r
+                "read"\r
+            ],\r
+            "roleName" : "user"\r
+        },\r
+        {\r
+            "permissions" : [\r
+                "read",\r
+                "write",\r
+                "execute",\r
+                "delete"\r
+            ],\r
+            "roleName" : "developer"\r
+        }\r
+    ],\r
+    "mechanizedIds" : [\r
+    ],\r
+    "_class" : "com.att.otf.api.domain.Group",\r
+    "updatedAt" : {"$date" : "2020-05-05T21:58:56.381Z"}\r
+}
\ No newline at end of file
diff --git a/otf-helm/otf/charts/databases/charts/mongodb/scripts/init_db.sh b/otf-helm/otf/charts/databases/charts/mongodb/scripts/init_db.sh
new file mode 100644 (file)
index 0000000..a3ebcde
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/bash\r
+\r
+sleep 10;\r
+mongoimport -c=users -d=otf --mode=upsert --username=otfuser --password=Today.123 --file=/data/scripts/users.json\r
+mongoimport -c=groups -d=otf --mode=upsert --username=otfuser --password=Today.123 --file=/data/scripts/groups.json\r
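+# Note: mongoimport parses MongoDB Extended JSON, not mongo-shell syntax, so\r
+# the seed files use {"$oid": ...}/{"$date": ...} instead of ObjectId()/ISODate().\r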
diff --git a/otf-helm/otf/charts/databases/charts/mongodb/scripts/users.json b/otf-helm/otf/charts/databases/charts/mongodb/scripts/users.json
new file mode 100644 (file)
index 0000000..059f9b2
--- /dev/null
@@ -0,0 +1,28 @@
+{\r
+    "_id" : {"$oid" : "5b9bf50008a8133dc84c1496"},\r
+    "permissions" : [\r
+        "admin"\r
+    ],\r
+    "firstName" : "Admin",\r
+    "lastName" : "Admin",\r
+    "email" : "admin@test.com",\r
+    "password" : "$2a$13$TZQCQrG6LuNdHgpEXB9YgOfaYZC7xG2E3ICE9lO/0Y9rh5gPdbQWu",\r
+    "groups" : [\r
+        {\r
+            "permissions" : [\r
+                "admin"\r
+            ],\r
+            "groupId" : {"$oid" : "5bdb2bdbd6b0d1f97953fbd7"}\r
+        }\r
+    ],\r
+    "createdAt" : {"$date" : "2020-05-05T12:13:05.176Z"},\r
+    "updatedAt" : {"$date" : "2020-05-05T20:40:16.591Z"},\r
+    "_class" : "com.att.otf.api.domain.User",\r
+    "favorites" : {\r
+        "testDefinitions" : [\r
+        ]\r
+    },\r
+    "enabled" : true,\r
+    "defaultGroup" : {"$oid" : "5bdb2bdbd6b0d1f97953fbd7"},\r
+    "defaultGroupEnabled" : true\r
+}
\ No newline at end of file
diff --git a/otf-helm/otf/charts/databases/charts/mongodb/templates/configmap.yaml b/otf-helm/otf/charts/databases/charts/mongodb/templates/configmap.yaml
new file mode 100644 (file)
index 0000000..eefbba9
--- /dev/null
@@ -0,0 +1,12 @@
+apiVersion: v1\r
+kind: ConfigMap\r
+metadata:\r
+  name : {{ .Values.global.mongodb.appName }}-init-scripts\r
+\r
+data:\r
+  {{- $files := .Files }}\r
+  {{- range $key, $value := .Files }}\r
+  {{- if hasPrefix "scripts/" $key }} {{/* only when in scripts/ */}}\r
+  {{ $key | trimPrefix "scripts/" }}: {{ $files.Get $key | quote }} {{/* adapt $key as desired */}}\r
+  {{- end }}\r
+  {{- end }}\r
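+# Rendered result sketch: each file under scripts/ becomes one ConfigMap key,\r
+# e.g. scripts/init_db.sh -> data["init_db.sh"], which the mongodb deployment\r
+# mounts at /data/scripts.\r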
diff --git a/otf-helm/otf/charts/databases/charts/mongodb/templates/deployment.yaml b/otf-helm/otf/charts/databases/charts/mongodb/templates/deployment.yaml
new file mode 100644 (file)
index 0000000..78f2e5c
--- /dev/null
@@ -0,0 +1,72 @@
+# extensions/v1beta1 Deployments were removed in Kubernetes 1.16; use apps/v1.\r
+apiVersion: apps/v1\r
+kind: Deployment\r
+metadata:\r
+  name: {{ .Values.global.mongodb.appName }}\r
+  namespace: {{.Values.global.mongodb.namespace }}\r
+  labels:\r
+    app: {{ .Values.global.mongodb.appName }}\r
+    version: {{.Values.global.mongodb.version }}\r
+spec:\r
+  revisionHistoryLimit: 1   # keep one replica set to allow rollback\r
+  minReadySeconds: 10\r
+  strategy:\r
+  # indicate which strategy we want for rolling update\r
+    type: RollingUpdate\r
+    rollingUpdate:\r
+      maxSurge: 1\r
+      maxUnavailable: 1\r
+  replicas: {{ .Values.global.mongodb.replicas }}\r
+  selector:\r
+    matchLabels:\r
+      app: {{ .Values.global.mongodb.appName }}\r
+      version: {{.Values.global.mongodb.version }}\r
+  template:\r
+    metadata:\r
+      labels:\r
+        app: {{ .Values.global.mongodb.appName }}\r
+        version: {{.Values.global.mongodb.version }}\r
+    spec:\r
+      serviceAccount: default\r
+      containers:\r
+      - name: {{ .Values.global.mongodb.appName }}\r
+        image: {{ .Values.global.mongodb.image.registry }}/{{ .Values.global.mongodb.image.repository }}\r
+        imagePullPolicy: Always               \r
+        env:\r
+        - name: MONGODB_PASSWORD\r
+          valueFrom:\r
+            secretKeyRef:\r
+              name : {{ .Values.global.mongodb.appName }}\r
+              key: mongo_password\r
+        - name: MONGODB_ROOT_PASSWORD\r
+          valueFrom:\r
+            secretKeyRef:\r
+              name : {{ .Values.global.mongodb.appName }}\r
+              key: mongo_root_password\r
+        - name: MONGODB_USERNAME\r
+          value: {{ .Values.global.mongodb.mongodbUsername | quote }}\r
+        - name: MONGODB_DATABASE\r
+          value: {{ .Values.global.mongodb.mongodbDatabase | quote }}\r
+        ports:\r
+        - name: mongodb\r
+          containerPort: 27017\r
+          hostPort: 27017\r
+        resources:\r
+          limits: \r
+            memory: {{ .Values.global.mongodb.resources.limits.memory }}\r
+            cpu: {{ .Values.global.mongodb.resources.limits.cpu }}\r
+          requests:\r
+            memory: {{ .Values.global.mongodb.resources.requests.memory }}\r
+            cpu: {{ .Values.global.mongodb.resources.requests.cpu }}\r
+        lifecycle:\r
+          postStart:\r
+            exec:\r
+              # Use the absolute mount path so the hook does not depend on the image's workdir.\r
+              command: ["/bin/bash", "-c", "cd /data/scripts; ./init_db.sh"]\r
+        volumeMounts:\r
+        - name: custom-init-scripts\r
+          mountPath: /data/scripts\r
+      volumes:\r
+        - name: custom-init-scripts\r
+          configMap:\r
+            name: {{  .Values.global.mongodb.appName }}-init-scripts\r
+            defaultMode: 0755\r
+      restartPolicy: Always\r
diff --git a/otf-helm/otf/charts/databases/charts/mongodb/templates/ingress.yaml b/otf-helm/otf/charts/databases/charts/mongodb/templates/ingress.yaml
new file mode 100644 (file)
index 0000000..6accedb
--- /dev/null
@@ -0,0 +1,30 @@
+# TODO: update the connectivity settings once other microservices need to connect to MongoDB.\r
+\r
+apiVersion: extensions/v1beta1\r
+kind: Ingress\r
+metadata:\r
+  name: {{ .Values.global.mongodb.appName }}\r
+  namespace: {{.Values.global.mongodb.namespace }}\r
+  labels:\r
+    app: {{ .Values.global.mongodb.appName }}\r
+    version: {{.Values.global.mongodb.version }}\r
+  annotations:\r
+    kubernetes.io/ingress.class: nginx\r
+    nginx.ingress.kubernetes.io/ssl-redirect: "true"\r
+    nginx.ingress.kubernetes.io/rewrite-target: /$1\r
+    nginx.ingress.kubernetes.io/configuration-snippet: |\r
+      proxy_set_header l5d-dst-override $service_name.$namespace.svc.cluster.local:$service_port;\r
+      grpc_set_header l5d-dst-override $service_name.$namespace.svc.cluster.local:$service_port;\r
+spec:\r
+  tls:\r
+  - hosts:\r
+    - {{ .Values.global.mongodb.nodeApi.host }}\r
+    secretName: {{.Values.global.mongodb.certName }}\r
+  rules:\r
+  - host: {{ .Values.global.mongodb.nodeApi.host }}\r
+    http:\r
+      paths:\r
+      - path: /mongodb/(.*)\r
+        backend:\r
+          serviceName: {{ .Values.global.mongodb.appName }}\r
+          servicePort: {{ .Values.global.mongodb.port }}\r
diff --git a/otf-helm/otf/charts/databases/charts/mongodb/templates/secret.yaml b/otf-helm/otf/charts/databases/charts/mongodb/templates/secret.yaml
new file mode 100644 (file)
index 0000000..f450e74
--- /dev/null
@@ -0,0 +1,8 @@
+apiVersion: v1\r
+kind: Secret\r
+metadata:\r
+  name: {{ .Values.global.mongodb.appName }}\r
+type: Opaque\r
+data:\r
+  mongo_root_password: {{ .Values.global.mongodb.mongodbRootPassword  | b64enc }}\r
+  mongo_password: {{ .Values.global.mongodb.mongodbPassword  | b64enc }}\r
diff --git a/otf-helm/otf/charts/databases/charts/mongodb/templates/service.yaml b/otf-helm/otf/charts/databases/charts/mongodb/templates/service.yaml
new file mode 100644 (file)
index 0000000..c72af9b
--- /dev/null
@@ -0,0 +1,19 @@
+apiVersion: v1\r
+kind: Service\r
+metadata:\r
+  name: {{ .Values.global.mongodb.appName }}\r
+  namespace: {{ .Values.global.mongodb.namespace }}\r
+  labels:\r
+    app: {{ .Values.global.mongodb.appName }}\r
+    version: {{ .Values.global.mongodb.version }}\r
+  annotations:\r
+    service.beta.kubernetes.io/azure-load-balancer-internal: "true"\r
+spec:\r
+  type: LoadBalancer\r
+  ports:\r
+  - port: {{ .Values.global.mongodb.port }}\r
+    protocol: TCP\r
+    targetPort: {{ .Values.global.mongodb.targetPort }}\r
+  selector:\r
+    app: {{ .Values.global.mongodb.appName }}\r
+    version: {{ .Values.global.mongodb.version }}\r
diff --git a/otf-helm/otf/charts/databases/charts/mongodb/values.yaml b/otf-helm/otf/charts/databases/charts/mongodb/values.yaml
new file mode 100644 (file)
index 0000000..804031f
--- /dev/null
@@ -0,0 +1,510 @@
+# Values yaml file for reference from the github. - currently not used.\r
+\r
+## Global Docker image parameters\r
+## Please, note that this will override the image parameters, including dependencies, configured to use the global value\r
+## Current available global Docker image parameters: imageRegistry and imagePullSecrets\r
+##\r
+# global:\r
+#   imageRegistry: myRegistryName\r
+#   imagePullSecrets:\r
+#     - myRegistryKeySecretName\r
+#   storageClass: myStorageClass\r
+\r
+image:\r
+  ## Bitnami MongoDB registry\r
+  ##\r
+  registry: docker.io\r
+  ## Bitnami MongoDB image name\r
+  ##\r
+  repository: bitnami/mongodb\r
+  ## Bitnami MongoDB image tag\r
+  ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/\r
+  ##\r
+  tag: 4.2.6-debian-10-r18\r
+  ## Specify a imagePullPolicy\r
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images\r
+  ##\r
+  pullPolicy: IfNotPresent\r
+  ## Optionally specify an array of imagePullSecrets.\r
+  ## Secrets must be manually created in the namespace.\r
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\r
+  ##\r
+  # pullSecrets:\r
+  #   - myRegistryKeySecretName\r
+\r
+  ## Set to true if you would like to see extra information on logs\r
+  ## It turns on Bitnami debugging in minideb-extras-base\r
+  ## ref:  https://github.com/bitnami/minideb-extras-base\r
+  debug: false\r
+\r
+## String to partially override mongodb.fullname template (will maintain the release name)\r
+##\r
+# nameOverride: otf-mongo\r
+\r
+## String to fully override mongodb.fullname template\r
+##\r
+# fullnameOverride:\r
+\r
+## Init containers parameters:\r
+## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.\r
+##\r
+volumePermissions:\r
+  enabled: false\r
+  image:\r
+    registry: docker.io\r
+    repository: bitnami/minideb\r
+    tag: buster\r
+    pullPolicy: Always\r
+    ## Optionally specify an array of imagePullSecrets.\r
+    ## Secrets must be manually created in the namespace.\r
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\r
+    ##\r
+    # pullSecrets:\r
+    #   - myRegistryKeySecretName\r
+  resources: {}\r
+\r
+## Enable authentication\r
+## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/\r
+#\r
+usePassword: true\r
+# existingSecret: name-of-existing-secret\r
+\r
+## MongoDB admin password\r
+## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run\r
+##\r
+mongodbRootPassword: otf.123\r
+\r
+## MongoDB custom user and database\r
+## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run\r
+##\r
+mongodbUsername: otfuser\r
+mongodbPassword: Today.123\r
+mongodbDatabase: otf\r
+\r
+## Whether enable/disable IPv6 on MongoDB\r
+## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6\r
+##\r
+mongodbEnableIPv6: false\r
+\r
+## Whether enable/disable DirectoryPerDB on MongoDB\r
+## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-directoryperdb\r
+##\r
+mongodbDirectoryPerDB: false\r
+\r
+## MongoDB System Log configuration\r
+## ref: https://github.com/bitnami/bitnami-docker-mongodb#configuring-system-log-verbosity-level\r
+##\r
+mongodbSystemLogVerbosity: 0\r
+mongodbDisableSystemLog: false\r
+\r
+## MongoDB additional command line flags\r
+##\r
+## Can be used to specify command line flags, for example:\r
+##\r
+## mongodbExtraFlags:\r
+##  - "--wiredTigerCacheSizeGB=2"\r
+mongodbExtraFlags: []\r
+\r
+## Pod Security Context\r
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/\r
+##\r
+securityContext:\r
+  enabled: true\r
+  fsGroup: 1001\r
+  runAsUser: 1001\r
+\r
+## Kubernetes Cluster Domain\r
+clusterDomain: cluster.local\r
+\r
+## Kubernetes service type\r
+service:\r
+  ## Specify an explicit service name.\r
+  # name: svc-mongo\r
+  ## Provide any additional annotations which may be required.\r
+  ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart\r
+  annotations: {}\r
+  type: ClusterIP\r
+  # clusterIP: None\r
+  port: 27017\r
+\r
+  ## Specify the nodePort value for the LoadBalancer and NodePort service types.\r
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport\r
+  ##\r
+  # nodePort:\r
+\r
+  ## Specify the externalIP value ClusterIP service type.\r
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips\r
+  # externalIPs: []\r
+\r
+  ## Specify the loadBalancerIP value for LoadBalancer service types.\r
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer\r
+  ##\r
+  # loadBalancerIP:\r
+\r
+  ## Specify the loadBalancerSourceRanges value for LoadBalancer service types.\r
+  ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service\r
+  ##\r
+  # loadBalancerSourceRanges: []\r
+\r
+# Add custom extra environment variables to all the MongoDB containers\r
+# extraEnvVars:\r
+\r
+## Use StatefulSet instead of Deployment when deploying standalone\r
+useStatefulSet: false\r
+\r
+## Setting up replication\r
+## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication\r
+#\r
+replicaSet:\r
+  ## Whether to create a MongoDB replica set for high availability or not\r
+  enabled: false\r
+  useHostnames: true\r
+\r
+  ## Name of the replica set\r
+  ##\r
+  # name: mongoOTF\r
+\r
+  ## Key used for replica set authentication\r
+  ##\r
+  # key: key\r
+\r
+  ## Number of replicas per each node type\r
+  ##\r
+  replicas:\r
+    secondary: 1\r
+    arbiter: 1\r
+\r
+  ## Pod Disruption Budget\r
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/\r
+  pdb:\r
+    enabled: true\r
+    minAvailable:\r
+      primary: 1\r
+      secondary: 1\r
+      arbiter: 1\r
+    # maxUnavailable:\r
+      # primary: 1\r
+      # secondary: 1\r
+      # arbiter: 1\r
+\r
+# Annotations to be added to the deployment or statefulsets\r
+annotations: {}\r
+\r
+# Additional labels to apply to the deployment or statefulsets\r
+labels: {}\r
+\r
+# Annotations to be added to MongoDB pods\r
+podAnnotations: {}\r
+\r
+# Additional pod labels to apply\r
+podLabels: {}\r
+\r
+## Use an alternate scheduler, e.g. "stork".\r
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/\r
+##\r
+# schedulerName:\r
+\r
+## Configure resource requests and limits\r
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/\r
+##\r
+resources: {}\r
+# Define separate resources per arbiter, which are less than primary or secondary\r
+# used only when replica set is enabled\r
+resourcesArbiter: {}\r
+# limits:\r
+#   cpu: 500m\r
+#   memory: 512Mi\r
+# requests:\r
+#   cpu: 100m\r
+#   memory: 256Mi\r
+\r
+## Pod priority\r
+## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/\r
+# priorityClassName: ""\r
+\r
+## Node selector\r
+## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector\r
+nodeSelector: {}\r
+\r
+## Affinity\r
+## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity\r
+affinity: {}\r
+# Define separate affinity for arbiter pod\r
+affinityArbiter: {}\r
+\r
+## Tolerations\r
+## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/\r
+tolerations: []\r
+\r
+## updateStrategy for MongoDB Primary, Secondary and Arbiter statefulsets\r
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies\r
+updateStrategy:\r
+  type: RollingUpdate\r
+\r
+## Add sidecars to the pod\r
+##\r
+## For example:\r
+## sidecars:\r
+##   - name: your-image-name\r
+##     image: your-image\r
+##     imagePullPolicy: Always\r
+##     ports:\r
+##       - name: portname\r
+##         containerPort: 1234\r
+sidecars: []\r
+## Array to add extra volumes\r
+##\r
+extraVolumes: []\r
+## Array to add extra mounts (normally used with extraVolumes)\r
+##\r
+extraVolumeMounts: []\r
+\r
+## Add sidecars to the arbiter pod\r
+# used only when replica set is enabled\r
+##\r
+## For example:\r
+## sidecars:\r
+##   - name: your-image-name\r
+##     image: your-image\r
+##     imagePullPolicy: Always\r
+##     ports:\r
+##       - name: portname\r
+##         containerPort: 1234\r
+sidecarsArbiter: []\r
+## Array to add extra volumes to the arbiter\r
+# used only when replica set is enabled\r
+##\r
+extraVolumesArbiter: []\r
+## Array to add extra mounts (normally used with extraVolumes) to the arbiter\r
+# used only when replica set is enabled\r
+##\r
+extraVolumeMountsArbiter: []\r
+\r
+## Enable persistence using Persistent Volume Claims\r
+## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/\r
+##\r
+persistence:\r
+  enabled: true\r
+  ## A manually managed Persistent Volume and Claim\r
+  ## Requires persistence.enabled: true\r
+  ## If defined, PVC must be created manually before volume will be bound\r
+  ##\r
+  # existingClaim:\r
+\r
+  ## The path the volume will be mounted at, useful when using different\r
+  ## MongoDB images.\r
+  ##\r
+  mountPath: /bitnami/mongodb\r
+\r
+  ## The subdirectory of the volume to mount to, useful in dev environments\r
+  ## and one PV for multiple services.\r
+  ##\r
+  subPath: ""\r
+\r
+  ## mongodb data Persistent Volume Storage Class\r
+  ## If defined, storageClassName: <storageClass>\r
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning\r
+  ## If undefined (the default) or set to null, no storageClassName spec is\r
+  ##   set, choosing the default provisioner.  (gp2 on AWS, standard on\r
+  ##   GKE, AWS & OpenStack)\r
+  ##\r
+  # storageClass: "-"\r
+  accessModes:\r
+    - ReadWriteOnce\r
+  size: 8Gi\r
+  annotations: {}\r
+\r
+## Configure the ingress resource that allows you to access the\r
+## MongoDB installation. Set up the URL\r
+## ref: http://kubernetes.io/docs/user-guide/ingress/\r
+##\r
+ingress:\r
+  ## Set to true to enable ingress record generation\r
+  enabled: false\r
+\r
+  ## Set this to true in order to add the corresponding annotations for cert-manager\r
+  certManager: false\r
+\r
+  ## Ingress annotations done as key:value pairs\r
+  ## For a full list of possible ingress annotations, please see\r
+  ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md\r
+  ##\r
+  ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set\r
+  ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set\r
+  annotations:\r
+  #  kubernetes.io/ingress.class: nginx\r
+\r
+  ## The list of hostnames to be covered with this ingress record.\r
+  ## Most likely this will be just one host, but in the event more hosts are needed, this is an array\r
+  hosts:\r
+  - name: mongodb.local\r
+    path: /\r
+\r
+  ## The tls configuration for the ingress\r
+  ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls\r
+  tls:\r
+  - hosts:\r
+      - mongodb.local\r
+    secretName: mongodb.local-tls\r
+\r
+  secrets:\r
+  ## If you're providing your own certificates, please use this to add the certificates as secrets\r
+  ## key and certificate should start with -----BEGIN CERTIFICATE----- or\r
+  ## -----BEGIN RSA PRIVATE KEY-----\r
+  ##\r
+  ## name should line up with a tlsSecret set further up\r
+  ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set\r
+  ##\r
+  ## It is also possible to create and manage the certificates outside of this helm chart\r
+  ## Please see README.md for more information\r
+  # - name: airflow.local-tls\r
+  #   key:\r
+  #   certificate:\r
+\r
+## Configure the options for init containers to be run before the main app containers\r
+## are started. All init containers are run sequentially and must exit without errors\r
+## for the next one to be started.\r
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/\r
+# extraInitContainers: |\r
+#   - name: do-something\r
+#     image: busybox\r
+#     command: ['do', 'something']\r
+\r
+## Configure extra options for liveness and readiness probes\r
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)\r
+livenessProbe:\r
+  enabled: true\r
+  initialDelaySeconds: 30\r
+  periodSeconds: 10\r
+  timeoutSeconds: 5\r
+  failureThreshold: 6\r
+  successThreshold: 1\r
+readinessProbe:\r
+  enabled: true\r
+  initialDelaySeconds: 5\r
+  periodSeconds: 10\r
+  timeoutSeconds: 5\r
+  failureThreshold: 6\r
+  successThreshold: 1\r
+\r
+# Define custom config map with init scripts\r
+initConfigMap: {}\r
+#  name: "init-config-map"\r
+\r
+## Entries for the MongoDB config file. For documentation of all options, see:\r
+##   http://docs.mongodb.org/manual/reference/configuration-options/\r
+##\r
+configmap:\r
+#  # where and how to store data.\r
+#  storage:\r
+#    dbPath: /bitnami/mongodb/data/db\r
+#    journal:\r
+#      enabled: true\r
+#    directoryPerDB: false\r
+#  # where to write logging data.\r
+#  systemLog:\r
+#    destination: file\r
+#    quiet: false\r
+#    logAppend: true\r
+#    logRotate: reopen\r
+#    path: /opt/bitnami/mongodb/logs/mongodb.log\r
+#    verbosity: 0\r
+#  # network interfaces\r
+#  net:\r
+#    port: 27017\r
+#    unixDomainSocket:\r
+#      enabled: true\r
+#      pathPrefix: /opt/bitnami/mongodb/tmp\r
+#    ipv6: false\r
+#    bindIpAll: true\r
+#  # replica set options\r
+#  #replication:\r
+#    #replSetName: replicaset\r
+#    #enableMajorityReadConcern: true\r
+#  # process management options\r
+#  processManagement:\r
+#     fork: false\r
+#     pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid\r
+#  # set parameter options\r
+#  setParameter:\r
+#     enableLocalhostAuthBypass: true\r
+#  # security options\r
+#  security:\r
+#    authorization: disabled\r
+#    #keyFile: /opt/bitnami/mongodb/conf/keyfile\r
+\r
+## Prometheus Exporter / Metrics\r
+##\r
+metrics:\r
+  enabled: false\r
+\r
+  image:\r
+    registry: docker.io\r
+    repository: bitnami/mongodb-exporter\r
+    tag: 0.10.0-debian-10-r41\r
+    pullPolicy: IfNotPresent\r
+    ## Optionally specify an array of imagePullSecrets.\r
+    ## Secrets must be manually created in the namespace.\r
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\r
+    ##\r
+    # pullSecrets:\r
+    #   - myRegistryKeySecretName\r
+\r
+  ## String with extra arguments to the metrics exporter\r
+  ## ref: https://github.com/percona/mongodb_exporter/blob/master/mongodb_exporter.go\r
+  extraArgs: ""\r
+\r
+  ## Metrics exporter resource requests and limits\r
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/\r
+  ##\r
+  # resources: {}\r
+\r
+  ## Metrics exporter liveness and readiness probes\r
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)\r
+  livenessProbe:\r
+    enabled: false\r
+    initialDelaySeconds: 15\r
+    periodSeconds: 5\r
+    timeoutSeconds: 5\r
+    failureThreshold: 3\r
+    successThreshold: 1\r
+  readinessProbe:\r
+    enabled: false\r
+    initialDelaySeconds: 5\r
+    periodSeconds: 5\r
+    timeoutSeconds: 1\r
+    failureThreshold: 3\r
+    successThreshold: 1\r
+\r
+  ## Metrics exporter pod Annotation\r
+  podAnnotations:\r
+    prometheus.io/scrape: "true"\r
+    prometheus.io/port: "9216"\r
+\r
+  ## Prometheus Service Monitor\r
+  ## ref: https://github.com/coreos/prometheus-operator\r
+  ##      https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md\r
+  serviceMonitor:\r
+    ## If the Prometheus Operator is installed in your cluster, set to true to create a ServiceMonitor resource\r
+    enabled: false\r
+\r
+    ## Specify a namespace if needed\r
+    # namespace: monitoring\r
+\r
+    ## Labels that the Prometheus instance in your cluster uses to select ServiceMonitors to work with\r
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec\r
+    additionalLabels: {}\r
+\r
+    ## Specify Metric Relabellings to add to the scrape endpoint\r
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint\r
+    # relabellings:\r
+\r
+    alerting:\r
+      ## Define individual alerting rules as required\r
+      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#rulegroup\r
+      ##      https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/\r
+      rules: {}\r
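+      ## e.g. an expression such as `mongodb_up == 0` held for 5m could alert\r
+      ## when the exporter loses its MongoDB connection (illustrative only)\r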
+\r
+      ## Labels that the Prometheus instance in your cluster uses to select PrometheusRules to work with\r
+      ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec\r
+      additionalLabels: {}\r
diff --git a/otf-helm/otf/charts/databases/charts/mysqldb/Chart.yaml b/otf-helm/otf/charts/databases/charts/mysqldb/Chart.yaml
new file mode 100644 (file)
index 0000000..e803431
--- /dev/null
@@ -0,0 +1,17 @@
+apiVersion: v1\r
+name: mysql\r
+version: 1.6.3\r
+appVersion: 5.7.28\r
+description: Fast, reliable, scalable, and easy to use open-source relational database\r
+  system.\r
+keywords:\r
+- mysql\r
+- database\r
+- sql\r
+home: https://www.mysql.com/\r
+icon: https://www.mysql.com/common/logos/logo-mysql-170x115.png\r
+sources:\r
+- https://github.com/kubernetes/charts\r
+- https://github.com/docker-library/mysql\r
+maintainers:\r
+engine: gotpl\r
diff --git a/otf-helm/otf/charts/databases/charts/mysqldb/scripts/init_db.sh b/otf-helm/otf/charts/databases/charts/mysqldb/scripts/init_db.sh
new file mode 100644 (file)
index 0000000..9b748ca
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/bash\r
+\r
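+# Give MySQL time to finish starting, then seed the Camunda engine and\r
+# identity schemas from the mounted SQL scripts.\r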
+sleep 20;\r
+mysql -u otfuser otf_camunda -pToday.123 < /data/scripts/mysql_engine_7.10.0.sql\r
+mysql -u otfuser otf_camunda -pToday.123 < /data/scripts/mysql_identity_7.10.0.sql\r
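+# Note: a fixed sleep is a crude readiness wait; a polling loop such as\r
+#   until mysqladmin ping --silent; do sleep 2; done\r
+# (a sketch, assuming mysqladmin is available in the image) would be more robust.\r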
+\r
diff --git a/otf-helm/otf/charts/databases/charts/mysqldb/scripts/mysql_engine_7.10.0.sql b/otf-helm/otf/charts/databases/charts/mysqldb/scripts/mysql_engine_7.10.0.sql
new file mode 100644 (file)
index 0000000..aefe0cb
--- /dev/null
@@ -0,0 +1,1298 @@
+--\r
+-- Copyright © 2012 - 2018 camunda services GmbH and various authors (info@camunda.com)\r
+--\r
+-- Licensed under the Apache License, Version 2.0 (the "License");\r
+-- you may not use this file except in compliance with the License.\r
+-- You may obtain a copy of the License at\r
+--\r
+--     http://www.apache.org/licenses/LICENSE-2.0\r
+--\r
+-- Unless required by applicable law or agreed to in writing, software\r
+-- distributed under the License is distributed on an "AS IS" BASIS,\r
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+-- See the License for the specific language governing permissions and\r
+-- limitations under the License.\r
+--\r
+\r
+create table ACT_GE_PROPERTY (\r
+    NAME_ varchar(64),\r
+    VALUE_ varchar(300),\r
+    REV_ integer,\r
+    primary key (NAME_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+insert into ACT_GE_PROPERTY\r
+values ('schema.version', 'fox', 1);\r
+\r
+insert into ACT_GE_PROPERTY\r
+values ('schema.history', 'create(fox)', 1);\r
+\r
+insert into ACT_GE_PROPERTY\r
+values ('next.dbid', '1', 1);\r
+\r
+insert into ACT_GE_PROPERTY\r
+values ('deployment.lock', '0', 1);\r
+\r
+insert into ACT_GE_PROPERTY\r
+values ('history.cleanup.job.lock', '0', 1);\r
+\r
+insert into ACT_GE_PROPERTY\r
+values ('startup.lock', '0', 1);\r
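+\r
+-- (the engine checks these properties at startup; e.g. the installed schema\r
+--  version can be read back with:\r
+--  select VALUE_ from ACT_GE_PROPERTY where NAME_ = 'schema.version';)\r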
+\r
+create table ACT_GE_BYTEARRAY (\r
+    ID_ varchar(64),\r
+    REV_ integer,\r
+    NAME_ varchar(255),\r
+    DEPLOYMENT_ID_ varchar(64),\r
+    BYTES_ LONGBLOB,\r
+    GENERATED_ TINYINT,\r
+    TENANT_ID_ varchar(64),\r
+    TYPE_ integer,\r
+    CREATE_TIME_ datetime,\r
+    ROOT_PROC_INST_ID_ varchar(64),\r
+    REMOVAL_TIME_ datetime,\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_RE_DEPLOYMENT (\r
+    ID_ varchar(64),\r
+    NAME_ varchar(255),\r
+    DEPLOY_TIME_ timestamp,\r
+    SOURCE_ varchar(255),\r
+    TENANT_ID_ varchar(64),\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_RU_EXECUTION (\r
+    ID_ varchar(64),\r
+    REV_ integer,\r
+    ROOT_PROC_INST_ID_ varchar(64),\r
+    PROC_INST_ID_ varchar(64),\r
+    BUSINESS_KEY_ varchar(255),\r
+    PARENT_ID_ varchar(64),\r
+    PROC_DEF_ID_ varchar(64),\r
+    SUPER_EXEC_ varchar(64),\r
+    SUPER_CASE_EXEC_ varchar(64),\r
+    CASE_INST_ID_ varchar(64),\r
+    ACT_ID_ varchar(255),\r
+    ACT_INST_ID_ varchar(64),\r
+    IS_ACTIVE_ TINYINT,\r
+    IS_CONCURRENT_ TINYINT,\r
+    IS_SCOPE_ TINYINT,\r
+    IS_EVENT_SCOPE_ TINYINT,\r
+    SUSPENSION_STATE_ integer,\r
+    CACHED_ENT_STATE_ integer,\r
+    SEQUENCE_COUNTER_ bigint,\r
+    TENANT_ID_ varchar(64),\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_RU_JOB (\r
+    ID_ varchar(64) NOT NULL,\r
+    REV_ integer,\r
+    TYPE_ varchar(255) NOT NULL,\r
+    LOCK_EXP_TIME_ timestamp NULL,\r
+    LOCK_OWNER_ varchar(255),\r
+    EXCLUSIVE_ boolean,\r
+    EXECUTION_ID_ varchar(64),\r
+    PROCESS_INSTANCE_ID_ varchar(64),\r
+    PROCESS_DEF_ID_ varchar(64),\r
+    PROCESS_DEF_KEY_ varchar(255),\r
+    RETRIES_ integer,\r
+    EXCEPTION_STACK_ID_ varchar(64),\r
+    EXCEPTION_MSG_ varchar(4000),\r
+    DUEDATE_ timestamp NULL,\r
+    REPEAT_ varchar(255),\r
+    HANDLER_TYPE_ varchar(255),\r
+    HANDLER_CFG_ varchar(4000),\r
+    DEPLOYMENT_ID_ varchar(64),\r
+    SUSPENSION_STATE_ integer NOT NULL DEFAULT 1,\r
+    JOB_DEF_ID_ varchar(64),\r
+    PRIORITY_ bigint NOT NULL DEFAULT 0,\r
+    SEQUENCE_COUNTER_ bigint,\r
+    TENANT_ID_ varchar(64),\r
+    CREATE_TIME_ datetime,\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_RU_JOBDEF (\r
+    ID_ varchar(64) NOT NULL,\r
+    REV_ integer,\r
+    PROC_DEF_ID_ varchar(64),\r
+    PROC_DEF_KEY_ varchar(255),\r
+    ACT_ID_ varchar(255),\r
+    JOB_TYPE_ varchar(255) NOT NULL,\r
+    JOB_CONFIGURATION_ varchar(255),\r
+    SUSPENSION_STATE_ integer,\r
+    JOB_PRIORITY_ bigint,\r
+    TENANT_ID_ varchar(64),\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_RE_PROCDEF (\r
+    ID_ varchar(64) not null,\r
+    REV_ integer,\r
+    CATEGORY_ varchar(255),\r
+    NAME_ varchar(255),\r
+    KEY_ varchar(255) not null,\r
+    VERSION_ integer not null,\r
+    DEPLOYMENT_ID_ varchar(64),\r
+    RESOURCE_NAME_ varchar(4000),\r
+    DGRM_RESOURCE_NAME_ varchar(4000),\r
+    HAS_START_FORM_KEY_ TINYINT,\r
+    SUSPENSION_STATE_ integer,\r
+    TENANT_ID_ varchar(64),\r
+    VERSION_TAG_ varchar(64),\r
+    HISTORY_TTL_ integer,\r
+    STARTABLE_ boolean NOT NULL default TRUE,\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_RU_TASK (\r
+    ID_ varchar(64),\r
+    REV_ integer,\r
+    EXECUTION_ID_ varchar(64),\r
+    PROC_INST_ID_ varchar(64),\r
+    PROC_DEF_ID_ varchar(64),\r
+    CASE_EXECUTION_ID_ varchar(64),\r
+    CASE_INST_ID_ varchar(64),\r
+    CASE_DEF_ID_ varchar(64),\r
+    NAME_ varchar(255),\r
+    PARENT_TASK_ID_ varchar(64),\r
+    DESCRIPTION_ varchar(4000),\r
+    TASK_DEF_KEY_ varchar(255),\r
+    OWNER_ varchar(255),\r
+    ASSIGNEE_ varchar(255),\r
+    DELEGATION_ varchar(64),\r
+    PRIORITY_ integer,\r
+    CREATE_TIME_ timestamp,\r
+    DUE_DATE_ datetime,\r
+    FOLLOW_UP_DATE_ datetime,\r
+    SUSPENSION_STATE_ integer,\r
+    TENANT_ID_ varchar(64),\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_RU_IDENTITYLINK (\r
+    ID_ varchar(64),\r
+    REV_ integer,\r
+    GROUP_ID_ varchar(255),\r
+    TYPE_ varchar(255),\r
+    USER_ID_ varchar(255),\r
+    TASK_ID_ varchar(64),\r
+    PROC_DEF_ID_ varchar(64),\r
+    TENANT_ID_ varchar(64),\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_RU_VARIABLE (\r
+    ID_ varchar(64) not null,\r
+    REV_ integer,\r
+    TYPE_ varchar(255) not null,\r
+    NAME_ varchar(255) not null,\r
+    EXECUTION_ID_ varchar(64),\r
+    PROC_INST_ID_ varchar(64),\r
+    CASE_EXECUTION_ID_ varchar(64),\r
+    CASE_INST_ID_ varchar(64),\r
+    TASK_ID_ varchar(64),\r
+    BYTEARRAY_ID_ varchar(64),\r
+    DOUBLE_ double,\r
+    LONG_ bigint,\r
+    TEXT_ varchar(4000),\r
+    TEXT2_ varchar(4000),\r
+    VAR_SCOPE_ varchar(64) not null,\r
+    SEQUENCE_COUNTER_ bigint,\r
+    IS_CONCURRENT_LOCAL_ TINYINT,\r
+    TENANT_ID_ varchar(64),\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_RU_EVENT_SUBSCR (\r
+    ID_ varchar(64) not null,\r
+    REV_ integer,\r
+    EVENT_TYPE_ varchar(255) not null,\r
+    EVENT_NAME_ varchar(255),\r
+    EXECUTION_ID_ varchar(64),\r
+    PROC_INST_ID_ varchar(64),\r
+    ACTIVITY_ID_ varchar(255),\r
+    CONFIGURATION_ varchar(255),\r
+    CREATED_ timestamp not null,\r
+    TENANT_ID_ varchar(64),\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_RU_INCIDENT (\r
+  ID_ varchar(64) not null,\r
+  REV_ integer not null,\r
+  INCIDENT_TIMESTAMP_ timestamp not null,\r
+  INCIDENT_MSG_ varchar(4000),\r
+  INCIDENT_TYPE_ varchar(255) not null,\r
+  EXECUTION_ID_ varchar(64),\r
+  ACTIVITY_ID_ varchar(255),\r
+  PROC_INST_ID_ varchar(64),\r
+  PROC_DEF_ID_ varchar(64),\r
+  CAUSE_INCIDENT_ID_ varchar(64),\r
+  ROOT_CAUSE_INCIDENT_ID_ varchar(64),\r
+  CONFIGURATION_ varchar(255),\r
+  TENANT_ID_ varchar(64),\r
+  JOB_DEF_ID_ varchar(64),\r
+  primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_RU_AUTHORIZATION (\r
+  ID_ varchar(64) not null,\r
+  REV_ integer not null,\r
+  TYPE_ integer not null,\r
+  GROUP_ID_ varchar(255),\r
+  USER_ID_ varchar(255),\r
+  RESOURCE_TYPE_ integer not null,\r
+  RESOURCE_ID_ varchar(255),\r
+  PERMS_ integer,\r
+  primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_RU_FILTER (\r
+  ID_ varchar(64) not null,\r
+  REV_ integer not null,\r
+  RESOURCE_TYPE_ varchar(255) not null,\r
+  NAME_ varchar(255) not null,\r
+  OWNER_ varchar(255),\r
+  QUERY_ LONGTEXT not null,\r
+  PROPERTIES_ LONGTEXT,\r
+  primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_RU_METER_LOG (\r
+  ID_ varchar(64) not null,\r
+  NAME_ varchar(64) not null,\r
+  REPORTER_ varchar(255),\r
+  VALUE_ bigint,\r
+  TIMESTAMP_ timestamp,\r
+  MILLISECONDS_ bigint DEFAULT 0,\r
+  primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_RU_EXT_TASK (\r
+  ID_ varchar(64) not null,\r
+  REV_ integer not null,\r
+  WORKER_ID_ varchar(255),\r
+  TOPIC_NAME_ varchar(255),\r
+  RETRIES_ integer,\r
+  ERROR_MSG_ varchar(4000),\r
+  ERROR_DETAILS_ID_ varchar(64),\r
+  LOCK_EXP_TIME_ timestamp NULL,\r
+  SUSPENSION_STATE_ integer,\r
+  EXECUTION_ID_ varchar(64),\r
+  PROC_INST_ID_ varchar(64),\r
+  PROC_DEF_ID_ varchar(64),\r
+  PROC_DEF_KEY_ varchar(255),\r
+  ACT_ID_ varchar(255),\r
+  ACT_INST_ID_ varchar(64),\r
+  TENANT_ID_ varchar(64),\r
+  PRIORITY_ bigint NOT NULL DEFAULT 0,\r
+  primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_RU_BATCH (\r
+  ID_ varchar(64) not null,\r
+  REV_ integer not null,\r
+  TYPE_ varchar(255),\r
+  TOTAL_JOBS_ integer,\r
+  JOBS_CREATED_ integer,\r
+  JOBS_PER_SEED_ integer,\r
+  INVOCATIONS_PER_JOB_ integer,\r
+  SEED_JOB_DEF_ID_ varchar(64),\r
+  BATCH_JOB_DEF_ID_ varchar(64),\r
+  MONITOR_JOB_DEF_ID_ varchar(64),\r
+  SUSPENSION_STATE_ integer,\r
+  CONFIGURATION_ varchar(255),\r
+  TENANT_ID_ varchar(64),\r
+  CREATE_USER_ID_ varchar(255),\r
+  primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create index ACT_IDX_EXEC_ROOT_PI on ACT_RU_EXECUTION(ROOT_PROC_INST_ID_);\r
+create index ACT_IDX_EXEC_BUSKEY on ACT_RU_EXECUTION(BUSINESS_KEY_);\r
+create index ACT_IDX_EXEC_TENANT_ID on ACT_RU_EXECUTION(TENANT_ID_);\r
+create index ACT_IDX_TASK_CREATE on ACT_RU_TASK(CREATE_TIME_);\r
+create index ACT_IDX_TASK_ASSIGNEE on ACT_RU_TASK(ASSIGNEE_);\r
+create index ACT_IDX_TASK_TENANT_ID on ACT_RU_TASK(TENANT_ID_);\r
+create index ACT_IDX_IDENT_LNK_USER on ACT_RU_IDENTITYLINK(USER_ID_);\r
+create index ACT_IDX_IDENT_LNK_GROUP on ACT_RU_IDENTITYLINK(GROUP_ID_);\r
+create index ACT_IDX_EVENT_SUBSCR_CONFIG_ on ACT_RU_EVENT_SUBSCR(CONFIGURATION_);\r
+create index ACT_IDX_EVENT_SUBSCR_TENANT_ID on ACT_RU_EVENT_SUBSCR(TENANT_ID_);\r
+create index ACT_IDX_VARIABLE_TASK_ID on ACT_RU_VARIABLE(TASK_ID_);\r
+create index ACT_IDX_VARIABLE_TENANT_ID on ACT_RU_VARIABLE(TENANT_ID_);\r
+create index ACT_IDX_ATHRZ_PROCEDEF on ACT_RU_IDENTITYLINK(PROC_DEF_ID_);\r
+create index ACT_IDX_INC_CONFIGURATION on ACT_RU_INCIDENT(CONFIGURATION_);\r
+create index ACT_IDX_INC_TENANT_ID on ACT_RU_INCIDENT(TENANT_ID_);\r
+-- CAM-5914\r
+create index ACT_IDX_JOB_EXECUTION_ID on ACT_RU_JOB(EXECUTION_ID_);\r
+-- this index needs to be limited in mysql see CAM-6938\r
+create index ACT_IDX_JOB_HANDLER on ACT_RU_JOB(HANDLER_TYPE_(100),HANDLER_CFG_(155));\r
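+-- (utf8 stores up to 3 bytes per character here, so the 100 + 155 character\r
+--  prefixes cap the key at 765 bytes, just under InnoDB's 767-byte limit)\r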
+create index ACT_IDX_JOB_PROCINST on ACT_RU_JOB(PROCESS_INSTANCE_ID_);\r
+create index ACT_IDX_JOB_TENANT_ID on ACT_RU_JOB(TENANT_ID_);\r
+create index ACT_IDX_JOBDEF_TENANT_ID on ACT_RU_JOBDEF(TENANT_ID_);\r
+\r
+-- new metric milliseconds column\r
+CREATE INDEX ACT_IDX_METER_LOG_MS ON ACT_RU_METER_LOG(MILLISECONDS_);\r
+CREATE INDEX ACT_IDX_METER_LOG_NAME_MS ON ACT_RU_METER_LOG(NAME_, MILLISECONDS_);\r
+CREATE INDEX ACT_IDX_METER_LOG_REPORT ON ACT_RU_METER_LOG(NAME_, REPORTER_, MILLISECONDS_);\r
+\r
+-- old metric timestamp column\r
+CREATE INDEX ACT_IDX_METER_LOG_TIME ON ACT_RU_METER_LOG(TIMESTAMP_);\r
+CREATE INDEX ACT_IDX_METER_LOG ON ACT_RU_METER_LOG(NAME_, TIMESTAMP_);\r
+\r
+create index ACT_IDX_EXT_TASK_TOPIC on ACT_RU_EXT_TASK(TOPIC_NAME_);\r
+create index ACT_IDX_EXT_TASK_TENANT_ID on ACT_RU_EXT_TASK(TENANT_ID_);\r
+create index ACT_IDX_EXT_TASK_PRIORITY ON ACT_RU_EXT_TASK(PRIORITY_);\r
+create index ACT_IDX_EXT_TASK_ERR_DETAILS ON ACT_RU_EXT_TASK(ERROR_DETAILS_ID_);\r
+create index ACT_IDX_AUTH_GROUP_ID on ACT_RU_AUTHORIZATION(GROUP_ID_);\r
+create index ACT_IDX_JOB_JOB_DEF_ID on ACT_RU_JOB(JOB_DEF_ID_);\r
+\r
+alter table ACT_GE_BYTEARRAY\r
+    add constraint ACT_FK_BYTEARR_DEPL\r
+    foreign key (DEPLOYMENT_ID_)\r
+    references ACT_RE_DEPLOYMENT (ID_);\r
+\r
+alter table ACT_RU_EXECUTION\r
+    add constraint ACT_FK_EXE_PROCINST\r
+    foreign key (PROC_INST_ID_)\r
+    references ACT_RU_EXECUTION (ID_) on delete cascade on update cascade;\r
+\r
+alter table ACT_RU_EXECUTION\r
+    add constraint ACT_FK_EXE_PARENT\r
+    foreign key (PARENT_ID_)\r
+    references ACT_RU_EXECUTION (ID_);\r
+\r
+alter table ACT_RU_EXECUTION\r
+    add constraint ACT_FK_EXE_SUPER\r
+    foreign key (SUPER_EXEC_)\r
+    references ACT_RU_EXECUTION (ID_);\r
+\r
+alter table ACT_RU_EXECUTION\r
+    add constraint ACT_FK_EXE_PROCDEF\r
+    foreign key (PROC_DEF_ID_)\r
+    references ACT_RE_PROCDEF (ID_);\r
+\r
+alter table ACT_RU_IDENTITYLINK\r
+    add constraint ACT_FK_TSKASS_TASK\r
+    foreign key (TASK_ID_)\r
+    references ACT_RU_TASK (ID_);\r
+\r
+alter table ACT_RU_IDENTITYLINK\r
+    add constraint ACT_FK_ATHRZ_PROCEDEF\r
+    foreign key (PROC_DEF_ID_)\r
+    references ACT_RE_PROCDEF(ID_);\r
+\r
+alter table ACT_RU_TASK\r
+    add constraint ACT_FK_TASK_EXE\r
+    foreign key (EXECUTION_ID_)\r
+    references ACT_RU_EXECUTION (ID_);\r
+\r
+alter table ACT_RU_TASK\r
+    add constraint ACT_FK_TASK_PROCINST\r
+    foreign key (PROC_INST_ID_)\r
+    references ACT_RU_EXECUTION (ID_);\r
+\r
+alter table ACT_RU_TASK\r
+  add constraint ACT_FK_TASK_PROCDEF\r
+  foreign key (PROC_DEF_ID_)\r
+  references ACT_RE_PROCDEF (ID_);\r
+\r
+alter table ACT_RU_VARIABLE\r
+    add constraint ACT_FK_VAR_EXE\r
+    foreign key (EXECUTION_ID_)\r
+    references ACT_RU_EXECUTION (ID_);\r
+\r
+alter table ACT_RU_VARIABLE\r
+    add constraint ACT_FK_VAR_PROCINST\r
+    foreign key (PROC_INST_ID_)\r
+    references ACT_RU_EXECUTION(ID_);\r
+\r
+alter table ACT_RU_VARIABLE\r
+    add constraint ACT_FK_VAR_BYTEARRAY\r
+    foreign key (BYTEARRAY_ID_)\r
+    references ACT_GE_BYTEARRAY (ID_);\r
+\r
+alter table ACT_RU_JOB\r
+    add constraint ACT_FK_JOB_EXCEPTION\r
+    foreign key (EXCEPTION_STACK_ID_)\r
+    references ACT_GE_BYTEARRAY (ID_);\r
+\r
+alter table ACT_RU_EVENT_SUBSCR\r
+    add constraint ACT_FK_EVENT_EXEC\r
+    foreign key (EXECUTION_ID_)\r
+    references ACT_RU_EXECUTION(ID_);\r
+\r
+alter table ACT_RU_INCIDENT\r
+    add constraint ACT_FK_INC_EXE\r
+    foreign key (EXECUTION_ID_)\r
+    references ACT_RU_EXECUTION (ID_);\r
+\r
+alter table ACT_RU_INCIDENT\r
+    add constraint ACT_FK_INC_PROCINST\r
+    foreign key (PROC_INST_ID_)\r
+    references ACT_RU_EXECUTION (ID_);\r
+\r
+alter table ACT_RU_INCIDENT\r
+    add constraint ACT_FK_INC_PROCDEF\r
+    foreign key (PROC_DEF_ID_)\r
+    references ACT_RE_PROCDEF (ID_);\r
+\r
+alter table ACT_RU_INCIDENT\r
+    add constraint ACT_FK_INC_CAUSE\r
+    foreign key (CAUSE_INCIDENT_ID_)\r
+    references ACT_RU_INCIDENT (ID_) on delete cascade on update cascade;\r
+\r
+alter table ACT_RU_INCIDENT\r
+    add constraint ACT_FK_INC_RCAUSE\r
+    foreign key (ROOT_CAUSE_INCIDENT_ID_)\r
+    references ACT_RU_INCIDENT (ID_) on delete cascade on update cascade;\r
+\r
+alter table ACT_RU_EXT_TASK\r
+    add constraint ACT_FK_EXT_TASK_ERROR_DETAILS\r
+    foreign key (ERROR_DETAILS_ID_)\r
+    references ACT_GE_BYTEARRAY (ID_);\r
+\r
+create index ACT_IDX_INC_JOB_DEF on ACT_RU_INCIDENT(JOB_DEF_ID_);\r
+alter table ACT_RU_INCIDENT\r
+    add constraint ACT_FK_INC_JOB_DEF\r
+    foreign key (JOB_DEF_ID_)\r
+    references ACT_RU_JOBDEF (ID_);\r
+\r
+alter table ACT_RU_AUTHORIZATION\r
+    add constraint ACT_UNIQ_AUTH_USER\r
+    unique (USER_ID_,TYPE_,RESOURCE_TYPE_,RESOURCE_ID_);\r
+\r
+alter table ACT_RU_AUTHORIZATION\r
+    add constraint ACT_UNIQ_AUTH_GROUP\r
+    unique (GROUP_ID_,TYPE_,RESOURCE_TYPE_,RESOURCE_ID_);\r
+\r
+alter table ACT_RU_VARIABLE\r
+    add constraint ACT_UNIQ_VARIABLE\r
+    unique (VAR_SCOPE_, NAME_);\r
+\r
+alter table ACT_RU_EXT_TASK\r
+    add constraint ACT_FK_EXT_TASK_EXE\r
+    foreign key (EXECUTION_ID_)\r
+    references ACT_RU_EXECUTION (ID_);\r
+\r
+create index ACT_IDX_BATCH_SEED_JOB_DEF ON ACT_RU_BATCH(SEED_JOB_DEF_ID_);\r
+alter table ACT_RU_BATCH\r
+    add constraint ACT_FK_BATCH_SEED_JOB_DEF\r
+    foreign key (SEED_JOB_DEF_ID_)\r
+    references ACT_RU_JOBDEF (ID_);\r
+\r
+create index ACT_IDX_BATCH_MONITOR_JOB_DEF ON ACT_RU_BATCH(MONITOR_JOB_DEF_ID_);\r
+alter table ACT_RU_BATCH\r
+    add constraint ACT_FK_BATCH_MONITOR_JOB_DEF\r
+    foreign key (MONITOR_JOB_DEF_ID_)\r
+    references ACT_RU_JOBDEF (ID_);\r
+\r
+create index ACT_IDX_BATCH_JOB_DEF ON ACT_RU_BATCH(BATCH_JOB_DEF_ID_);\r
+alter table ACT_RU_BATCH\r
+    add constraint ACT_FK_BATCH_JOB_DEF\r
+    foreign key (BATCH_JOB_DEF_ID_)\r
+    references ACT_RU_JOBDEF (ID_);\r
+\r
+-- indexes for deadlock problems - https://app.camunda.com/jira/browse/CAM-2567 --\r
+create index ACT_IDX_INC_CAUSEINCID on ACT_RU_INCIDENT(CAUSE_INCIDENT_ID_);\r
+create index ACT_IDX_INC_EXID on ACT_RU_INCIDENT(EXECUTION_ID_);\r
+create index ACT_IDX_INC_PROCDEFID on ACT_RU_INCIDENT(PROC_DEF_ID_);\r
+create index ACT_IDX_INC_PROCINSTID on ACT_RU_INCIDENT(PROC_INST_ID_);\r
+create index ACT_IDX_INC_ROOTCAUSEINCID on ACT_RU_INCIDENT(ROOT_CAUSE_INCIDENT_ID_);\r
+-- index for deadlock problem - https://app.camunda.com/jira/browse/CAM-4440 --\r
+create index ACT_IDX_AUTH_RESOURCE_ID on ACT_RU_AUTHORIZATION(RESOURCE_ID_);\r
+-- index to prevent deadlock on fk constraint - https://app.camunda.com/jira/browse/CAM-5440 --\r
+create index ACT_IDX_EXT_TASK_EXEC on ACT_RU_EXT_TASK(EXECUTION_ID_);\r
+\r
+-- indexes to improve deployment\r
+create index ACT_IDX_BYTEARRAY_ROOT_PI on ACT_GE_BYTEARRAY(ROOT_PROC_INST_ID_);\r
+create index ACT_IDX_BYTEARRAY_RM_TIME on ACT_GE_BYTEARRAY(REMOVAL_TIME_);\r
+create index ACT_IDX_BYTEARRAY_NAME on ACT_GE_BYTEARRAY(NAME_);\r
+create index ACT_IDX_DEPLOYMENT_NAME on ACT_RE_DEPLOYMENT(NAME_);\r
+create index ACT_IDX_DEPLOYMENT_TENANT_ID on ACT_RE_DEPLOYMENT(TENANT_ID_);\r
+create index ACT_IDX_JOBDEF_PROC_DEF_ID ON ACT_RU_JOBDEF(PROC_DEF_ID_);\r
+create index ACT_IDX_JOB_HANDLER_TYPE ON ACT_RU_JOB(HANDLER_TYPE_);\r
+create index ACT_IDX_EVENT_SUBSCR_EVT_NAME ON ACT_RU_EVENT_SUBSCR(EVENT_NAME_);\r
+create index ACT_IDX_PROCDEF_DEPLOYMENT_ID ON ACT_RE_PROCDEF(DEPLOYMENT_ID_);\r
+create index ACT_IDX_PROCDEF_TENANT_ID ON ACT_RE_PROCDEF(TENANT_ID_);\r
+create index ACT_IDX_PROCDEF_VER_TAG ON ACT_RE_PROCDEF(VERSION_TAG_);\r
+--\r
+-- Copyright © 2012 - 2018 camunda services GmbH and various authors (info@camunda.com)\r
+--\r
+-- Licensed under the Apache License, Version 2.0 (the "License");\r
+-- you may not use this file except in compliance with the License.\r
+-- You may obtain a copy of the License at\r
+--\r
+--     http://www.apache.org/licenses/LICENSE-2.0\r
+--\r
+-- Unless required by applicable law or agreed to in writing, software\r
+-- distributed under the License is distributed on an "AS IS" BASIS,\r
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+-- See the License for the specific language governing permissions and\r
+-- limitations under the License.\r
+--\r
+\r
+-- create case definition table --\r
+create table ACT_RE_CASE_DEF (\r
+    ID_ varchar(64) not null,\r
+    REV_ integer,\r
+    CATEGORY_ varchar(255),\r
+    NAME_ varchar(255),\r
+    KEY_ varchar(255) not null,\r
+    VERSION_ integer not null,\r
+    DEPLOYMENT_ID_ varchar(64),\r
+    RESOURCE_NAME_ varchar(4000),\r
+    DGRM_RESOURCE_NAME_ varchar(4000),\r
+    TENANT_ID_ varchar(64),\r
+    HISTORY_TTL_ integer,\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+-- create case execution table --\r
+create table ACT_RU_CASE_EXECUTION (\r
+    ID_ varchar(64) NOT NULL,\r
+    REV_ integer,\r
+    CASE_INST_ID_ varchar(64),\r
+    SUPER_CASE_EXEC_ varchar(64),\r
+    SUPER_EXEC_ varchar(64),\r
+    BUSINESS_KEY_ varchar(255),\r
+    PARENT_ID_ varchar(64),\r
+    CASE_DEF_ID_ varchar(64),\r
+    ACT_ID_ varchar(255),\r
+    PREV_STATE_ integer,\r
+    CURRENT_STATE_ integer,\r
+    REQUIRED_ boolean,\r
+    TENANT_ID_ varchar(64),\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+-- create case sentry part table --\r
+\r
+create table ACT_RU_CASE_SENTRY_PART (\r
+    ID_ varchar(64) NOT NULL,\r
+    REV_ integer,\r
+    CASE_INST_ID_ varchar(64),\r
+    CASE_EXEC_ID_ varchar(64),\r
+    SENTRY_ID_ varchar(255),\r
+    TYPE_ varchar(255),\r
+    SOURCE_CASE_EXEC_ID_ varchar(64),\r
+    STANDARD_EVENT_ varchar(255),\r
+    SOURCE_ varchar(255),\r
+    VARIABLE_EVENT_ varchar(255),\r
+    VARIABLE_NAME_ varchar(255),\r
+    SATISFIED_ boolean,\r
+    TENANT_ID_ varchar(64),\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+-- create index on business key --\r
+create index ACT_IDX_CASE_EXEC_BUSKEY on ACT_RU_CASE_EXECUTION(BUSINESS_KEY_);\r
+\r
+-- https://app.camunda.com/jira/browse/CAM-9165\r
+create index ACT_IDX_CASE_EXE_CASE_INST on ACT_RU_CASE_EXECUTION(CASE_INST_ID_);\r
+\r
+-- create foreign key constraints on ACT_RU_CASE_EXECUTION --\r
+alter table ACT_RU_CASE_EXECUTION\r
+    add constraint ACT_FK_CASE_EXE_CASE_INST\r
+    foreign key (CASE_INST_ID_)\r
+    references ACT_RU_CASE_EXECUTION(ID_) on delete cascade on update cascade;\r
+\r
+alter table ACT_RU_CASE_EXECUTION\r
+    add constraint ACT_FK_CASE_EXE_PARENT\r
+    foreign key (PARENT_ID_)\r
+    references ACT_RU_CASE_EXECUTION(ID_);\r
+\r
+alter table ACT_RU_CASE_EXECUTION\r
+    add constraint ACT_FK_CASE_EXE_CASE_DEF\r
+    foreign key (CASE_DEF_ID_)\r
+    references ACT_RE_CASE_DEF(ID_);\r
+\r
+-- create foreign key constraints on ACT_RU_VARIABLE --\r
+alter table ACT_RU_VARIABLE\r
+    add constraint ACT_FK_VAR_CASE_EXE\r
+    foreign key (CASE_EXECUTION_ID_)\r
+    references ACT_RU_CASE_EXECUTION(ID_);\r
+\r
+alter table ACT_RU_VARIABLE\r
+    add constraint ACT_FK_VAR_CASE_INST\r
+    foreign key (CASE_INST_ID_)\r
+    references ACT_RU_CASE_EXECUTION(ID_);\r
+\r
+-- create foreign key constraints on ACT_RU_TASK --\r
+alter table ACT_RU_TASK\r
+    add constraint ACT_FK_TASK_CASE_EXE\r
+    foreign key (CASE_EXECUTION_ID_)\r
+    references ACT_RU_CASE_EXECUTION(ID_);\r
+\r
+alter table ACT_RU_TASK\r
+  add constraint ACT_FK_TASK_CASE_DEF\r
+  foreign key (CASE_DEF_ID_)\r
+  references ACT_RE_CASE_DEF(ID_);\r
+\r
+-- create foreign key constraints on ACT_RU_CASE_SENTRY_PART --\r
+alter table ACT_RU_CASE_SENTRY_PART\r
+    add constraint ACT_FK_CASE_SENTRY_CASE_INST\r
+    foreign key (CASE_INST_ID_)\r
+    references ACT_RU_CASE_EXECUTION(ID_);\r
+\r
+alter table ACT_RU_CASE_SENTRY_PART\r
+    add constraint ACT_FK_CASE_SENTRY_CASE_EXEC\r
+    foreign key (CASE_EXEC_ID_)\r
+    references ACT_RU_CASE_EXECUTION(ID_);\r
+\r
+create index ACT_IDX_CASE_DEF_TENANT_ID on ACT_RE_CASE_DEF(TENANT_ID_);\r
+create index ACT_IDX_CASE_EXEC_TENANT_ID on ACT_RU_CASE_EXECUTION(TENANT_ID_);\r
+--\r
+-- Copyright © 2012 - 2018 camunda services GmbH and various authors (info@camunda.com)\r
+--\r
+-- Licensed under the Apache License, Version 2.0 (the "License");\r
+-- you may not use this file except in compliance with the License.\r
+-- You may obtain a copy of the License at\r
+--\r
+--     http://www.apache.org/licenses/LICENSE-2.0\r
+--\r
+-- Unless required by applicable law or agreed to in writing, software\r
+-- distributed under the License is distributed on an "AS IS" BASIS,\r
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+-- See the License for the specific language governing permissions and\r
+-- limitations under the License.\r
+--\r
+\r
+-- create decision definition table --\r
+create table ACT_RE_DECISION_DEF (\r
+    ID_ varchar(64) not null,\r
+    REV_ integer,\r
+    CATEGORY_ varchar(255),\r
+    NAME_ varchar(255),\r
+    KEY_ varchar(255) not null,\r
+    VERSION_ integer not null,\r
+    DEPLOYMENT_ID_ varchar(64),\r
+    RESOURCE_NAME_ varchar(4000),\r
+    DGRM_RESOURCE_NAME_ varchar(4000),\r
+    DEC_REQ_ID_ varchar(64),\r
+    DEC_REQ_KEY_ varchar(255),\r
+    TENANT_ID_ varchar(64),\r
+    HISTORY_TTL_ integer,\r
+    VERSION_TAG_ varchar(64),\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+-- create decision requirements definition table --\r
+create table ACT_RE_DECISION_REQ_DEF (\r
+    ID_ varchar(64) NOT NULL,\r
+    REV_ integer,\r
+    CATEGORY_ varchar(255),\r
+    NAME_ varchar(255),\r
+    KEY_ varchar(255) NOT NULL,\r
+    VERSION_ integer NOT NULL,\r
+    DEPLOYMENT_ID_ varchar(64),\r
+    RESOURCE_NAME_ varchar(4000),\r
+    DGRM_RESOURCE_NAME_ varchar(4000),\r
+    TENANT_ID_ varchar(64),\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+alter table ACT_RE_DECISION_DEF\r
+    add constraint ACT_FK_DEC_REQ\r
+    foreign key (DEC_REQ_ID_)\r
+    references ACT_RE_DECISION_REQ_DEF(ID_);\r
+\r
+create index ACT_IDX_DEC_DEF_TENANT_ID on ACT_RE_DECISION_DEF(TENANT_ID_);\r
+create index ACT_IDX_DEC_DEF_REQ_ID on ACT_RE_DECISION_DEF(DEC_REQ_ID_);\r
+create index ACT_IDX_DEC_REQ_DEF_TENANT_ID on ACT_RE_DECISION_REQ_DEF(TENANT_ID_);\r
+\r
+--\r
+-- Copyright © 2012 - 2018 camunda services GmbH and various authors (info@camunda.com)\r
+--\r
+-- Licensed under the Apache License, Version 2.0 (the "License");\r
+-- you may not use this file except in compliance with the License.\r
+-- You may obtain a copy of the License at\r
+--\r
+--     http://www.apache.org/licenses/LICENSE-2.0\r
+--\r
+-- Unless required by applicable law or agreed to in writing, software\r
+-- distributed under the License is distributed on an "AS IS" BASIS,\r
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+-- See the License for the specific language governing permissions and\r
+-- limitations under the License.\r
+--\r
+\r
+create table ACT_HI_PROCINST (\r
+    ID_ varchar(64) not null,\r
+    PROC_INST_ID_ varchar(64) not null,\r
+    BUSINESS_KEY_ varchar(255),\r
+    PROC_DEF_KEY_ varchar(255),\r
+    PROC_DEF_ID_ varchar(64) not null,\r
+    START_TIME_ datetime not null,\r
+    END_TIME_ datetime,\r
+    REMOVAL_TIME_ datetime,\r
+    DURATION_ bigint,\r
+    START_USER_ID_ varchar(255),\r
+    START_ACT_ID_ varchar(255),\r
+    END_ACT_ID_ varchar(255),\r
+    SUPER_PROCESS_INSTANCE_ID_ varchar(64),\r
+    ROOT_PROC_INST_ID_ varchar(64),\r
+    SUPER_CASE_INSTANCE_ID_ varchar(64),\r
+    CASE_INST_ID_ varchar(64),\r
+    DELETE_REASON_ varchar(4000),\r
+    TENANT_ID_ varchar(64),\r
+    STATE_ varchar(255),\r
+    primary key (ID_),\r
+    unique (PROC_INST_ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_HI_ACTINST (\r
+    ID_ varchar(64) not null,\r
+    PARENT_ACT_INST_ID_ varchar(64),\r
+    PROC_DEF_KEY_ varchar(255),\r
+    PROC_DEF_ID_ varchar(64) not null,\r
+    ROOT_PROC_INST_ID_ varchar(64),\r
+    PROC_INST_ID_ varchar(64) not null,\r
+    EXECUTION_ID_ varchar(64) not null,\r
+    ACT_ID_ varchar(255) not null,\r
+    TASK_ID_ varchar(64),\r
+    CALL_PROC_INST_ID_ varchar(64),\r
+    CALL_CASE_INST_ID_ varchar(64),\r
+    ACT_NAME_ varchar(255),\r
+    ACT_TYPE_ varchar(255) not null,\r
+    ASSIGNEE_ varchar(64),\r
+    START_TIME_ datetime not null,\r
+    END_TIME_ datetime,\r
+    DURATION_ bigint,\r
+    ACT_INST_STATE_ integer,\r
+    SEQUENCE_COUNTER_ bigint,\r
+    TENANT_ID_ varchar(64),\r
+    REMOVAL_TIME_ datetime,\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_HI_TASKINST (\r
+    ID_ varchar(64) not null,\r
+    TASK_DEF_KEY_ varchar(255),\r
+    PROC_DEF_KEY_ varchar(255),\r
+    PROC_DEF_ID_ varchar(64),\r
+    ROOT_PROC_INST_ID_ varchar(64),\r
+    PROC_INST_ID_ varchar(64),\r
+    EXECUTION_ID_ varchar(64),\r
+    CASE_DEF_KEY_ varchar(255),\r
+    CASE_DEF_ID_ varchar(64),\r
+    CASE_INST_ID_ varchar(64),\r
+    CASE_EXECUTION_ID_ varchar(64),\r
+    ACT_INST_ID_ varchar(64),\r
+    NAME_ varchar(255),\r
+    PARENT_TASK_ID_ varchar(64),\r
+    DESCRIPTION_ varchar(4000),\r
+    OWNER_ varchar(255),\r
+    ASSIGNEE_ varchar(255),\r
+    START_TIME_ datetime not null,\r
+    END_TIME_ datetime,\r
+    DURATION_ bigint,\r
+    DELETE_REASON_ varchar(4000),\r
+    PRIORITY_ integer,\r
+    DUE_DATE_ datetime,\r
+    FOLLOW_UP_DATE_ datetime,\r
+    TENANT_ID_ varchar(64),\r
+    REMOVAL_TIME_ datetime,\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_HI_VARINST (\r
+    ID_ varchar(64) not null,\r
+    PROC_DEF_KEY_ varchar(255),\r
+    PROC_DEF_ID_ varchar(64),\r
+    ROOT_PROC_INST_ID_ varchar(64),\r
+    PROC_INST_ID_ varchar(64),\r
+    EXECUTION_ID_ varchar(64),\r
+    ACT_INST_ID_ varchar(64),\r
+    CASE_DEF_KEY_ varchar(255),\r
+    CASE_DEF_ID_ varchar(64),\r
+    CASE_INST_ID_ varchar(64),\r
+    CASE_EXECUTION_ID_ varchar(64),\r
+    TASK_ID_ varchar(64),\r
+    NAME_ varchar(255) not null,\r
+    VAR_TYPE_ varchar(100),\r
+    CREATE_TIME_ datetime,\r
+    REV_ integer,\r
+    BYTEARRAY_ID_ varchar(64),\r
+    DOUBLE_ double,\r
+    LONG_ bigint,\r
+    TEXT_ varchar(4000),\r
+    TEXT2_ varchar(4000),\r
+    TENANT_ID_ varchar(64),\r
+    STATE_ varchar(20),\r
+    REMOVAL_TIME_ datetime,\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_HI_DETAIL (\r
+    ID_ varchar(64) not null,\r
+    TYPE_ varchar(255) not null,\r
+    PROC_DEF_KEY_ varchar(255),\r
+    PROC_DEF_ID_ varchar(64),\r
+    ROOT_PROC_INST_ID_ varchar(64),\r
+    PROC_INST_ID_ varchar(64),\r
+    EXECUTION_ID_ varchar(64),\r
+    CASE_DEF_KEY_ varchar(255),\r
+    CASE_DEF_ID_ varchar(64),\r
+    CASE_INST_ID_ varchar(64),\r
+    CASE_EXECUTION_ID_ varchar(64),\r
+    TASK_ID_ varchar(64),\r
+    ACT_INST_ID_ varchar(64),\r
+    VAR_INST_ID_ varchar(64),\r
+    NAME_ varchar(255) not null,\r
+    VAR_TYPE_ varchar(255),\r
+    REV_ integer,\r
+    TIME_ datetime not null,\r
+    BYTEARRAY_ID_ varchar(64),\r
+    DOUBLE_ double,\r
+    LONG_ bigint,\r
+    TEXT_ varchar(4000),\r
+    TEXT2_ varchar(4000),\r
+    SEQUENCE_COUNTER_ bigint,\r
+    TENANT_ID_ varchar(64),\r
+    OPERATION_ID_ varchar(64),\r
+    REMOVAL_TIME_ datetime,\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_HI_IDENTITYLINK (\r
+    ID_ varchar(64) not null,\r
+    TIMESTAMP_ timestamp not null,\r
+    TYPE_ varchar(255),\r
+    USER_ID_ varchar(255),\r
+    GROUP_ID_ varchar(255),\r
+    TASK_ID_ varchar(64),\r
+    ROOT_PROC_INST_ID_ varchar(64),\r
+    PROC_DEF_ID_ varchar(64),\r
+    OPERATION_TYPE_ varchar(64),\r
+    ASSIGNER_ID_ varchar(64),\r
+    PROC_DEF_KEY_ varchar(255),\r
+    TENANT_ID_ varchar(64),\r
+    REMOVAL_TIME_ datetime,\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_HI_COMMENT (\r
+    ID_ varchar(64) not null,\r
+    TYPE_ varchar(255),\r
+    TIME_ datetime not null,\r
+    USER_ID_ varchar(255),\r
+    TASK_ID_ varchar(64),\r
+    ROOT_PROC_INST_ID_ varchar(64),\r
+    PROC_INST_ID_ varchar(64),\r
+    ACTION_ varchar(255),\r
+    MESSAGE_ varchar(4000),\r
+    FULL_MSG_ LONGBLOB,\r
+    TENANT_ID_ varchar(64),\r
+    REMOVAL_TIME_ datetime,\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_HI_ATTACHMENT (\r
+    ID_ varchar(64) not null,\r
+    REV_ integer,\r
+    USER_ID_ varchar(255),\r
+    NAME_ varchar(255),\r
+    DESCRIPTION_ varchar(4000),\r
+    TYPE_ varchar(255),\r
+    TASK_ID_ varchar(64),\r
+    ROOT_PROC_INST_ID_ varchar(64),\r
+    PROC_INST_ID_ varchar(64),\r
+    URL_ varchar(4000),\r
+    CONTENT_ID_ varchar(64),\r
+    TENANT_ID_ varchar(64),\r
+    CREATE_TIME_ datetime,\r
+    REMOVAL_TIME_ datetime,\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_HI_OP_LOG (\r
+    ID_ varchar(64) not null,\r
+    DEPLOYMENT_ID_ varchar(64),\r
+    PROC_DEF_ID_ varchar(64),\r
+    PROC_DEF_KEY_ varchar(255),\r
+    ROOT_PROC_INST_ID_ varchar(64),\r
+    PROC_INST_ID_ varchar(64),\r
+    EXECUTION_ID_ varchar(64),\r
+    CASE_DEF_ID_ varchar(64),\r
+    CASE_INST_ID_ varchar(64),\r
+    CASE_EXECUTION_ID_ varchar(64),\r
+    TASK_ID_ varchar(64),\r
+    JOB_ID_ varchar(64),\r
+    JOB_DEF_ID_ varchar(64),\r
+    BATCH_ID_ varchar(64),\r
+    USER_ID_ varchar(255),\r
+    TIMESTAMP_ timestamp not null,\r
+    OPERATION_TYPE_ varchar(64),\r
+    OPERATION_ID_ varchar(64),\r
+    ENTITY_TYPE_ varchar(30),\r
+    PROPERTY_ varchar(64),\r
+    ORG_VALUE_ varchar(4000),\r
+    NEW_VALUE_ varchar(4000),\r
+    TENANT_ID_ varchar(64),\r
+    REMOVAL_TIME_ datetime,\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_HI_INCIDENT (\r
+  ID_ varchar(64) not null,\r
+  PROC_DEF_KEY_ varchar(255),\r
+  PROC_DEF_ID_ varchar(64),\r
+  ROOT_PROC_INST_ID_ varchar(64),\r
+  PROC_INST_ID_ varchar(64),\r
+  EXECUTION_ID_ varchar(64),\r
+  CREATE_TIME_ timestamp not null,\r
+  END_TIME_ timestamp null,\r
+  INCIDENT_MSG_ varchar(4000),\r
+  INCIDENT_TYPE_ varchar(255) not null,\r
+  ACTIVITY_ID_ varchar(255),\r
+  CAUSE_INCIDENT_ID_ varchar(64),\r
+  ROOT_CAUSE_INCIDENT_ID_ varchar(64),\r
+  CONFIGURATION_ varchar(255),\r
+  INCIDENT_STATE_ integer,\r
+  TENANT_ID_ varchar(64),\r
+  JOB_DEF_ID_ varchar(64),\r
+  REMOVAL_TIME_ datetime,\r
+  primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_HI_JOB_LOG (\r
+    ID_ varchar(64) not null,\r
+    TIMESTAMP_ timestamp not null,\r
+    JOB_ID_ varchar(64) not null,\r
+    JOB_DUEDATE_ timestamp NULL,\r
+    JOB_RETRIES_ integer,\r
+    JOB_PRIORITY_ bigint NOT NULL DEFAULT 0,\r
+    JOB_EXCEPTION_MSG_ varchar(4000),\r
+    JOB_EXCEPTION_STACK_ID_ varchar(64),\r
+    JOB_STATE_ integer,\r
+    JOB_DEF_ID_ varchar(64),\r
+    JOB_DEF_TYPE_ varchar(255),\r
+    JOB_DEF_CONFIGURATION_ varchar(255),\r
+    ACT_ID_ varchar(255),\r
+    EXECUTION_ID_ varchar(64),\r
+    ROOT_PROC_INST_ID_ varchar(64),\r
+    PROCESS_INSTANCE_ID_ varchar(64),\r
+    PROCESS_DEF_ID_ varchar(64),\r
+    PROCESS_DEF_KEY_ varchar(255),\r
+    DEPLOYMENT_ID_ varchar(64),\r
+    SEQUENCE_COUNTER_ bigint,\r
+    TENANT_ID_ varchar(64),\r
+    REMOVAL_TIME_ datetime,\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_HI_BATCH (\r
+    ID_ varchar(64) not null,\r
+    TYPE_ varchar(255),\r
+    TOTAL_JOBS_ integer,\r
+    JOBS_PER_SEED_ integer,\r
+    INVOCATIONS_PER_JOB_ integer,\r
+    SEED_JOB_DEF_ID_ varchar(64),\r
+    MONITOR_JOB_DEF_ID_ varchar(64),\r
+    BATCH_JOB_DEF_ID_ varchar(64),\r
+    TENANT_ID_ varchar(64),\r
+    CREATE_USER_ID_ varchar(255),\r
+    START_TIME_ datetime not null,\r
+    END_TIME_ datetime,\r
+    REMOVAL_TIME_ datetime,\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_HI_EXT_TASK_LOG (\r
+    ID_ varchar(64) not null,\r
+    TIMESTAMP_ timestamp not null,\r
+    EXT_TASK_ID_ varchar(64) not null,\r
+    RETRIES_ integer,\r
+    TOPIC_NAME_ varchar(255),\r
+    WORKER_ID_ varchar(255),\r
+    PRIORITY_ bigint not null default 0,\r
+    ERROR_MSG_ varchar(4000),\r
+    ERROR_DETAILS_ID_ varchar(64),\r
+    ACT_ID_ varchar(255),\r
+    ACT_INST_ID_ varchar(64),\r
+    EXECUTION_ID_ varchar(64),\r
+    ROOT_PROC_INST_ID_ varchar(64),\r
+    PROC_INST_ID_ varchar(64),\r
+    PROC_DEF_ID_ varchar(64),\r
+    PROC_DEF_KEY_ varchar(255),\r
+    TENANT_ID_ varchar(64),\r
+    STATE_ integer,\r
+    REV_ integer,\r
+    REMOVAL_TIME_ datetime,\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create index ACT_IDX_HI_PRO_INST_END on ACT_HI_PROCINST(END_TIME_);\r
+create index ACT_IDX_HI_PRO_I_BUSKEY on ACT_HI_PROCINST(BUSINESS_KEY_);\r
+create index ACT_IDX_HI_PRO_INST_TENANT_ID on ACT_HI_PROCINST(TENANT_ID_);\r
+create index ACT_IDX_HI_PRO_INST_PROC_DEF_KEY on ACT_HI_PROCINST(PROC_DEF_KEY_);\r
+create index ACT_IDX_HI_PRO_INST_PROC_TIME on ACT_HI_PROCINST(START_TIME_, END_TIME_);\r
+create index ACT_IDX_HI_PI_PDEFID_END_TIME on ACT_HI_PROCINST(PROC_DEF_ID_, END_TIME_);\r
+create index ACT_IDX_HI_PRO_INST_ROOT_PI on ACT_HI_PROCINST(ROOT_PROC_INST_ID_);\r
+create index ACT_IDX_HI_PRO_INST_RM_TIME on ACT_HI_PROCINST(REMOVAL_TIME_);\r
+\r
+create index ACT_IDX_HI_ACTINST_ROOT_PI on ACT_HI_ACTINST(ROOT_PROC_INST_ID_);\r
+create index ACT_IDX_HI_ACT_INST_START on ACT_HI_ACTINST(START_TIME_);\r
+create index ACT_IDX_HI_ACT_INST_END on ACT_HI_ACTINST(END_TIME_);\r
+create index ACT_IDX_HI_ACT_INST_PROCINST on ACT_HI_ACTINST(PROC_INST_ID_, ACT_ID_);\r
+create index ACT_IDX_HI_ACT_INST_COMP on ACT_HI_ACTINST(EXECUTION_ID_, ACT_ID_, END_TIME_, ID_);\r
+create index ACT_IDX_HI_ACT_INST_STATS on ACT_HI_ACTINST(PROC_DEF_ID_, PROC_INST_ID_, ACT_ID_, END_TIME_, ACT_INST_STATE_);\r
+create index ACT_IDX_HI_ACT_INST_TENANT_ID on ACT_HI_ACTINST(TENANT_ID_);\r
+create index ACT_IDX_HI_ACT_INST_PROC_DEF_KEY on ACT_HI_ACTINST(PROC_DEF_KEY_);\r
+create index ACT_IDX_HI_AI_PDEFID_END_TIME on ACT_HI_ACTINST(PROC_DEF_ID_, END_TIME_);\r
+create index ACT_IDX_HI_ACT_INST_RM_TIME on ACT_HI_ACTINST(REMOVAL_TIME_);\r
+\r
+create index ACT_IDX_HI_TASKINST_ROOT_PI on ACT_HI_TASKINST(ROOT_PROC_INST_ID_);\r
+create index ACT_IDX_HI_TASK_INST_TENANT_ID on ACT_HI_TASKINST(TENANT_ID_);\r
+create index ACT_IDX_HI_TASK_INST_PROC_DEF_KEY on ACT_HI_TASKINST(PROC_DEF_KEY_);\r
+create index ACT_IDX_HI_TASKINST_PROCINST on ACT_HI_TASKINST(PROC_INST_ID_);\r
+create index ACT_IDX_HI_TASKINSTID_PROCINST on ACT_HI_TASKINST(ID_,PROC_INST_ID_);\r
+create index ACT_IDX_HI_TASK_INST_RM_TIME on ACT_HI_TASKINST(REMOVAL_TIME_);\r
+create index ACT_IDX_HI_TASK_INST_START on ACT_HI_TASKINST(START_TIME_);\r
+create index ACT_IDX_HI_TASK_INST_END on ACT_HI_TASKINST(END_TIME_);\r
+\r
+create index ACT_IDX_HI_DETAIL_ROOT_PI on ACT_HI_DETAIL(ROOT_PROC_INST_ID_);\r
+create index ACT_IDX_HI_DETAIL_PROC_INST on ACT_HI_DETAIL(PROC_INST_ID_);\r
+create index ACT_IDX_HI_DETAIL_ACT_INST on ACT_HI_DETAIL(ACT_INST_ID_);\r
+create index ACT_IDX_HI_DETAIL_CASE_INST on ACT_HI_DETAIL(CASE_INST_ID_);\r
+create index ACT_IDX_HI_DETAIL_CASE_EXEC on ACT_HI_DETAIL(CASE_EXECUTION_ID_);\r
+create index ACT_IDX_HI_DETAIL_TIME on ACT_HI_DETAIL(TIME_);\r
+create index ACT_IDX_HI_DETAIL_NAME on ACT_HI_DETAIL(NAME_);\r
+create index ACT_IDX_HI_DETAIL_TASK_ID on ACT_HI_DETAIL(TASK_ID_);\r
+create index ACT_IDX_HI_DETAIL_TENANT_ID on ACT_HI_DETAIL(TENANT_ID_);\r
+create index ACT_IDX_HI_DETAIL_PROC_DEF_KEY on ACT_HI_DETAIL(PROC_DEF_KEY_);\r
+create index ACT_IDX_HI_DETAIL_BYTEAR on ACT_HI_DETAIL(BYTEARRAY_ID_);\r
+create index ACT_IDX_HI_DETAIL_RM_TIME on ACT_HI_DETAIL(REMOVAL_TIME_);\r
+create index ACT_IDX_HI_DETAIL_TASK_BYTEAR on ACT_HI_DETAIL(BYTEARRAY_ID_, TASK_ID_);\r
+\r
+create index ACT_IDX_HI_IDENT_LNK_ROOT_PI on ACT_HI_IDENTITYLINK(ROOT_PROC_INST_ID_);\r
+create index ACT_IDX_HI_IDENT_LNK_USER on ACT_HI_IDENTITYLINK(USER_ID_);\r
+create index ACT_IDX_HI_IDENT_LNK_GROUP on ACT_HI_IDENTITYLINK(GROUP_ID_);\r
+create index ACT_IDX_HI_IDENT_LNK_TENANT_ID on ACT_HI_IDENTITYLINK(TENANT_ID_);\r
+create index ACT_IDX_HI_IDENT_LNK_PROC_DEF_KEY on ACT_HI_IDENTITYLINK(PROC_DEF_KEY_);\r
+create index ACT_IDX_HI_IDENT_LINK_TASK on ACT_HI_IDENTITYLINK(TASK_ID_);\r
+create index ACT_IDX_HI_IDENT_LINK_RM_TIME on ACT_HI_IDENTITYLINK(REMOVAL_TIME_);\r
+\r
+create index ACT_IDX_HI_VARINST_ROOT_PI on ACT_HI_VARINST(ROOT_PROC_INST_ID_);\r
+create index ACT_IDX_HI_PROCVAR_PROC_INST on ACT_HI_VARINST(PROC_INST_ID_);\r
+create index ACT_IDX_HI_PROCVAR_NAME_TYPE on ACT_HI_VARINST(NAME_, VAR_TYPE_);\r
+create index ACT_IDX_HI_CASEVAR_CASE_INST on ACT_HI_VARINST(CASE_INST_ID_);\r
+create index ACT_IDX_HI_VAR_INST_TENANT_ID on ACT_HI_VARINST(TENANT_ID_);\r
+create index ACT_IDX_HI_VAR_INST_PROC_DEF_KEY on ACT_HI_VARINST(PROC_DEF_KEY_);\r
+create index ACT_IDX_HI_VARINST_BYTEAR on ACT_HI_VARINST(BYTEARRAY_ID_);\r
+create index ACT_IDX_HI_VARINST_RM_TIME on ACT_HI_VARINST(REMOVAL_TIME_);\r
+\r
+create index ACT_IDX_HI_INCIDENT_TENANT_ID on ACT_HI_INCIDENT(TENANT_ID_);\r
+create index ACT_IDX_HI_INCIDENT_PROC_DEF_KEY on ACT_HI_INCIDENT(PROC_DEF_KEY_);\r
+create index ACT_IDX_HI_INCIDENT_ROOT_PI on ACT_HI_INCIDENT(ROOT_PROC_INST_ID_);\r
+create index ACT_IDX_HI_INCIDENT_PROCINST on ACT_HI_INCIDENT(PROC_INST_ID_);\r
+create index ACT_IDX_HI_INCIDENT_RM_TIME on ACT_HI_INCIDENT(REMOVAL_TIME_);\r
+\r
+create index ACT_IDX_HI_JOB_LOG_ROOT_PI on ACT_HI_JOB_LOG(ROOT_PROC_INST_ID_);\r
+create index ACT_IDX_HI_JOB_LOG_PROCINST on ACT_HI_JOB_LOG(PROCESS_INSTANCE_ID_);\r
+create index ACT_IDX_HI_JOB_LOG_PROCDEF on ACT_HI_JOB_LOG(PROCESS_DEF_ID_);\r
+create index ACT_IDX_HI_JOB_LOG_TENANT_ID on ACT_HI_JOB_LOG(TENANT_ID_);\r
+create index ACT_IDX_HI_JOB_LOG_JOB_DEF_ID on ACT_HI_JOB_LOG(JOB_DEF_ID_);\r
+create index ACT_IDX_HI_JOB_LOG_PROC_DEF_KEY on ACT_HI_JOB_LOG(PROCESS_DEF_KEY_);\r
+create index ACT_IDX_HI_JOB_LOG_EX_STACK on ACT_HI_JOB_LOG(JOB_EXCEPTION_STACK_ID_);\r
+create index ACT_IDX_HI_JOB_LOG_RM_TIME on ACT_HI_JOB_LOG(REMOVAL_TIME_);\r
+\r
+create index ACT_HI_BAT_RM_TIME on ACT_HI_BATCH(REMOVAL_TIME_);\r
+\r
+create index ACT_HI_EXT_TASK_LOG_ROOT_PI on ACT_HI_EXT_TASK_LOG(ROOT_PROC_INST_ID_);\r
+create index ACT_HI_EXT_TASK_LOG_PROCINST on ACT_HI_EXT_TASK_LOG(PROC_INST_ID_);\r
+create index ACT_HI_EXT_TASK_LOG_PROCDEF on ACT_HI_EXT_TASK_LOG(PROC_DEF_ID_);\r
+create index ACT_HI_EXT_TASK_LOG_PROC_DEF_KEY on ACT_HI_EXT_TASK_LOG(PROC_DEF_KEY_);\r
+create index ACT_HI_EXT_TASK_LOG_TENANT_ID on ACT_HI_EXT_TASK_LOG(TENANT_ID_);\r
+create index ACT_IDX_HI_EXTTASKLOG_ERRORDET on ACT_HI_EXT_TASK_LOG(ERROR_DETAILS_ID_);\r
+create index ACT_HI_EXT_TASK_LOG_RM_TIME on ACT_HI_EXT_TASK_LOG(REMOVAL_TIME_);\r
+\r
+create index ACT_IDX_HI_OP_LOG_ROOT_PI on ACT_HI_OP_LOG(ROOT_PROC_INST_ID_);\r
+create index ACT_IDX_HI_OP_LOG_PROCINST on ACT_HI_OP_LOG(PROC_INST_ID_);\r
+create index ACT_IDX_HI_OP_LOG_PROCDEF on ACT_HI_OP_LOG(PROC_DEF_ID_);\r
+create index ACT_IDX_HI_OP_LOG_TASK on ACT_HI_OP_LOG(TASK_ID_);\r
+create index ACT_IDX_HI_OP_LOG_RM_TIME on ACT_HI_OP_LOG(REMOVAL_TIME_);\r
+create index ACT_IDX_HI_OP_LOG_TIMESTAMP on ACT_HI_OP_LOG(TIMESTAMP_);\r
+\r
+create index ACT_IDX_HI_ATTACHMENT_CONTENT on ACT_HI_ATTACHMENT(CONTENT_ID_);\r
+create index ACT_IDX_HI_ATTACHMENT_ROOT_PI on ACT_HI_ATTACHMENT(ROOT_PROC_INST_ID_);\r
+create index ACT_IDX_HI_ATTACHMENT_PROCINST on ACT_HI_ATTACHMENT(PROC_INST_ID_);\r
+create index ACT_IDX_HI_ATTACHMENT_TASK on ACT_HI_ATTACHMENT(TASK_ID_);\r
+create index ACT_IDX_HI_ATTACHMENT_RM_TIME on ACT_HI_ATTACHMENT(REMOVAL_TIME_);\r
+\r
+create index ACT_IDX_HI_COMMENT_TASK on ACT_HI_COMMENT(TASK_ID_);\r
+create index ACT_IDX_HI_COMMENT_ROOT_PI on ACT_HI_COMMENT(ROOT_PROC_INST_ID_);\r
+create index ACT_IDX_HI_COMMENT_PROCINST on ACT_HI_COMMENT(PROC_INST_ID_);\r
+create index ACT_IDX_HI_COMMENT_RM_TIME on ACT_HI_COMMENT(REMOVAL_TIME_);\r
+--\r
+-- Copyright © 2012 - 2018 camunda services GmbH and various authors (info@camunda.com)\r
+--\r
+-- Licensed under the Apache License, Version 2.0 (the "License");\r
+-- you may not use this file except in compliance with the License.\r
+-- You may obtain a copy of the License at\r
+--\r
+--     http://www.apache.org/licenses/LICENSE-2.0\r
+--\r
+-- Unless required by applicable law or agreed to in writing, software\r
+-- distributed under the License is distributed on an "AS IS" BASIS,\r
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+-- See the License for the specific language governing permissions and\r
+-- limitations under the License.\r
+--\r
+\r
+create table ACT_HI_CASEINST (\r
+    ID_ varchar(64) not null,\r
+    CASE_INST_ID_ varchar(64) not null,\r
+    BUSINESS_KEY_ varchar(255),\r
+    CASE_DEF_ID_ varchar(64) not null,\r
+    CREATE_TIME_ datetime not null,\r
+    CLOSE_TIME_ datetime,\r
+    DURATION_ bigint,\r
+    STATE_ integer,\r
+    CREATE_USER_ID_ varchar(255),\r
+    SUPER_CASE_INSTANCE_ID_ varchar(64),\r
+    SUPER_PROCESS_INSTANCE_ID_ varchar(64),\r
+    TENANT_ID_ varchar(64),\r
+    primary key (ID_),\r
+    unique (CASE_INST_ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_HI_CASEACTINST (\r
+    ID_ varchar(64) not null,\r
+    PARENT_ACT_INST_ID_ varchar(64),\r
+    CASE_DEF_ID_ varchar(64) not null,\r
+    CASE_INST_ID_ varchar(64) not null,\r
+    CASE_ACT_ID_ varchar(255) not null,\r
+    TASK_ID_ varchar(64),\r
+    CALL_PROC_INST_ID_ varchar(64),\r
+    CALL_CASE_INST_ID_ varchar(64),\r
+    CASE_ACT_NAME_ varchar(255),\r
+    CASE_ACT_TYPE_ varchar(255),\r
+    CREATE_TIME_ datetime not null,\r
+    END_TIME_ datetime,\r
+    DURATION_ bigint,\r
+    STATE_ integer,\r
+    REQUIRED_ boolean,\r
+    TENANT_ID_ varchar(64),\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create index ACT_IDX_HI_CAS_I_CLOSE on ACT_HI_CASEINST(CLOSE_TIME_);\r
+create index ACT_IDX_HI_CAS_I_BUSKEY on ACT_HI_CASEINST(BUSINESS_KEY_);\r
+create index ACT_IDX_HI_CAS_I_TENANT_ID on ACT_HI_CASEINST(TENANT_ID_);\r
+create index ACT_IDX_HI_CAS_A_I_CREATE on ACT_HI_CASEACTINST(CREATE_TIME_);\r
+create index ACT_IDX_HI_CAS_A_I_END on ACT_HI_CASEACTINST(END_TIME_);\r
+create index ACT_IDX_HI_CAS_A_I_COMP on ACT_HI_CASEACTINST(CASE_ACT_ID_, END_TIME_, ID_);\r
+create index ACT_IDX_HI_CAS_A_I_CASEINST on ACT_HI_CASEACTINST(CASE_INST_ID_, CASE_ACT_ID_);\r
+create index ACT_IDX_HI_CAS_A_I_TENANT_ID on ACT_HI_CASEACTINST(TENANT_ID_);\r
+--\r
+-- Copyright © 2012 - 2018 camunda services GmbH and various authors (info@camunda.com)\r
+--\r
+-- Licensed under the Apache License, Version 2.0 (the "License");\r
+-- you may not use this file except in compliance with the License.\r
+-- You may obtain a copy of the License at\r
+--\r
+--     http://www.apache.org/licenses/LICENSE-2.0\r
+--\r
+-- Unless required by applicable law or agreed to in writing, software\r
+-- distributed under the License is distributed on an "AS IS" BASIS,\r
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+-- See the License for the specific language governing permissions and\r
+-- limitations under the License.\r
+--\r
+\r
+-- create history decision instance table --\r
+create table ACT_HI_DECINST (\r
+    ID_ varchar(64) NOT NULL,\r
+    DEC_DEF_ID_ varchar(64) NOT NULL,\r
+    DEC_DEF_KEY_ varchar(255) NOT NULL,\r
+    DEC_DEF_NAME_ varchar(255),\r
+    PROC_DEF_KEY_ varchar(255),\r
+    PROC_DEF_ID_ varchar(64),\r
+    PROC_INST_ID_ varchar(64),\r
+    CASE_DEF_KEY_ varchar(255),\r
+    CASE_DEF_ID_ varchar(64),\r
+    CASE_INST_ID_ varchar(64),\r
+    ACT_INST_ID_ varchar(64),\r
+    ACT_ID_ varchar(255),\r
+    EVAL_TIME_ datetime not null,\r
+    REMOVAL_TIME_ datetime,\r
+    COLLECT_VALUE_ double,\r
+    USER_ID_ varchar(255),\r
+    ROOT_DEC_INST_ID_ varchar(64),\r
+    ROOT_PROC_INST_ID_ varchar(64),\r
+    DEC_REQ_ID_ varchar(64),\r
+    DEC_REQ_KEY_ varchar(255),\r
+    TENANT_ID_ varchar(64),\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+-- create history decision input table --\r
+create table ACT_HI_DEC_IN (\r
+    ID_ varchar(64) NOT NULL,\r
+    DEC_INST_ID_ varchar(64) NOT NULL,\r
+    CLAUSE_ID_ varchar(64),\r
+    CLAUSE_NAME_ varchar(255),\r
+    VAR_TYPE_ varchar(100),\r
+    BYTEARRAY_ID_ varchar(64),\r
+    DOUBLE_ double,\r
+    LONG_ bigint,\r
+    TEXT_ varchar(4000),\r
+    TEXT2_ varchar(4000),\r
+    TENANT_ID_ varchar(64),\r
+    CREATE_TIME_ datetime,\r
+    ROOT_PROC_INST_ID_ varchar(64),\r
+    REMOVAL_TIME_ datetime,\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+-- create history decision output table --\r
+create table ACT_HI_DEC_OUT (\r
+    ID_ varchar(64) NOT NULL,\r
+    DEC_INST_ID_ varchar(64) NOT NULL,\r
+    CLAUSE_ID_ varchar(64),\r
+    CLAUSE_NAME_ varchar(255),\r
+    RULE_ID_ varchar(64),\r
+    RULE_ORDER_ integer,\r
+    VAR_NAME_ varchar(255),\r
+    VAR_TYPE_ varchar(100),\r
+    BYTEARRAY_ID_ varchar(64),\r
+    DOUBLE_ double,\r
+    LONG_ bigint,\r
+    TEXT_ varchar(4000),\r
+    TEXT2_ varchar(4000),\r
+    TENANT_ID_ varchar(64),\r
+    CREATE_TIME_ datetime,\r
+    ROOT_PROC_INST_ID_ varchar(64),\r
+    REMOVAL_TIME_ datetime,\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+\r
+create index ACT_IDX_HI_DEC_INST_ID on ACT_HI_DECINST(DEC_DEF_ID_);\r
+create index ACT_IDX_HI_DEC_INST_KEY on ACT_HI_DECINST(DEC_DEF_KEY_);\r
+create index ACT_IDX_HI_DEC_INST_PI on ACT_HI_DECINST(PROC_INST_ID_);\r
+create index ACT_IDX_HI_DEC_INST_CI on ACT_HI_DECINST(CASE_INST_ID_);\r
+create index ACT_IDX_HI_DEC_INST_ACT on ACT_HI_DECINST(ACT_ID_);\r
+create index ACT_IDX_HI_DEC_INST_ACT_INST on ACT_HI_DECINST(ACT_INST_ID_);\r
+create index ACT_IDX_HI_DEC_INST_TIME on ACT_HI_DECINST(EVAL_TIME_);\r
+create index ACT_IDX_HI_DEC_INST_TENANT_ID on ACT_HI_DECINST(TENANT_ID_);\r
+create index ACT_IDX_HI_DEC_INST_ROOT_ID on ACT_HI_DECINST(ROOT_DEC_INST_ID_);\r
+create index ACT_IDX_HI_DEC_INST_REQ_ID on ACT_HI_DECINST(DEC_REQ_ID_);\r
+create index ACT_IDX_HI_DEC_INST_REQ_KEY on ACT_HI_DECINST(DEC_REQ_KEY_);\r
+create index ACT_IDX_HI_DEC_INST_ROOT_PI on ACT_HI_DECINST(ROOT_PROC_INST_ID_);\r
+create index ACT_IDX_HI_DEC_INST_RM_TIME on ACT_HI_DECINST(REMOVAL_TIME_);\r
+\r
+create index ACT_IDX_HI_DEC_IN_INST on ACT_HI_DEC_IN(DEC_INST_ID_);\r
+create index ACT_IDX_HI_DEC_IN_CLAUSE on ACT_HI_DEC_IN(DEC_INST_ID_, CLAUSE_ID_);\r
+create index ACT_IDX_HI_DEC_IN_ROOT_PI on ACT_HI_DEC_IN(ROOT_PROC_INST_ID_);\r
+create index ACT_IDX_HI_DEC_IN_RM_TIME on ACT_HI_DEC_IN(REMOVAL_TIME_);\r
+\r
+create index ACT_IDX_HI_DEC_OUT_INST on ACT_HI_DEC_OUT(DEC_INST_ID_);\r
+create index ACT_IDX_HI_DEC_OUT_RULE on ACT_HI_DEC_OUT(RULE_ORDER_, CLAUSE_ID_);\r
+create index ACT_IDX_HI_DEC_OUT_ROOT_PI on ACT_HI_DEC_OUT(ROOT_PROC_INST_ID_);\r
+create index ACT_IDX_HI_DEC_OUT_RM_TIME on ACT_HI_DEC_OUT(REMOVAL_TIME_);\r
diff --git a/otf-helm/otf/charts/databases/charts/mysqldb/scripts/mysql_identity_7.10.0.sql b/otf-helm/otf/charts/databases/charts/mysqldb/scripts/mysql_identity_7.10.0.sql
new file mode 100644 (file)
index 0000000..113240d
--- /dev/null
@@ -0,0 +1,103 @@
+--\r
+-- Copyright © 2012 - 2018 camunda services GmbH and various authors (info@camunda.com)\r
+--\r
+-- Licensed under the Apache License, Version 2.0 (the "License");\r
+-- you may not use this file except in compliance with the License.\r
+-- You may obtain a copy of the License at\r
+--\r
+--     http://www.apache.org/licenses/LICENSE-2.0\r
+--\r
+-- Unless required by applicable law or agreed to in writing, software\r
+-- distributed under the License is distributed on an "AS IS" BASIS,\r
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+-- See the License for the specific language governing permissions and\r
+-- limitations under the License.\r
+--\r
+\r
+create table ACT_ID_GROUP (\r
+    ID_ varchar(64),\r
+    REV_ integer,\r
+    NAME_ varchar(255),\r
+    TYPE_ varchar(255),\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_ID_MEMBERSHIP (\r
+    USER_ID_ varchar(64),\r
+    GROUP_ID_ varchar(64),\r
+    primary key (USER_ID_, GROUP_ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_ID_USER (\r
+    ID_ varchar(64),\r
+    REV_ integer,\r
+    FIRST_ varchar(255),\r
+    LAST_ varchar(255),\r
+    EMAIL_ varchar(255),\r
+    PWD_ varchar(255),\r
+    SALT_ varchar(255),\r
+    LOCK_EXP_TIME_ timestamp NULL,\r
+    ATTEMPTS_ integer,\r
+    PICTURE_ID_ varchar(64),\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_ID_INFO (\r
+    ID_ varchar(64),\r
+    REV_ integer,\r
+    USER_ID_ varchar(64),\r
+    TYPE_ varchar(64),\r
+    KEY_ varchar(255),\r
+    VALUE_ varchar(255),\r
+    PASSWORD_ LONGBLOB,\r
+    PARENT_ID_ varchar(255),\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_ID_TENANT (\r
+    ID_ varchar(64),\r
+    REV_ integer,\r
+    NAME_ varchar(255),\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+create table ACT_ID_TENANT_MEMBER (\r
+    ID_ varchar(64) not null,\r
+    TENANT_ID_ varchar(64) not null,\r
+    USER_ID_ varchar(64),\r
+    GROUP_ID_ varchar(64),\r
+    primary key (ID_)\r
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;\r
+\r
+alter table ACT_ID_MEMBERSHIP\r
+    add constraint ACT_FK_MEMB_GROUP\r
+    foreign key (GROUP_ID_)\r
+    references ACT_ID_GROUP (ID_);\r
+\r
+alter table ACT_ID_MEMBERSHIP\r
+    add constraint ACT_FK_MEMB_USER\r
+    foreign key (USER_ID_)\r
+    references ACT_ID_USER (ID_);\r
+\r
+alter table ACT_ID_TENANT_MEMBER\r
+    add constraint ACT_UNIQ_TENANT_MEMB_USER\r
+    unique (TENANT_ID_, USER_ID_);\r
+\r
+alter table ACT_ID_TENANT_MEMBER\r
+    add constraint ACT_UNIQ_TENANT_MEMB_GROUP\r
+    unique (TENANT_ID_, GROUP_ID_);\r
+\r
+alter table ACT_ID_TENANT_MEMBER\r
+    add constraint ACT_FK_TENANT_MEMB\r
+    foreign key (TENANT_ID_)\r
+    references ACT_ID_TENANT (ID_);\r
+\r
+alter table ACT_ID_TENANT_MEMBER\r
+    add constraint ACT_FK_TENANT_MEMB_USER\r
+    foreign key (USER_ID_)\r
+    references ACT_ID_USER (ID_);\r
+\r
+alter table ACT_ID_TENANT_MEMBER\r
+    add constraint ACT_FK_TENANT_MEMB_GROUP\r
+    foreign key (GROUP_ID_)\r
+    references ACT_ID_GROUP (ID_);\r
diff --git a/otf-helm/otf/charts/databases/charts/mysqldb/templates/configmap.yaml b/otf-helm/otf/charts/databases/charts/mysqldb/templates/configmap.yaml
new file mode 100644 (file)
index 0000000..858345f
--- /dev/null
@@ -0,0 +1,12 @@
+apiVersion: v1\r
+kind: ConfigMap\r
+metadata:\r
+  name : {{ .Values.global.mysqldb.appName }}-init-scripts\r
+\r
+data:\r
+  {{- $files := .Files }}\r
+  {{- range $key, $value := .Files }}\r
+  {{- if hasPrefix "scripts/" $key }} {{/* only include files under scripts/ */}}\r
+  {{ $key | trimPrefix "scripts/" }}: {{ $files.Get $key | quote }}\r
+  {{- end }}\r
+  {{- end }}\r
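
The template above iterates over the chart's bundled files and copies everything under scripts/ into the ConfigMap, keyed by the file name with the scripts/ prefix trimmed, so each SQL and shell init script becomes its own ConfigMap entry. A rough sketch for previewing the rendered output, assuming Helm 3 and the chart path used in this repo:

    # render the chart locally and inspect the generated init-scripts ConfigMap
    helm template otf otf-helm/otf | grep -B 2 -A 8 'init-scripts'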
diff --git a/otf-helm/otf/charts/databases/charts/mysqldb/templates/deployment.yaml b/otf-helm/otf/charts/databases/charts/mysqldb/templates/deployment.yaml
new file mode 100644 (file)
index 0000000..41f8d33
--- /dev/null
@@ -0,0 +1,96 @@
+apiVersion: apps/v1\r
+kind: Deployment\r
+\r
+metadata:\r
+  name: {{ .Values.global.mysqldb.appName }}\r
+  namespace: {{ .Values.global.mysqldb.namespace }}\r
+  labels:\r
+    app: {{ .Values.global.mysqldb.appName }}\r
+    version: {{ .Values.global.mysqldb.version }}\r
+\r
+spec:\r
+  strategy:\r
+    # use a rolling update strategy\r
+    type: RollingUpdate\r
+    rollingUpdate:\r
+      maxSurge: 1\r
+      maxUnavailable: 1\r
+  replicas: {{ .Values.global.mysqldb.replicas }}\r
+  selector:\r
+    matchLabels:\r
+      app: {{ .Values.global.mysqldb.appName }}\r
+      version: {{ .Values.global.mysqldb.version }}\r
+  template:\r
+    metadata:\r
+      labels:\r
+        app: {{ .Values.global.mysqldb.appName }}\r
+        version: {{ .Values.global.mysqldb.version }}\r
+    spec:\r
+      serviceAccountName: default\r
+      containers:\r
+      - name: {{ .Values.global.mysqldb.appName }}\r
+        image: {{ .Values.global.mysqldb.image.image }}:{{ .Values.global.mysqldb.image.tag }}\r
+        imagePullPolicy: Always\r
+        resources:\r
+          limits: \r
+            memory: {{ .Values.global.mysqldb.resources.limits.memory }}\r
+            cpu: {{ .Values.global.mysqldb.resources.limits.cpu }}\r
+          requests:\r
+            memory: {{ .Values.global.mysqldb.resources.requests.memory }}\r
+            cpu: {{ .Values.global.mysqldb.resources.requests.cpu }}\r
+        env:\r
+        - name: MYSQL_ROOT_PASSWORD\r
+          valueFrom:\r
+            secretKeyRef:\r
+              name: {{ .Values.global.mysqldb.appName }}\r
+              key: mysql_root_password\r
+        - name: MYSQL_PASSWORD\r
+          valueFrom:\r
+            secretKeyRef:\r
+              name: {{ .Values.global.mysqldb.appName }}\r
+              key: mysql_password\r
+        - name: MYSQL_USER\r
+          value: {{ .Values.global.mysqldb.mysqlUser | quote }}\r
+        - name: MYSQL_DATABASE\r
+          value: {{ .Values.global.mysqldb.mysqlDatabase | quote }}\r
+        - name: TZ\r
+          value: {{ .Values.timezone }}\r
+        ports:\r
+        - name: {{ .Values.global.mysqldb.appName }}\r
+          containerPort: 3306\r
+        livenessProbe:\r
+          exec:\r
+            command:\r
+            - sh\r
+            - -c\r
+            - "mysqladmin ping -u root -p${MYSQL_ROOT_PASSWORD}"\r
+          initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}\r
+          periodSeconds: {{ .Values.livenessProbe.periodSeconds }}\r
+          timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}\r
+          successThreshold: {{ .Values.livenessProbe.successThreshold }}\r
+          failureThreshold: {{ .Values.livenessProbe.failureThreshold }}\r
+        readinessProbe:\r
+          exec:\r
+            command:\r
+            - sh\r
+            - -c\r
+            - "mysqladmin ping -u root -p${MYSQL_ROOT_PASSWORD}"\r
+          initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}\r
+          periodSeconds: {{ .Values.readinessProbe.periodSeconds }}\r
+          timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}\r
+          successThreshold: {{ .Values.readinessProbe.successThreshold }}\r
+          failureThreshold: {{ .Values.readinessProbe.failureThreshold }}\r
+        lifecycle:\r
+          postStart:\r
+            exec:\r
+              command: ["/bin/bash", "-c", "cd /data/scripts; ./init_db.sh"]\r
+        volumeMounts:\r
+           - name: custom-init-scripts\r
+             mountPath: /data/scripts\r
+      volumes:\r
+        - name: custom-init-scripts\r
+          configMap:\r
+            name: {{ .Values.global.mysqldb.appName }}-init-scripts\r
+            defaultMode: 0755\r
+      restartPolicy: Always\r
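
The deployment sources its root and application passwords from the Secret defined below, probes liveness and readiness with mysqladmin ping, and runs init_db.sh from the ConfigMap-mounted /data/scripts directory in a postStart hook. A minimal sketch for exercising the same checks against a running pod; the deployment name is a placeholder:

    # run the same health check the probes use (deployment name is illustrative)
    kubectl exec deploy/otf-mysqldb -- sh -c 'mysqladmin ping -u root -p"${MYSQL_ROOT_PASSWORD}"'

    # verify the init scripts were mounted from the ConfigMap
    kubectl exec deploy/otf-mysqldb -- ls /data/scripts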
\r
diff --git a/otf-helm/otf/charts/databases/charts/mysqldb/templates/ingress.yaml b/otf-helm/otf/charts/databases/charts/mysqldb/templates/ingress.yaml
new file mode 100644 (file)
index 0000000..4abb02c
--- /dev/null
@@ -0,0 +1,30 @@
+# Needs updates to configure connectivity once other microservices that connect to mysqldb are deployed\r
+\r
+apiVersion: extensions/v1beta1\r
+kind: Ingress\r
+metadata:\r
+  name: {{ .Values.global.mysqldb.appName }}\r
+  namespace: {{ .Values.global.mysqldb.namespace }}\r
+  labels:\r
+    app: {{ .Values.global.mysqldb.appName }}\r
+    version: {{ .Values.global.mysqldb.version }}\r
+  annotations:\r
+    kubernetes.io/ingress.class: nginx\r
+    nginx.ingress.kubernetes.io/ssl-redirect: "true"\r
+    nginx.ingress.kubernetes.io/rewrite-target: /$1\r
+    nginx.ingress.kubernetes.io/configuration-snippet: |\r
+      proxy_set_header l5d-dst-override $service_name.$namespace.svc.cluster.local:$service_port;\r
+      grpc_set_header l5d-dst-override $service_name.$namespace.svc.cluster.local:$service_port;\r
+spec:\r
+  tls:\r
+  - hosts:\r
+    - {{ .Values.global.mysqldb.nodeApi.host }}\r
+    secretName: {{ .Values.global.mysqldb.certName }}\r
+  rules:\r
+  - host: {{ .Values.global.mysqldb.nodeApi.host }}\r
+    http:\r
+      paths:\r
+      - path: /mysqldb/(.*)\r
+        backend:\r
+          serviceName: {{ .Values.global.mysqldb.appName }}\r
+          servicePort: {{ .Values.global.mysqldb.port }}
\ No newline at end of file
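
The ingress terminates TLS for the configured host and rewrites /mysqldb/(.*) to the captured path before handing the request to the service; the l5d-dst-override headers support Linkerd-style service-mesh routing. A quick sketch for confirming the rendered object, with resource and namespace names as placeholders:

    # check host, path, and TLS secret on the admitted ingress (names are illustrative)
    kubectl get ingress otf-mysqldb -n otf -o wide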
diff --git a/otf-helm/otf/charts/databases/charts/mysqldb/templates/secret.yaml b/otf-helm/otf/charts/databases/charts/mysqldb/templates/secret.yaml
new file mode 100644 (file)
index 0000000..80834a7
--- /dev/null
@@ -0,0 +1,8 @@
+apiVersion: v1\r
+kind: Secret\r
+metadata:\r
+  name: {{ .Values.global.mysqldb.appName }}\r
+type: Opaque\r
+data:\r
+  mysql_root_password: {{ .Values.global.mysqldb.mysqlRootPassword  | b64enc }}\r
+  mysql_password: {{ .Values.global.mysqldb.mysqlPassword  | b64enc }}\r
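
The secret simply base64-encodes the two plaintext passwords supplied through values. A minimal sketch of the same transform and of reading a deployed secret back out; the secret name is a placeholder:

    # b64enc is plain base64 encoding of the value
    echo -n 'example-password' | base64

    # decode a deployed secret for verification (secret name is illustrative)
    kubectl get secret otf-mysqldb -o jsonpath='{.data.mysql_root_password}' | base64 -d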
diff --git a/otf-helm/otf/charts/databases/charts/mysqldb/templates/service.yaml b/otf-helm/otf/charts/databases/charts/mysqldb/templates/service.yaml
new file mode 100644 (file)
index 0000000..7a9c79a
--- /dev/null
@@ -0,0 +1,19 @@
+apiVersion: v1\r
+kind: Service\r
+metadata:\r
+  name: {{ .Values.global.mysqldb.appName }}\r
+  namespace: {{ .Values.global.mysqldb.namespace }}\r
+  labels:\r
+    app: {{ .Values.global.mysqldb.appName }}\r
+    version: {{ .Values.global.mysqldb.version }}\r
+  annotations:\r
+    service.beta.kubernetes.io/azure-load-balancer-internal: "true"\r
+spec:\r
+  type: LoadBalancer\r
+  ports:\r
+  - port: {{ .Values.global.mysqldb.port }}\r
+    protocol: TCP\r
+    targetPort: {{ .Values.global.mysqldb.targetPort }}\r
+  selector:\r
+    app: {{ .Values.global.mysqldb.appName }}\r
+    version: {{ .Values.global.mysqldb.version }}\r
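
The service exposes the chart port through an internal Azure load balancer and forwards to the container's targetPort. A rough sketch for locating the assigned address and connecting through it, assuming network access to the internal load balancer; names are placeholders:

    # find the internal load balancer address (names are illustrative)
    kubectl get svc otf-mysqldb -n otf

    # connect through the service; substitute the EXTERNAL-IP reported above
    mysql -h <EXTERNAL-IP> -P 3306 -u "${MYSQL_USER}" -p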
diff --git a/otf-helm/otf/charts/databases/charts/mysqldb/values.yaml b/otf-helm/otf/charts/databases/charts/mysqldb/values.yaml
new file mode 100644 (file)
index 0000000..1c0457b
--- /dev/null
@@ -0,0 +1,233 @@
+# Reference values file from the upstream chart on GitHub - currently not used.\r
+\r
+## mysql image version\r
+## ref: https://hub.docker.com/r/library/mysql/tags/\r
+##\r
+image: "mysql"\r
+imageTag: "5.7.26"\r
+\r
+strategy:\r
+  type: Recreate\r
+\r
+busybox:\r
+  image: "busybox"\r
+  tag: "1.29.3"\r
+\r
+testFramework:\r
+  enabled: true\r
+  image: "dduportal/bats"\r
+  tag: "0.4.0"\r
+\r
+## Specify password for root user\r
+##\r
+## Default: random 10 character string\r
+# mysqlRootPassword: testing\r
+\r
+## Create a database user\r
+##\r
+# mysqlUser:\r
+## Default: random 10 character string\r
+# mysqlPassword:\r
+\r
+## Allow unauthenticated access, uncomment to enable\r
+##\r
+# mysqlAllowEmptyPassword: true\r
+\r
+## Create a database\r
+##\r
+# mysqlDatabase:\r
+\r
+## Specify an imagePullPolicy (Required)\r
+## It's recommended to change this to 'Always' if the image tag is 'latest'\r
+## ref: http://kubernetes.io/docs/user-guide/images/#updating-images\r
+##\r
+imagePullPolicy: IfNotPresent\r
+\r
+## Additional arguments that are passed to the MySQL container.\r
+## For example, use --default-authentication-plugin=mysql_native_password if older clients need to\r
+## connect to a MySQL 8 instance.\r
+args: []\r
+\r
+extraVolumes: |\r
+  # - name: extras\r
+  #   emptyDir: {}\r
+\r
+extraVolumeMounts: |\r
+  # - name: extras\r
+  #   mountPath: /usr/share/extras\r
+  #   readOnly: true\r
+\r
+extraInitContainers: |\r
+  # - name: do-something\r
+  #   image: busybox\r
+  #   command: ['do', 'something']\r
+\r
+# Optionally specify an array of imagePullSecrets.\r
+# Secrets must be manually created in the namespace.\r
+# ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod\r
+# imagePullSecrets:\r
+  # - name: myRegistryKeySecretName\r
+\r
+## Node selector\r
+## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector\r
+nodeSelector: {}\r
+\r
+## Affinity\r
+## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity\r
+affinity: {}\r
+\r
+## Tolerations for pod assignment\r
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/\r
+##\r
+tolerations: []\r
+\r
+livenessProbe:\r
+  initialDelaySeconds: 30\r
+  periodSeconds: 10\r
+  timeoutSeconds: 5\r
+  successThreshold: 1\r
+  failureThreshold: 3\r
+\r
+readinessProbe:\r
+  initialDelaySeconds: 5\r
+  periodSeconds: 10\r
+  timeoutSeconds: 1\r
+  successThreshold: 1\r
+  failureThreshold: 3\r
+\r
+## Persist data to a persistent volume\r
+persistence:\r
+  enabled: true\r
+  ## database data Persistent Volume Storage Class\r
+  ## If defined, storageClassName: <storageClass>\r
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning\r
+  ## If undefined (the default) or set to null, no storageClassName spec is\r
+  ##   set, choosing the default provisioner.  (gp2 on AWS, standard on\r
+  ##   GKE, AWS & OpenStack)\r
+  ##\r
+  # storageClass: "-"\r
+  accessMode: ReadWriteOnce\r
+  size: 8Gi\r
+  annotations: {}\r
+\r
+## Use an alternate scheduler, e.g. "stork".\r
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/\r
+##\r
+# schedulerName:\r
+\r
+## Security context\r
+securityContext:\r
+  enabled: false\r
+  runAsUser: 999\r
+  fsGroup: 999\r
+\r
+## Configure resource requests and limits\r
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/\r
+##\r
+resources:\r
+  requests:\r
+    memory: 256Mi\r
+    cpu: 100m\r
+\r
+# Custom mysql configuration files path\r
+configurationFilesPath: /etc/mysql/conf.d/\r
+\r
+# Custom mysql configuration files used to override default mysql settings\r
+configurationFiles: {}\r
+#  mysql.cnf: |-\r
+#    [mysqld]\r
+#    skip-name-resolve\r
+#    ssl-ca=/ssl/ca.pem\r
+#    ssl-cert=/ssl/server-cert.pem\r
+#    ssl-key=/ssl/server-key.pem\r
+\r
+# Custom mysql init SQL files used to initialize the database\r
+initializationFiles: {}\r
+#  first-db.sql: |-\r
+#    CREATE DATABASE IF NOT EXISTS first DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;\r
+#  second-db.sql: |-\r
+#    CREATE DATABASE IF NOT EXISTS second DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;\r
+\r
+metrics:\r
+  enabled: false\r
+  image: prom/mysqld-exporter\r
+  imageTag: v0.10.0\r
+  imagePullPolicy: IfNotPresent\r
+  resources: {}\r
+  annotations: {}\r
+    # prometheus.io/scrape: "true"\r
+    # prometheus.io/port: "9104"\r
+  livenessProbe:\r
+    initialDelaySeconds: 15\r
+    timeoutSeconds: 5\r
+  readinessProbe:\r
+    initialDelaySeconds: 5\r
+    timeoutSeconds: 1\r
+  flags: []\r
+  serviceMonitor:\r
+    enabled: false\r
+    additionalLabels: {}\r
+\r
+## Configure the service\r
+## ref: http://kubernetes.io/docs/user-guide/services/\r
+service:\r
+  annotations: {}\r
+  ## Specify a service type\r
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types\r
+  type: ClusterIP\r
+  port: 3306\r
+  # nodePort: 32000\r
+  # loadBalancerIP:\r
+\r
+## Pods Service Account\r
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/\r
+serviceAccount:\r
+  ## Specifies whether a ServiceAccount should be created\r
+  ##\r
+  create: false\r
+  ## The name of the ServiceAccount to use.\r
+  ## If not set and create is true, a name is generated using the mariadb.fullname template\r
+  # name:\r
+\r
+ssl:\r
+  enabled: false\r
+  secret: mysql-ssl-certs\r
+  certificates:\r
+#  - name: mysql-ssl-certs\r
+#    ca: |-\r
+#      -----BEGIN CERTIFICATE-----\r
+#      ...\r
+#      -----END CERTIFICATE-----\r
+#    cert: |-\r
+#      -----BEGIN CERTIFICATE-----\r
+#      ...\r
+#      -----END CERTIFICATE-----\r
+#    key: |-\r
+#      -----BEGIN RSA PRIVATE KEY-----\r
+#      ...\r
+#      -----END RSA PRIVATE KEY-----\r
+\r
+## Populates the 'TZ' system timezone environment variable\r
+## ref: https://dev.mysql.com/doc/refman/5.7/en/time-zone-support.html\r
+##\r
+## Default: nil (mysql will use image's default timezone, normally UTC)\r
+## Example: 'Australia/Sydney'\r
+# timezone:\r
+\r
+# Deployment Annotations\r
+deploymentAnnotations: {}\r
+\r
+# To be added to the database server pod(s)\r
+podAnnotations: {}\r
+podLabels: {}\r
+\r
+## Set pod priorityClassName\r
+# priorityClassName: {}\r
+\r
+\r
+## Init container resources defaults\r
+initContainer:\r
+  resources:\r
+    requests:\r
+      memory: 10Mi\r
+      cpu: 10m\r
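
Since this file mirrors upstream chart defaults rather than driving the templates above (which read from .Values.global.mysqldb), these knobs would only take effect once wired into the templates. A hedged sketch of how such overrides are normally layered at install time with Helm 3; the keys shown follow this reference file, not the live templates:

    # illustrative install-time overrides
    helm install otf otf-helm/otf \
      --set persistence.size=20Gi \
      --set metrics.enabled=true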
diff --git a/otf-helm/otf/values/development.yaml b/otf-helm/otf/values/development.yaml
new file mode 100644 (file)
index 0000000..894d035
--- /dev/null
@@ -0,0 +1 @@
+#These are the development environment specific values\r
diff --git a/otf-helm/readme.md b/otf-helm/readme.md
new file mode 100644 (file)
index 0000000..c9db864
--- /dev/null
@@ -0,0 +1,5 @@
+Open Test Framework \r
+\r
+Use these Helm charts to deploy otf\r
+\r
+helm install otf
\ No newline at end of file
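
With Helm 3 the install command takes a release name and a chart path, and an environment-specific values file can be layered on top. A minimal sketch, assuming the repository layout above:

    helm install otf ./otf-helm/otf -f ./otf-helm/otf/values/development.yaml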