From: Chen, Jackie
Date: Mon, 15 Jun 2020 18:28:29 +0000 (-0400)
Subject: Adding policy manager and a1 sdnc vth
X-Git-Url: https://gerrit.o-ran-sc.org/r/gitweb?p=it%2Fotf.git;a=commitdiff_plain;h=cbbc9d7f12f8d8168e285a32f9002aca339a0e39

Adding policy manager and a1 sdnc vth

Included otf-helm with this commit; it is used for OTF database deployments.
Removed the mock server from both VTHs.

Change-Id: Ic1ca76eb89da3d79b100331ae1e11fb98c91113a
Signed-off-by: Chen, Jackie
---

diff --git a/a1-policy-manager-vth/.environ b/a1-policy-manager-vth/.environ
new file mode 100644
index 0000000..0b3fa87
--- /dev/null
+++ b/a1-policy-manager-vth/.environ
@@ -0,0 +1,9 @@
+#if using dotenv, change file name to .env and set the env variables below
+USER=String
+PW=String
+AUTH=Boolean
+PROXY=Boolean
+HTTP=String
+HTTPS=String
+API_URL=String
+API_PORT=Int
diff --git a/a1-policy-manager-vth/Jenkinsfile b/a1-policy-manager-vth/Jenkinsfile
new file mode 100644
index 0000000..3244ca2
--- /dev/null
+++ b/a1-policy-manager-vth/Jenkinsfile
@@ -0,0 +1,158 @@
+#!/usr/bin/env groovy

+/* Copyright (c) 2019 AT&T Intellectual Property.                             #
+#                                                                             #
+#   Licensed under the Apache License, Version 2.0 (the "License");           #
+#   you may not use this file except in compliance with the License.          #
+#   You may obtain a copy of the License at                                   #
+#                                                                             #
+#       http://www.apache.org/licenses/LICENSE-2.0                            #
+#                                                                             #
+#   Unless required by applicable law or agreed to in writing, software       #
+#   distributed under the License is distributed on an "AS IS" BASIS,         #
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  #
+#   See the License for the specific language governing permissions and       #
+#   limitations under the License.                                            #
+##############################################################################*/
+
+
+properties([[$class: 'ParametersDefinitionProperty', parameterDefinitions: [
+        [$class: 'hudson.model.StringParameterDefinition', name: 'PHASE', defaultValue: "BUILD"],
+        [$class: 'hudson.model.StringParameterDefinition', name: 'ENV', defaultValue: "dev"],
+        [$class: 'hudson.model.StringParameterDefinition', name: 'MECHID', defaultValue: "m13591_otf_dev"],
+        [$class: 'hudson.model.StringParameterDefinition', name: 'KUBE_CONFIG', defaultValue: "kubeConfig-dev"],
+        [$class: 'hudson.model.StringParameterDefinition', name: 'TILLER_NAMESPACE', defaultValue: "com-att-ecomp-otf-dev"]
+]]])
+
+
+ echo "Build branch: ${env.BRANCH_NAME}"
+
+ node("docker"){
+    stage 'Checkout'
+    checkout scm
+    PHASES=PHASE.tokenize( '_' );
+    echo "PHASES : " + PHASES
+
+
+    ARTIFACT_ID="a1-policy-manager-vth";
+    VERSION="0.0.1-SNAPSHOT";
+    NAMESPACE="com.att.ecomp.otf" //TODO change back to org-otf-oran when done testing
+    DOCKER_REGISTRY="dockercentral.it.att.com:5100"
+
+    if( ENV.equalsIgnoreCase("dev") ){
+        IMAGE_NAME=DOCKER_REGISTRY + "/" + NAMESPACE + ".dev" + "/" + ARTIFACT_ID + ":" + VERSION
+
+    }
+    if( ENV.equalsIgnoreCase("prod") || ENV.equalsIgnoreCase("prod-dr")){
+        IMAGE_NAME=DOCKER_REGISTRY + "/" + NAMESPACE + ".prod" + "/" + ARTIFACT_ID + ":" + VERSION
+
+    }
+
+    if( ENV.equalsIgnoreCase("st") ){
+        IMAGE_NAME=DOCKER_REGISTRY + "/" + NAMESPACE + ".st" + "/" + ARTIFACT_ID + ":" + VERSION
+
+    }
+
+    echo "Artifact: " + IMAGE_NAME
+
+    withEnv(["PATH=${env.PATH}:${env.WORKSPACE}/linux-amd64", "HELM_HOME=${env.WORKSPACE}"]) {
+
+        echo "PATH=${env.PATH}"
+        echo "HELM_HOME=${env.HELM_HOME}"
+
+        if (PHASES.contains("BUILD")){
+            dir("./a1-policy-manager-vth"){
+
+                stage 'Publish Artifact'
+
+                withCredentials([usernamePassword(credentialsId: MECHID, usernameVariable: 
'USERNAME', passwordVariable: 'PASSWORD')]) { + + echo "Artifact: " + IMAGE_NAME + + sh """ + docker login $DOCKER_REGISTRY --username $USERNAME --password $PASSWORD + docker build -t $IMAGE_NAME . + docker push $IMAGE_NAME + """ + } + } + + } + + if (PHASES.contains("DEPLOY") || PHASES.contains("UNDEPLOY")) { + + stage 'Init Helm' + + //check if helm exists if not install + if(fileExists('linux-amd64/helm')){ + sh """ + echo "helm is already installed" + """ + } + else{ + //download helm + sh """ + echo "installing helm" + wget https://storage.googleapis.com/kubernetes-helm/helm-v2.14.3-linux-amd64.tar.gz + tar -xf helm-v2.14.3-linux-amd64.tar.gz + rm helm-v2.14.3-linux-amd64.tar.gz + """ + } + + withCredentials([file(credentialsId: KUBE_CONFIG, variable: 'KUBECONFIG')]) { + + dir('a1-policy-manager-vth/helm'){ + //check if charts are valid, and then perform dry run, if successful then upgrade/install charts + + if (PHASES.contains("UNDEPLOY") ) { + stage 'Undeploy' + + sh """ + helm delete --tiller-namespace=$TILLER_NAMESPACE --purge $ARTIFACT_ID + """ + } + + //NOTE Double quotes are used below to access groovy variables like artifact_id and tiller_namespace + if (PHASES.contains("DEPLOY") ){ + stage 'Deploy' + withCredentials([usernamePassword(credentialsId: MECHID, usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) { + + sh """ + echo "Validate Yaml" + helm lint $ARTIFACT_ID + + echo "View Helm Templates" + helm template $ARTIFACT_ID --set appName=$ARTIFACT_ID \ + --set appName=$ARTIFACT_ID \ + --set version=$VERSION \ + --set env=$ENV \ + --set image=$IMAGE_NAME \ + --set namespace=$TILLER_NAMESPACE + + echo "Perform Dry Run Of Install" + helm upgrade --tiller-namespace=$TILLER_NAMESPACE --install --dry-run $ARTIFACT_ID $ARTIFACT_ID \ + --set appName=$ARTIFACT_ID \ + --set version=$VERSION \ + --set env=$ENV \ + --set image=$IMAGE_NAME \ + --set namespace=$TILLER_NAMESPACE + + + echo "Helm Install/Upgrade" + helm upgrade --tiller-namespace=$TILLER_NAMESPACE --install $ARTIFACT_ID $ARTIFACT_ID \ + --set appName=$ARTIFACT_ID \ + --set version=$VERSION \ + --set env=$ENV \ + --set image=$IMAGE_NAME \ + --set namespace=$TILLER_NAMESPACE + + """ + } + } + + } + } + } + + } + } diff --git a/a1-policy-manager-vth/app/__init__.py b/a1-policy-manager-vth/app/__init__.py new file mode 100644 index 0000000..14b5496 --- /dev/null +++ b/a1-policy-manager-vth/app/__init__.py @@ -0,0 +1,11 @@ +""" + Module Info: + Anything imported to this file will be available to outside modules. + Import everything using star, methods or anything that should not be + used by the outside modules should not be imported on the nested + __init__ files. +""" +from .routes import * +from .errors import * +from .models import * +from .helpers import * diff --git a/a1-policy-manager-vth/app/errors/__init__.py b/a1-policy-manager-vth/app/errors/__init__.py new file mode 100644 index 0000000..b491f42 --- /dev/null +++ b/a1-policy-manager-vth/app/errors/__init__.py @@ -0,0 +1,6 @@ +""" + Module Info: + Anything imported to this file will be available to outside modules. 
+    Only import methods that can be used and are used by outside modules
+"""
+from .bad_request_exception import BadRequestException
diff --git a/a1-policy-manager-vth/app/errors/bad_request_exception.py b/a1-policy-manager-vth/app/errors/bad_request_exception.py
new file mode 100644
index 0000000..a3e3d22
--- /dev/null
+++ b/a1-policy-manager-vth/app/errors/bad_request_exception.py
@@ -0,0 +1,21 @@
+"""
+Args:
+Returns:
+Examples:
+"""
+class BadRequestException(Exception):
+    """
+    Args:
+    Returns:
+    Examples:
+    """
+    def __init__(self, status_code=406, message="Not Acceptable Response"):
+        cases = {
+                401:"Unauthorized",
+                403:"Forbidden",
+                404:"Not Found",
+                423:"Not Operational"
+                }
+        super().__init__(cases.get(status_code, message))
+        self.status_code = status_code
+        self.message = message
diff --git a/a1-policy-manager-vth/app/helpers/__init__.py b/a1-policy-manager-vth/app/helpers/__init__.py
new file mode 100644
index 0000000..3313af8
--- /dev/null
+++ b/a1-policy-manager-vth/app/helpers/__init__.py
@@ -0,0 +1,12 @@
+"""
+    Module Info:
+    Anything imported to this file will be available to outside modules.
+    Only import methods that can be used and are used by outside modules
+"""
+from .error_helper import *
+from .response_helper import *
+from .time_helper import *
+from .policy_helper import *
+from .service_helper import *
+from .ric_helper import *
+from .action_helper import *
diff --git a/a1-policy-manager-vth/app/helpers/action_helper.py b/a1-policy-manager-vth/app/helpers/action_helper.py
new file mode 100644
index 0000000..f952e76
--- /dev/null
+++ b/a1-policy-manager-vth/app/helpers/action_helper.py
@@ -0,0 +1,69 @@
+import json
+import ast
+from app.helpers import response_helper as ResponseHelper
+from flask import current_app
+from app.errors.bad_request_exception import BadRequestException
+import requests
+
+def execute_action(request, response_dict, config):
+    headers = ResponseHelper.create_headers()
+    request_data = request.json
+    action_request = request_data.get("action").lower()
+    method = request_data.get("method").upper()
+    creds = ResponseHelper.get_credentials(request_data, config)
+
+    proxies = ResponseHelper.get_proxies(config)
+    action = "services/keepalive" if action_request == "keepalive" else action_request
+    url = ResponseHelper.create_url(config=config, uri_path="/"+action)
+#    ret_url = request.args.get('retURL')
+
+
+    json_req = ast.literal_eval(request_data["action_data"]["jsonBody"])
+    query_params = ast.literal_eval(request_data["action_data"]["query"])
+    current_app.logger.info("Requesting Url: {}, params: {}, body: {}, auth: {}, proxies: {}".format(url, query_params, json_req, creds, proxies))
+    try:
+        if(method == "GET"):
+            res = requests.get(url, proxies=proxies, auth=creds, headers=headers, params=query_params, json=json_req)
+        elif(method == "POST"):
+            res = requests.post(url, proxies=proxies, auth=creds, headers=headers, params=query_params, json=json_req)
+        elif(method == "PUT"):
+            res = requests.put(url, proxies=proxies, auth=creds, headers=headers, params=query_params, json=json_req)
+        elif(method == "DELETE"):
+            res = requests.delete(url, proxies=proxies, auth=creds, headers=headers, params=query_params, json=json_req)
+        else:
+            raise BadRequestException(406, "Method Not Supported")
+        response = {
+                "status_code":res.status_code,
+                "result": res.json()
+                }
+    except(json.decoder.JSONDecodeError):
+        response = {
+                "status_code":res.status_code,
+                "result": res.reason
+                }
+    except requests.exceptions.RequestException:
+        response = {
+                "status_code":504,
+                "result": "Something Happened"
+                }
+    finally:
+        response_dict['vthResponse']['resultData'] = response
+#        if ret_url is not None:
+#            ResponseHelper.sendCallback(ret_url,response_dict)
+#            return '',200
+        return response_dict
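+#
+# Illustrative request body (an editorial sketch; the values below are
+# placeholders, not real endpoints or credentials). Note that "query" and
+# "jsonBody" are strings containing Python dict literals, because they are
+# parsed above with ast.literal_eval:
+#
+#   {
+#       "action": "policy_ids",
+#       "method": "GET",
+#       "auth": {"username": "<user>", "password": "<pw>"},
+#       "action_data": {
+#           "query": "{'ric': 'ric1'}",
+#           "jsonBody": "{}"
+#       }
+#   }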
"status_code":504, + "result": "Something Happned" + } + finally: + response_dict['vthResponse']['resultData'] = response + # if ret_url is not None: + # ResponseHelper.sendCallback(ret_url,response_dict) + # return '',200 + return response_dict diff --git a/a1-policy-manager-vth/app/helpers/error_helper.py b/a1-policy-manager-vth/app/helpers/error_helper.py new file mode 100644 index 0000000..b34cedf --- /dev/null +++ b/a1-policy-manager-vth/app/helpers/error_helper.py @@ -0,0 +1,51 @@ +from flask import current_app +import datetime +""" +Args: +Returns: +Examples: +""" + +def error_dic(error, status_code, response_message="Something went wrong, vth encountered an error"): + """ + Args: + Returns: + Examples: + """ + message = [str(x) for x in error.args] + error_log={ + "error":{ + "type": error.__class__.__name__, + "message": message + } + } + response_data = { + "vthResponse": { + "testDurationMS": 0, + 'dateTimeUTC': str(datetime.datetime.now()), + "abstractMessage": "Failed", + "error":response_message, + "status_code": status_code, + "resultData": {} + } + } + current_app.logger.error(error_log) + return response_data + +def error_dic2(error, status_code=500): + """ + Args: + Returns: + Examples: + """ + message = [str(x) for x in error.args] + response = { + "status_code" : status_code, + "success": False, + "error":{ + "type": error.__class__.__name__, + "message": message + } + } + return response + diff --git a/a1-policy-manager-vth/app/helpers/policy_helper.py b/a1-policy-manager-vth/app/helpers/policy_helper.py new file mode 100644 index 0000000..ea4fedc --- /dev/null +++ b/a1-policy-manager-vth/app/helpers/policy_helper.py @@ -0,0 +1,163 @@ +from app.helpers import response_helper as ResponseHelper +from flask import current_app +from app.errors.bad_request_exception import BadRequestException +import requests + +def get_policy_using_get(request, response_dict, config): + json_data = request.get_json() + if 'id' not in json_data: raise BadRequestException(406, "Request is missing id") + param = {'id': json_data['id']} + creds = ResponseHelper.get_credentials(json_data, config) + url = ResponseHelper.create_url(config=config, uri_path="/policy") + res = requests.get(url, auth=creds, params=param) + response = { + "status_code":res.status_code, + "result": res.json() + } + response_dict['vthResponse']['resultData'] = response + + return response_dict +def put_policy_using_put(request, response_dict, config): + json_data = request.get_json() + creds = ResponseHelper.get_credentials(json_data, config) + + current_app.logger.info("creds: {}".format(creds)) + + required = {'id', 'jsonBody', 'ric', 'service'} + param_keys = {'id', 'ric', 'service'} + optional = {"type"} + data_keys = param_keys.copy() + keys = set(json_data.keys()) + if not required <= keys: + raise BadRequestException(406, "Request is missing required values {}".format(required)) + if optional <= keys: data_keys.update(optional) + param = {} + body = {} + for key in data_keys: + param[key] = json_data[key] + body['jsonBody'] = json_data['jsonBody'] + + url = ResponseHelper.create_url(config=config, uri_path="/policy") + res = requests.put(url, auth=creds, params=param, json=body) + response = { + "status_code":res.status_code, + "result": res.json() + } + response_dict['vthResponse']['resultData'] = response + return response_dict +def delete_policy_using_delete(request, response_dict, config): + json_data = request.get_json() + creds = ResponseHelper.get_credentials(json_data, config) + + 
current_app.logger.info("creds: {}".format(creds)) + + keys = set(json_data.keys()) + required = {'id'} + if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required)) + param = {'id': json_data['id']} + + url = ResponseHelper.create_url(config=config, uri_path="/policy") + res = requests.delete(url, auth=creds, params=param) + response = { + "status_code":res.status_code, + "result": res.json() + } + response_dict['vthResponse']['resultData'] = response + return response_dict + +def get_policy_ids_using_get(request, response_dict, config): + json_data = request.get_json() + creds = ResponseHelper.get_credentials(json_data, config) + current_app.logger.info("creds: {}".format(creds)) + + param = { + "ric":json_data["ric"] if "ric" in json_data else "", + "service":json_data["service"] if "service" in json_data else "", + "type":json_data["type"] if "type" in json_data else "" + } + + url = ResponseHelper.create_url(config=config, uri_path="/policy_ids") + res = requests.get(url, auth=creds, params=param) + response = { + "status_code":res.status_code, + "result": res.json() + } + response_dict['vthResponse']['resultData'] = response + return response_dict + +def get_policy_schema_using_get(request, response_dict, config): + json_data = request.get_json() + #username = config['auth']['username'] if 'username' not in json_data else json_data['username'] + #password = config['auth']['password'] if 'password' not in json_data else json_data['password'] + #creds = (username, password) + creds = ResponseHelper.get_credentials(json_data, config) + current_app.logger.info("creds: {}".format(creds)) + + keys = set(json_data.keys()) + required = {'id'} + if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required)) + param = {'id': json_data['id']} + + url = ResponseHelper.create_url(config=config, uri_path="/policy_schema") + res = requests.get(url, auth=creds, params=param) + response = { + "status_code":res.status_code, + "result": res.json() + } + response_dict['vthResponse']['resultData'] = response + return response_dict +def get_policy_schemas_using_get(request, response_dict, config): + json_data = request.get_json() + #username = config['auth']['username'] if 'username' not in json_data else json_data['username'] + #password = config['auth']['password'] if 'password' not in json_data else json_data['password'] + #creds = (username, password) + creds = ResponseHelper.get_credentials(json_data, config) + current_app.logger.info("creds: {}".format(creds)) + + param = { + "ric":json_data['ric'] if 'ric' in json_data else "" + } + #api_response = requests.put(url, credentials=creds, params=param) + + url = ResponseHelper.create_url(config=config, uri_path="/policy_schemas") + res = requests.get(url, auth=creds, params=param) + response = { + "status_code":res.status_code, + "result": res.json() + } + response_dict['vthResponse']['resultData'] = response + return response_dict +def get_policy_status_using_get(request, response_dict, config): + json_data = request.get_json() + #username = config['auth']['username'] if 'username' not in json_data else json_data['username'] + #password = config['auth']['password'] if 'password' not in json_data else json_data['password'] + #creds = (username, password) + creds = ResponseHelper.get_credentials(json_data, config) + current_app.logger.info("creds: {}".format(creds)) + + keys = set(json_data.keys()) + required = {'id'} + if not required <= keys: raise 
BadRequestException(406, "Request is missing required values {}".format(required)) + param = { + "id":json_data["id"] + } + + response_dict['vthResponse']['resultData'] = param + #api_response = requests.get(url, credentials=creds, params=param) + return response_dict +def get_policy_types_using_get(request, response_dict, config): + json_data = request.get_json() + creds = ResponseHelper.get_credentials(json_data, config) + param = { + 'ric': json_data['ric'] if 'ric' in json_data else "" + } + + url = ResponseHelper.create_url(config=config, uri_path="/a1-p/policytypes") + res = requests.get(url, auth=creds, params=param) + response = { + "status_code":res.status_code, + "result": res.json() + } + response_dict['vthResponse']['resultData'] = response + return response_dict + diff --git a/a1-policy-manager-vth/app/helpers/response_helper.py b/a1-policy-manager-vth/app/helpers/response_helper.py new file mode 100644 index 0000000..2058347 --- /dev/null +++ b/a1-policy-manager-vth/app/helpers/response_helper.py @@ -0,0 +1,221 @@ +import ast +import requests +from configparser import ConfigParser +import os +import datetime +import json +from flask import request, jsonify, current_app +from app.helpers.time_helper import unix_time_millis +from app.errors.bad_request_exception import BadRequestException +import requests + +""" + Module Info: +""" +def create_headers(enable_cache=True, content_type="application/json", connection="Keep-Alive"): + headers = {'Cache-Control':'no-cache, no-store, must-revalidate', "Pragma":"no-cache", "Expires":"0"} if not enable_cache else {} + headers['content-type'] = content_type + headers['connection'] = connection + return headers +def create_url(config=None, uri_path = "/", url_string=None): + return config['api']['base_url'] +":"+ config['api']['port']+uri_path if url_string is None else url_string + +def valid_string_json(string, response_message="Invalid json string in query or jsonBody, format requires quoted json object e.g. \"{'key':'value, key2:{'innerKey':'innerValue'}}\""): + try: + string_to_dict = ast.literal_eval(string) + except(Exception): + raise BadRequestException(406, response_message) + return True +def route_check(config=None, get_function=None, post_function=None, put_function=None, delete_function=None): + """ + Info: + Since all routes do the same pre-check and have a similar skeleton, this function just refactored the pre-check for code reuse + Arguments (**kwargs): pass in the specified key(s) and method(s) that handle the type of method, method must be allowed by route decorator + get_function => type: function + put_function => type: function + delete_function => type: function + Returns: + returns the return of the function call, typically a jsonified response. 
+ you can capture response in a var and execute logic or you can just return the function call/response + E.G.: + response = route_check(post_function = handle_post) + return route_check(get_function = handle_get, post_function = handle_post) + """ + if not request.is_json: raise BadRequestException(406, "Invalid Json Request") + + + response_dict = vth_response_dic() + start_time = unix_time_millis(datetime.datetime.now()) + status_code = 200 + ret_url = request.args.get('retURL') + + query = "" + json_body = "" + request_data = request.json + json_keys = set(request_data) + action_request = request_data.get("action").lower() + valid_actions = {"policies", "policy", "policy_ids", "policy_schema", "policy_schemas", "policy_status", "policy_types", "ric", "rics", "service", "services", "keepalive", "status" } + required_keys = {"action", "method", "auth", "action_data"} + + #check for valid action and json request contains required keys + if not required_keys <= json_keys: raise BadRequestException(406, "Json request is missing required keys {}".format(required_keys)) + if not action_request in valid_actions: raise BadRequestException(406, "Action is not supported {}".format(action_request)) + #check request's action_data key contains required keys + if 'query' not in request.json['action_data']: raise BadRequestException(406, "action_data must contain query and jsonBody ") + if 'jsonBody' not in request.json['action_data']: raise BadRequestException(406, "action_data must contain query and jsonBody") + + query = request.json['action_data']['query'] if 'query' in request.json['action_data'] else "" + json_body = request.json['action_data']['jsonBody'] if 'jsonBody' in request.json['action_data'] else "" + + + if valid_string_json(query) and valid_string_json(json_body): + if(request.method == 'GET'): + response_dict = get_function(request, response_dict, config) + elif(request.method == 'POST'): + response_dict = post_function(request, response_dict, config) + elif(request.method == 'PUT'): + response_dict = put_function(request, response_dict, config) + elif(request.method == 'DELETE'): + response_dict = delete_function(request, response_dict, config) + else: + raise BadRequestException(406, "Invalid JSON Strings") + end_time = unix_time_millis(datetime.datetime.now()) + response_dict['vthResponse']['testDurationMS'] = end_time-start_time + if ret_url is not None: + sendCallback(ret_url,response_dict) + return '',200 + return jsonify(response_dict), status_code + +def get_proxies(config): + proxy_enabled = config.getboolean('resource', 'proxy_enabled') + req_proxies = { + 'http': None, + 'https': None + } + if not proxy_enabled: + return None + else: + req_proxies['http'] = config['resource']['http_proxy'] + req_proxies['https'] = config['resource']['https_proxy'] + return req_proxies +def get_credentials(json_data, config): + auth_enabled = config.getboolean('auth', 'creds_enabled') + if not auth_enabled: + return None + else: + username = config['auth']['username'] if 'username' not in json_data['auth'] else json_data['auth']['username'] + password = config['auth']['password'] if 'password' not in json_data['auth'] else json_data['auth']['password'] + return (username, password) +def vth_response_dic(): + """ + Args: + Returns: + Examples: + """ + response_data = { + "vthResponse": { + "testDurationMS": "", + 'dateTimeUTC': str(datetime.datetime.now()), + "abstractMessage": "Success", + "resultData": {} + } + } + return response_data + +def sendCallback(url, data): + try: + if type(data) 
is not dict:
+            data = {"msg": data}
+        current_app.logger.info("sending callback")
+        requests.post(url, json=data)
+    except Exception as e:
+        current_app.logger.info(e)
+    return
+
+def get_request_data(request):
+    if not request.is_json:
+        raise ValueError("request must be json")
+    requestData = request.get_json()
+    return requestData
+
+
+def valid_json(data):
+
+    try:
+        _ = json.loads(data)
+    except ValueError:
+        return False
+    return True
+def get_config(config_file_name):
+    config = ConfigParser(os.environ)
+    config.read(config_file_name)
+    return config
+
+def validate_request(request_data, isPublish=True):
+    return  # NOTE: validation is currently disabled; remove this line to re-enable the checks below
+    missing_params = []
+
+    if 'topic_name' not in request_data:
+        missing_params.append("topic_name")
+    if isPublish:
+        if 'data' not in request_data:
+            missing_params.append('data')
+    else:
+        if 'consumer_group' not in request_data:
+            missing_params.append('consumer_group')
+        if 'consumer_id' not in request_data:
+            missing_params.append('consumer_id')
+
+    if missing_params:
+        err_msg = '{} request requires the following: '.format('publish' if isPublish else 'subscribe')
+        err_msg += ','.join(missing_params)
+        raise KeyError(err_msg)
+
+
+def build_url(config, request_data, is_publish=True):
+    if is_publish:
+        base_path = config['resource']['base_address'] + config['resource']['publish']
+        topic_name = request_data['topic_name']
+        publish_address = base_path.format(topic_name=topic_name)
+        return publish_address
+
+    base_path = config['resource']['base_address'] + config['resource']['subscribe']
+    topic_name = request_data['topic_name']
+    consumer_group = request_data['consumer_group']
+    consumer_id = request_data['consumer_id']
+    subscribe_address = base_path.format(topic_name=topic_name, consumer_group=consumer_group, consumer_id=consumer_id)
+    if ('timeout' in request_data):
+        subscribe_address = (subscribe_address + '?timeout={}').format(request_data['timeout'])
+    return subscribe_address
+
+
+def send_request(url, config, is_subscribe_request=False, payload=None):
+    # setup default values
+    auth_enabled = config.getboolean('auth', 'auth_enabled')
+    proxy_enabled = config.getboolean('resource', 'proxy_enabled')
+    username = ''
+    password = ''
+    req_proxies = {
+        'http': None,
+        'https': None
+    }
+    # place proxy and authentication information
+    if auth_enabled:
+        username = config['auth']['username']
+        password = config['auth']['password']
+    if proxy_enabled:
+        req_proxies['http'] = config['resource']['http_proxy']
+        req_proxies['https'] = config['resource']['https_proxy']
+
+    # for subscribe request
+    if is_subscribe_request:
+        return requests.get(url,
+                            auth=(username, password) if auth_enabled else None,
+                            proxies=req_proxies if proxy_enabled else None)
+    # for publish request
+    req_headers = {'Content-type': 'application/json'}
+    return requests.post(url,
+                         json=payload,
+                         auth=(username, password) if auth_enabled else None,
+                         proxies=req_proxies if proxy_enabled else None,
+                         headers=req_headers)
diff --git a/a1-policy-manager-vth/app/helpers/ric_helper.py b/a1-policy-manager-vth/app/helpers/ric_helper.py
new file mode 100644
index 0000000..47d55c2
--- /dev/null
+++ b/a1-policy-manager-vth/app/helpers/ric_helper.py
@@ -0,0 +1,37 @@
+from app.helpers import response_helper as ResponseHelper
+from flask import current_app
+from app.errors.bad_request_exception import BadRequestException
+
+def get_ric_using_get(request, response_dict, config):
+    json_data = request.get_json()
+    #username = config['auth']['username'] if 'username' not in json_data else 
json_data['username'] + #password = config['auth']['password'] if 'password' not in json_data else json_data['password'] + #creds = (username, password) + creds = ResponseHelper.get_credentials(json_data, config) + current_app.logger.info("creds: {}".format(creds)) + + keys = set(json_data.keys()) + required = {'managedElementId'} + if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required)) + + param = { + 'managedElementId': json_data['managedElementId'] + } + + response_dict['vthResponse']['resultData'] = param + #api_response = requests.get(url, credentials=creds, params=param) + return response_dict +def get_rics_using_get(request, response_dict, config): + json_data = request.get_json() + #username = config['auth']['username'] if 'username' not in json_data else json_data['username'] + #password = config['auth']['password'] if 'password' not in json_data else json_data['password'] + #creds = (username, password) + creds = ResponseHelper.get_credentials(json_data, config) + current_app.logger.info("creds: {}".format(creds)) + param = { + "policyType": json_data["policyType"] if "policyType" in json_data else "" + } + + response_dict['vthResponse']['resultData'] = param + #api_response = requests.get(url, credentials=creds, params=param) + return response_dict diff --git a/a1-policy-manager-vth/app/helpers/service_helper.py b/a1-policy-manager-vth/app/helpers/service_helper.py new file mode 100644 index 0000000..16d9b92 --- /dev/null +++ b/a1-policy-manager-vth/app/helpers/service_helper.py @@ -0,0 +1,78 @@ + +from app.helpers import response_helper as ResponseHelper +from flask import current_app +from app.errors.bad_request_exception import BadRequestException + +def get_services_using_get(request, response_dict, config): + json_data = request.get_json() + #username = config['auth']['username'] if 'username' not in json_data else json_data['username'] + #password = config['auth']['password'] if 'password' not in json_data else json_data['password'] + #creds = (username, password) + creds = ResponseHelper.get_credentials(json_data, config) + current_app.logger.info("creds: {}".format(creds)) + + param = { + 'name': json_data['name'] if 'name' in json_data else "" + } + + response_dict['vthResponse']['resultData'] = param + #api_response = requests.get(url, credentials=creds, params=param) + return response_dict +def delete_services_using_delete(request, response_dict, config): + json_data = request.get_json() + #username = config['auth']['username'] if 'username' not in json_data else json_data['username'] + #password = config['auth']['password'] if 'password' not in json_data else json_data['password'] + #creds = (username, password) + creds = ResponseHelper.get_credentials(json_data, config) + current_app.logger.info("creds: {}".format(creds)) + + keys = set(json_data.keys()) + required = {'name'} + if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required)) + + param = { + 'name': json_data['name'] + } + + response_dict['vthResponse']['resultData'] = param + #api_response = requests.get(url, credentials=creds, params=param) + return response_dict +def put_service_using_put(request, response_dict, config): + json_data = request.get_json() + #username = config['auth']['username'] if 'username' not in json_data else json_data['username'] + #password = config['auth']['password'] if 'password' not in json_data else json_data['password'] + #creds = (username, password) + creds = 
ResponseHelper.get_credentials(json_data, config)
+    current_app.logger.info("creds: {}".format(creds))
+
+    keys = set(json_data.keys())
+    required = {'registrationInfo'}
+    if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required))
+
+    param = {
+        'registrationInfo': json_data['registrationInfo']
+    }
+
+    response_dict['vthResponse']['resultData'] = param
+    #api_response = requests.get(url, credentials=creds, params=param)
+    return response_dict
+
+def keep_alive_service_using_put(request, response_dict, config):
+    json_data = request.get_json()
+    #username = config['auth']['username'] if 'username' not in json_data else json_data['username']
+    #password = config['auth']['password'] if 'password' not in json_data else json_data['password']
+    #creds = (username, password)
+    creds = ResponseHelper.get_credentials(json_data, config)
+    current_app.logger.info("creds: {}".format(creds))
+
+    keys = set(json_data.keys())
+    required = {'name'}
+    if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required))
+
+    param = {
+        'name': json_data['name']
+    }
+
+    response_dict['vthResponse']['resultData'] = param
+    #api_response = requests.get(url, credentials=creds, params=param)
+    return response_dict
diff --git a/a1-policy-manager-vth/app/helpers/time_helper.py b/a1-policy-manager-vth/app/helpers/time_helper.py
new file mode 100644
index 0000000..b882d0b
--- /dev/null
+++ b/a1-policy-manager-vth/app/helpers/time_helper.py
@@ -0,0 +1,24 @@
+"""
+    Module Info:
+"""
+import datetime
+
+def unix_time_millis(d_time):
+    """
+    Args:
+    Returns:
+    Examples:
+    """
+    epoch = datetime.datetime.utcfromtimestamp(0)
+    return (d_time - epoch).total_seconds() * 1000.0
+
+def timed_function(func):
+    """
+    Args:
+    Returns:
+    Examples:
+    """
+    start_time = unix_time_millis(datetime.datetime.now())
+    func()
+    end_time = unix_time_millis(datetime.datetime.now())
+    return end_time - start_time
diff --git a/a1-policy-manager-vth/app/models/__init__.py b/a1-policy-manager-vth/app/models/__init__.py
new file mode 100644
index 0000000..52319a0
--- /dev/null
+++ b/a1-policy-manager-vth/app/models/__init__.py
@@ -0,0 +1,6 @@
+
+"""
+    Module Info:
+    Anything imported to this file will be available to outside modules.
+    Only import methods that can be used and are used by outside modules
+"""
diff --git a/a1-policy-manager-vth/app/routes/__init__.py b/a1-policy-manager-vth/app/routes/__init__.py
new file mode 100644
index 0000000..89419e1
--- /dev/null
+++ b/a1-policy-manager-vth/app/routes/__init__.py
@@ -0,0 +1,21 @@
+"""
+    Module Info:
+    Anything imported to this file will be available to outside modules.
+    Routes need to be exported to be usable; if removed, they will not be found and the
+    response will be a 500.
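+    (For example, dropping `from .policy import *` below would leave every
+    /policy* route unregistered.)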
+ ROUTE order matters, because ROUTE is like a global var used by all the other modules + it needs to be above them all +""" +from flask import Blueprint +from app.helpers.response_helper import get_config + +ROUTES = Blueprint('routes', __name__) +config = get_config("config.ini") + +from .policy import * +from .ric import * +from .service import * +from .info import * +from .errors import ERRORS diff --git a/a1-policy-manager-vth/app/routes/errors.py b/a1-policy-manager-vth/app/routes/errors.py new file mode 100644 index 0000000..43e1ec1 --- /dev/null +++ b/a1-policy-manager-vth/app/routes/errors.py @@ -0,0 +1,33 @@ +""" +Module Info: +""" +from flask import jsonify, current_app, Blueprint +from app.helpers.error_helper import error_dic +from app.errors.bad_request_exception import BadRequestException +import traceback + +ERRORS = Blueprint('errors', __name__) + +@ERRORS.app_errorhandler(BadRequestException) +def handle_bad_request(error): + """ + Args: + Returns: + Examples: + """ + current_app.logger.info(error) + response = error_dic(error, error.status_code, error.message) + print(traceback.format_exc()) + return jsonify(response), error.status_code + +@ERRORS.app_errorhandler(Exception) +def handle_error(error): + """ + Args: + Returns: + Examples: + """ + status_code = 500 + response = error_dic(error, status_code) + print(traceback.format_exc()) + return jsonify(response), status_code diff --git a/a1-policy-manager-vth/app/routes/info.py b/a1-policy-manager-vth/app/routes/info.py new file mode 100644 index 0000000..7090cf3 --- /dev/null +++ b/a1-policy-manager-vth/app/routes/info.py @@ -0,0 +1,76 @@ +""" +Args: +Returns: +Examples: +""" +import json +import datetime +from flask import current_app, jsonify, request +import time +import requests +from app.errors.bad_request_exception import BadRequestException +from app.helpers.time_helper import unix_time_millis, timed_function +from app.helpers.response_helper import vth_response_dic +from app.helpers import response_helper as ResponseHelper +from app.helpers import action_helper as Info +from . 
import config, ROUTES
+
+
+@ROUTES.route("/handle_action", methods=['POST'])
+def handle_action_request():
+    return ResponseHelper.route_check(config=config, post_function = Info.execute_action)
+
+
+@ROUTES.route("/", methods=['GET'])
+def get_base():
+    """
+    Args:
+    Returns:
+    Examples:
+    """
+    current_app.logger.info(request.method)
+    response = vth_response_dic()
+    data = current_app.url_map
+    rules = []
+    methods_list = []
+    for rule in data.iter_rules():
+        ma = {rule.rule:[]}
+        for val in rule.methods:
+            if (val != "OPTIONS") and (val !="HEAD"):
+                #print(val)
+                ma[rule.rule].append(val)
+        rules.append(ma)
+
+        # methods_set.add(rule.methods)
+        #print(rule.methods)
+    #print(rules)
+    response["vthResponse"]["resultData"] = rules
+    #current_app.logger.info(current_app.url_map)
+    current_app.logger.debug("hit base endpoint")
+    return jsonify(response)
+
+@ROUTES.route("/health", methods=['GET'])
+def get_health():
+    """
+    Args:
+    Returns:
+    Examples:
+    """
+    current_app.logger.debug("hit health point")
+    return "UP"
+
+@ROUTES.route("/status", methods=['GET'])
+def get_status():
+    """
+    Args:
+    Returns:
+    Examples:
+    """
+    suma = lambda: time.sleep(1)
+    #current_app.logger.info(current_app.url_map)
+    current_app.logger.info(unix_time_millis(datetime.datetime.now()))
+    current_app.logger.info(timed_function(suma))
+    current_app.logger.debug("some stuff")
+    #raise Exception("some error")
+    raise BadRequestException()
+    return "Running"
diff --git a/a1-policy-manager-vth/app/routes/policy.py b/a1-policy-manager-vth/app/routes/policy.py
new file mode 100644
index 0000000..588397f
--- /dev/null
+++ b/a1-policy-manager-vth/app/routes/policy.py
@@ -0,0 +1,245 @@
+
+import datetime
+import json
+import logging
+from logging import FileHandler
+import os
+
+import requests
+from flask import Flask, request, jsonify, current_app
+from . import config, ROUTES
+from app.helpers import policy_helper as Policy
+from app.helpers import response_helper as ResponseHelper
+from app.errors.bad_request_exception import BadRequestException
+
+
+
+def sendCallback(url, data):
+    try:
+        if type(data) is not dict:
+            data = {"msg": data}
+        current_app.logger.info("sending callback")
+        requests.post(url, json=data)
+    except Exception as e:
+        current_app.logger.info(e)
+    return
+
+def unix_time_millis(dt):
+    epoch = datetime.datetime.utcfromtimestamp(0)
+    return (dt - epoch).total_seconds() * 1000.0
+
+
+def route_check2(get_function=None, post_function=None, put_function=None, delete_function=None):
+    """
+    Info:
+        Since all routes do the same pre-check and have a similar skeleton, this function just refactored the pre-check for code reuse
+    Arguments (**kwargs): pass in the specified key(s) and method(s) that handle the type of method, method must be allowed by route decorator
+        get_function => type: function
+        put_function => type: function
+        delete_function => type: function
+    Returns:
+        returns the return of the function call, typically a jsonified response.
+        you can capture response in a var and execute logic or you can just return the function call/response
+    E.G.:
+        response = route_check(post_function = handle_post)
+        return route_check(get_function = handle_get, post_function = handle_post)
+    """
+    response_dict = ResponseHelper.vth_response_dic()
+    start_time = unix_time_millis(datetime.datetime.now())
+    status_code = 200
+    if request.is_json and ResponseHelper.valid_json(request.data):
+        if(request.method == 'GET'):
+            response_dict = get_function(request, response_dict, config)
+        elif(request.method == 'POST'):
+            response_dict = post_function(request, response_dict, config)
+        elif(request.method == 'PUT'):
+            response_dict = put_function(request, response_dict, config)
+        elif(request.method == 'DELETE'):
+            response_dict = delete_function(request, response_dict, config)
+    else:
+        raise BadRequestException(406, "Invalid Json")
+    end_time = unix_time_millis(datetime.datetime.now())
+    response_dict['vthResponse']['testDurationMS'] = end_time-start_time
+    return jsonify(response_dict), status_code
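+# Illustrative wiring (editorial sketch; the handler names are hypothetical):
+# a route maps each HTTP verb it accepts to a helper and lets route_check2
+# dispatch on request.method, e.g.:
+#
+#   @ROUTES.route("/example", methods=['GET', 'PUT'])
+#   def handle_example():
+#       return route_check2(get_function=do_get, put_function=do_put)
+#
+# where do_get/do_put take (request, response_dict, config) and return the
+# filled-in response_dict.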
+
+
+@ROUTES.route("/policies", methods=['GET'])
+def policies():
+    pass
+
+@ROUTES.route("/policy", methods=['GET', 'PUT', 'DELETE'])
+def handle_policy():
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_using_get, put_function = Policy.put_policy_using_put, delete_function=Policy.delete_policy_using_delete)
+
+
+@ROUTES.route("/policy_ids", methods=['GET'])
+def handle_policy_ids():
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_ids_using_get)
+
+@ROUTES.route("/policy_schemas", methods=['GET'])
+def handle_policy_schemas():
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_schemas_using_get)
+
+@ROUTES.route("/policy_schema", methods=['GET'])
+def handle_policy_schema():
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_schema_using_get)
+
+@ROUTES.route("/policy_status", methods=['GET'])
+def handle_policy_status():
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_status_using_get)
+
+@ROUTES.route("/policy_types", methods=['GET'])
+def handle_policy_types():
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_types_using_get)
+
+
+@ROUTES.route("/", methods=['POST'])
+def executeRicRequest():
+    response_data = {
+        'vthResponse': {
+            'testDuration': '',
+            'dateTimeUTC': str(datetime.datetime.now()),
+            'abstractMessage': '',
+            'resultData': {}
+        }
+    }
+
+    startTime = unix_time_millis(datetime.datetime.now())
+    ret_url = request.args.get('retURL')
+    try:
+        if not request.is_json:
+            raise ValueError("request must be json")
+
+        requestData = request.get_json()
+
+        current_app.logger.info("A1 requestData:" + str(requestData))
+
+        action = requestData['action'].lower()
+        _check_incoming_request(requestData)
+
+        os.environ['NO_PROXY'] = '127.0.0.1'  # TODO: only needed for testing with the mock server; remove in the final version
+        with open('config.json') as configFile:
+            config = json.load(configFile)
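+        # Illustrative shape of the config.json consumed below (editorial
+        # sketch; the keys are the ones this handler reads, the path values
+        # are placeholders):
+        #   {
+        #       "base_address": "http://<a1-mediator-host>:<port>",
+        #       "actions_path": {
+        #           "health_check": "<path>",
+        #           "list_policy_instance": "<path with {policy_type_id}>"
+        #       }
+        #   }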
+
+        baseAddress = config['base_address']
+        if action == 'health_check' or action == 'list_policy':
+            res = requests.get(baseAddress + config['actions_path'][action])
+            response_data['vthResponse']['resultData']['statusCode'] = res.status_code
+            if action == 'health_check':
+                response_data['vthResponse']['resultData']['resultOutput'] = res.text
+            else:
+                response_data['vthResponse']['resultData']['resultOutput'] = res.json()
+        elif action == 'list_policy_instance':
+            res = requests.get(baseAddress + config['actions_path'][action]
+                               .format(policy_type_id=requestData['policy_type_id']))
+            response_data['vthResponse']['resultData']['statusCode'] = res.status_code
+            response_data['vthResponse']['resultData']['resultOutput'] = res.json()
+        elif action == 'get_policy_instance_status':
+            res = requests.get(baseAddress + config['actions_path'][action]
+                               .format(policy_type_id=requestData['policy_type_id'],
+                                       policy_instance_id=requestData['policy_instance_id']))
+            response_data['vthResponse']['resultData']['statusCode'] = res.status_code
+            response_data['vthResponse']['resultData']['resultOutput'] = res.json()
+        elif action == 'edit_policy':
+            res = _send_edit_request(requestData, config)
+            response_data['vthResponse']['resultData']['statusCode'] = res.status_code
+            if requestData['request_type'].lower() == 'get' and res.status_code == 200:
+                response_data['vthResponse']['resultData']['resultOutput'] = res.json()
+            else:
+                response_data['vthResponse']['resultData']['resultOutput'] = res.text
+        elif action == 'edit_policy_instance':
+            res = _send_edit_request(requestData, config)
+            response_data['vthResponse']['resultData']['statusCode'] = res.status_code
+            if requestData['request_type'].lower() == 'get' and res.status_code == 200:
+                response_data['vthResponse']['resultData']['resultOutput'] = res.json()
+            else:
+                response_data['vthResponse']['resultData']['resultOutput'] = res.text
+
+    except Exception as ex:
+        endTime = unix_time_millis(datetime.datetime.now())
+        totalTime = endTime - startTime
+        response_data['vthResponse']['testDuration'] = totalTime
+        response_data['vthResponse']['abstractMessage'] = str(ex)
+        return jsonify(response_data)
+
+    endTime = unix_time_millis(datetime.datetime.now())
+    totalTime = endTime - startTime
+
+    response_data['vthResponse']['testDuration'] = totalTime
+
+    if ret_url is not None:
+        sendCallback(ret_url, response_data)
+        return '', 200
+
+    return jsonify(response_data), 200
+
+
+def _send_edit_request(request_data, config):
+    baseAddress = config['base_address']
+    path = ''
+    action = request_data['action']
+    policy_type_id = request_data['policy_type_id']
+    request_type = request_data['request_type']
+    if action == "edit_policy":
+        path = baseAddress + config['actions_path'][action].format(policy_type_id=policy_type_id)
+    if action == 'edit_policy_instance':
+        instance_id = request_data['policy_instance_id']
+        path = baseAddress + config['actions_path'][action].format(policy_type_id=policy_type_id,
+                                                                   policy_instance_id=instance_id)
+    if request_type == 'get':
+        return requests.get(path)
+    if request_type == 'put':
+        payload = request_data['payload']
+        return requests.put(path, payload)
+    if request_type == 'delete':
+        return requests.delete(path)
+
+
+def _check_incoming_request(requestData):  # check if the request is valid
+    if 'action' not in requestData:
+        raise KeyError('no action was specified')
+
+    action = requestData['action'].lower()
+    
edit_actions = ['edit_policy', 'edit_policy_instance'] + requires_policy_id = ['edit_policy', 'list_policy_instance' + , 'edit_policy_instance', 'get_policy_instance_status'] + requires_policy_instance_id = ['edit_policy_instance', 'get_policy_instance_status'] + possible_actions = ['health_check', 'list_policy', 'edit_policy', 'list_policy_instance' + , 'edit_policy_instance', 'get_policy_instance_status'] + possible_request_type = ['get', 'put', 'delete'] + + if action not in possible_actions: + raise KeyError("invalid action") + if action in edit_actions: # request type is required + if 'request_type' not in requestData: + raise KeyError('this action: ' + action + ' requires a request type') + if requestData['request_type'] not in possible_request_type: + raise KeyError('this request_type: ' + requestData['request_type'] + ' is not valid') + if requestData['request_type'] == 'put' and 'payload' not in requestData: + raise KeyError('put request requires a payload') + if action in requires_policy_id: + if 'policy_type_id' not in requestData: + raise KeyError('this action: ' + action + ' requires a policy_type_id') + if action in requires_policy_instance_id: + if 'policy_instance_id' not in requestData: + raise KeyError('this action: ' + action + ' requires a policy_instance_id') diff --git a/a1-policy-manager-vth/app/routes/ric.py b/a1-policy-manager-vth/app/routes/ric.py new file mode 100644 index 0000000..8441ac6 --- /dev/null +++ b/a1-policy-manager-vth/app/routes/ric.py @@ -0,0 +1,12 @@ + +from app.helpers import response_helper as ResponseHelper +from app.helpers import ric_helper as Ric +from . import config, ROUTES + +@ROUTES.route("/ric", methods=['GET']) +def handle_ric(): + return ResponseHelper.route_check(config=config, get_function=Ric.get_ric_using_get) + +@ROUTES.route("/rics", methods=['GET']) +def handle_rics(): + return ResponseHelper.route_check(config=config, get_function=Ric.get_rics_using_get) diff --git a/a1-policy-manager-vth/app/routes/service.py b/a1-policy-manager-vth/app/routes/service.py new file mode 100644 index 0000000..e06bf94 --- /dev/null +++ b/a1-policy-manager-vth/app/routes/service.py @@ -0,0 +1,16 @@ +from app.helpers import response_helper as ResponseHelper +from app.helpers import service_helper as Service +from . 
import config, ROUTES
+
+@ROUTES.route("/services", methods=['GET', 'DELETE'])
+def handle_services():
+    return ResponseHelper.route_check(config=config, get_function=Service.get_services_using_get, delete_function=Service.delete_services_using_delete)
+
+
+@ROUTES.route("/service", methods=['PUT'])
+def handle_service():
+    return ResponseHelper.route_check(config=config, put_function=Service.put_service_using_put)
+
+@ROUTES.route("/services/keepalive", methods=['PUT'])
+def handle_services_keepalive():
+    return ResponseHelper.route_check(config=config, put_function=Service.keep_alive_service_using_put)
diff --git a/a1-policy-manager-vth/config.ini b/a1-policy-manager-vth/config.ini
new file mode 100644
index 0000000..e9bc817
--- /dev/null
+++ b/a1-policy-manager-vth/config.ini
@@ -0,0 +1,14 @@
+[auth]
+creds_enabled= %(USE_CRED)s
+username = %(USER)s
+password = %(PW)s
+[api]
+base_url= %(API_URL)s
+port= %(API_PORT)s
+[resource]
+proxy_enabled = %(USE_PROXY)s
+https_proxy= %(HTTPS)s
+http_proxy= %(HTTP)s
+base_address = %(API_URL)s
+publish = /{topic_name}
+subscribe = /{topic_name}/{consumer_group}/{consumer_id}
diff --git a/a1-policy-manager-vth/doc/a1-documentation.docx b/a1-policy-manager-vth/doc/a1-documentation.docx
new file mode 100644
index 0000000..dada0b5
Binary files /dev/null and b/a1-policy-manager-vth/doc/a1-documentation.docx differ
diff --git a/a1-policy-manager-vth/docker/Dockerfile b/a1-policy-manager-vth/docker/Dockerfile
new file mode 100644
index 0000000..960368c
--- /dev/null
+++ b/a1-policy-manager-vth/docker/Dockerfile
@@ -0,0 +1,18 @@
+FROM python:3.7.4
+
+RUN python --version
+
+ADD pip-requirements.txt pip-requirements.txt
+ADD app app
+ADD config.ini config.ini
+ADD run.py run.py
+
+RUN mkdir -p /otf/logs
+
+RUN python -m pip install --proxy http://one.proxy.att.com:8080 -r pip-requirements.txt
+
+ENV USER=default_user
+ENV PW=default_pass
+
+
+ENTRYPOINT ["python", "run.py"]
diff --git a/a1-policy-manager-vth/docker/container-tag.yaml b/a1-policy-manager-vth/docker/container-tag.yaml
new file mode 100644
index 0000000..ee078db
--- /dev/null
+++ b/a1-policy-manager-vth/docker/container-tag.yaml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) 2019 AT&T Intellectual Property.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+tag: 0.0.1
diff --git a/a1-policy-manager-vth/helm/a1-policy-manager-vth/.helmignore b/a1-policy-manager-vth/helm/a1-policy-manager-vth/.helmignore
new file mode 100644
index 0000000..daebc7d
--- /dev/null
+++ b/a1-policy-manager-vth/helm/a1-policy-manager-vth/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/a1-policy-manager-vth/helm/a1-policy-manager-vth/Chart.yaml b/a1-policy-manager-vth/helm/a1-policy-manager-vth/Chart.yaml new file mode 100644 index 0000000..66e0b29 --- /dev/null +++ b/a1-policy-manager-vth/helm/a1-policy-manager-vth/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for the a1 policy manager Virtual Test Head +name: a1-policy-manager-vth +version: 0.0.1 diff --git a/a1-policy-manager-vth/helm/a1-policy-manager-vth/templates/deployment.yaml b/a1-policy-manager-vth/helm/a1-policy-manager-vth/templates/deployment.yaml new file mode 100644 index 0000000..89d053d --- /dev/null +++ b/a1-policy-manager-vth/helm/a1-policy-manager-vth/templates/deployment.yaml @@ -0,0 +1,137 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ .Values.appName}} + namespace: {{.Values.namespace}} + labels: + app: {{ .Values.appName}} + version: {{.Values.version}} +spec: + revisionHistoryLimit: 1 + minReadySeconds: 10 + strategy: + # indicate which strategy we want for rolling update + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + replicas: {{ .Values.replicas}} + selector: + matchLabels: + app: {{ .Values.appName}} + version: {{.Values.version}} + template: + metadata: + labels: + app: {{ .Values.appName}} + version: {{.Values.version}} + spec: + serviceAccount: default + volumes: + - name: {{ .Values.appName}}-cert-volume + secret: + secretName: {{.Values.sharedCert}} + optional: true + items: + - key: PEM_CERT + path: otf.pem + - key: PEM_KEY + path: privateKey.pem +# {{ if or (eq .Values.env "st") (eq .Values.env "prod-dr")}} TODO UNCOMMENT WHEN PUSHING TO ORAN +# {{else}} +# - name: logging-pvc +# persistentVolumeClaim: +# {{if eq .Values.env "prod"}} +# claimName: {{ .Values.pvc.prod | quote }} +# {{ else }} +# claimName: {{ .Values.pvc.dev | quote }} +# {{ end }} +# {{end}} + containers: + - name: {{ .Values.appName}} + image: {{ .Values.image}} + imagePullPolicy: Always + ports: + - name: http + containerPort: 6000 + nodePort: {{.Values.nodePort}} + protocol: TCP +# {{ if eq .Values.env "st"}} TODO UNCOMMENT FOR ORAN? 
+# resources: +# limits: +# memory: "512Mi" +# cpu: "500m" +# requests: +# memory: "256Mi" +# cpu: "100m" +# {{else}} +# resources: +# limits: +# memory: "1Gi" +# cpu: "1" +# requests: +# memory: "1Gi" +# cpu: "1" +# {{end}} + env: + - name: NAMESPACE + value: {{.Values.namespace}} + - name: APP_NAME + value: {{ .Values.appName}} + - name: APP_VERSION + value: {{.Values.version}} + - name: USE_CRED + value: {{.Values.auth.enabled | quote }} + - name: USER + valueFrom: + secretKeyRef: + name: {{ .Values.appName}} + key: api_user + optional: true + - name: PW + valueFrom: + secretKeyRef: + name: {{ .Values.appName}} + key: api_pass + optional: true + - name: USE_PROXY + value: {{.Values.proxy.enabled | quote }} + - name: HTTPS + value: {{.Values.proxy.https | quote }} + - name: HTTP + value: {{.Values.proxy.http | quote }} + - name: API_URL + value: {{.Values.api.base_url}} + - name: API_PORT + value: {{.Values.api.port | quote }} + volumeMounts: + - name: {{.Values.appName}}-cert-volume + mountPath: /opt/cert +# {{ if or (eq .Values.env "st") (eq .Values.env "prod-dr")}} +# {{else}} +# - name: logging-pvc +# mountPath: "/otf/logs" +# {{end}} + livenessProbe: + httpGet: + path: {{.Values.health}} + port: http + scheme: HTTP + httpHeaders: + - name: X-Custom-Header + value: Alive + initialDelaySeconds: 30 + timeoutSeconds: 30 + periodSeconds: 30 + readinessProbe: + httpGet: + path: {{.Values.health}} + port: http + scheme: HTTP + httpHeaders: + - name: X-Custom-Header + value: Ready + initialDelaySeconds: 30 + timeoutSeconds: 30 + periodSeconds: 30 + restartPolicy: Always diff --git a/a1-policy-manager-vth/helm/a1-policy-manager-vth/templates/secret.yaml b/a1-policy-manager-vth/helm/a1-policy-manager-vth/templates/secret.yaml new file mode 100644 index 0000000..1aabe3d --- /dev/null +++ b/a1-policy-manager-vth/helm/a1-policy-manager-vth/templates/secret.yaml @@ -0,0 +1,9 @@ + +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.appName}} +type: Opaque +data: + api_user: {{ .Values.auth.user | b64enc }} + api_pass: {{ .Values.auth.pw | b64enc }} diff --git a/a1-policy-manager-vth/helm/a1-policy-manager-vth/templates/service.yaml b/a1-policy-manager-vth/helm/a1-policy-manager-vth/templates/service.yaml new file mode 100644 index 0000000..291f9fc --- /dev/null +++ b/a1-policy-manager-vth/helm/a1-policy-manager-vth/templates/service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.appName }} + namespace: {{ .Values.namespace}} + labels: + app: {{ .Values.appName }} + version: {{ .Values.version}} +spec: + type: NodePort + ports: + - name: http + port: 6000 + protocol: TCP + nodePort: {{ .Values.nodePort}} + selector: + app: {{ .Values.appName }} + version: {{ .Values.version}} diff --git a/a1-policy-manager-vth/helm/a1-policy-manager-vth/values.yaml b/a1-policy-manager-vth/helm/a1-policy-manager-vth/values.yaml new file mode 100644 index 0000000..5c3e5a0 --- /dev/null +++ b/a1-policy-manager-vth/helm/a1-policy-manager-vth/values.yaml @@ -0,0 +1,23 @@ +appName: a1-policy-manager-vth +env: dev +version: 0.0.1-SNAPSHOT +image: dockercentral.it.att.com:5100/com.att.ecomp.otf.dev/a1-policy-manager-vth:0.0.1-SNAPSHOT +namespace: com-att-ecomp-otf-dev #org-oran-otf +nodePort: 32330 +replicas: 1 +health : /otf/vth/oran/a1/v1/health +sharedCert: otf-cert-secret-builder +pvc: + dev: org-oran-otf-dev-logs-pv + prod: org-oran-otf-prod-logs-pv +auth: + enabled: true + user: user + pw: pw +proxy: + enabled: false + http: + https: +api: + base_url: 
http://njcdtl08rg9907.itservices.sbc.com
+  port: 3000
diff --git a/a1-policy-manager-vth/pip-requirements.txt b/a1-policy-manager-vth/pip-requirements.txt
new file mode 100644
index 0000000..d25f478
--- /dev/null
+++ b/a1-policy-manager-vth/pip-requirements.txt
@@ -0,0 +1,4 @@
+flask
+flask-cors
+requests
+configparser
diff --git a/a1-policy-manager-vth/run.py b/a1-policy-manager-vth/run.py
new file mode 100644
index 0000000..613ff4f
--- /dev/null
+++ b/a1-policy-manager-vth/run.py
@@ -0,0 +1,52 @@
+"""
+# Copyright (c) 2019 AT&T Intellectual Property.                              #
+#                                                                             #
+# Licensed under the Apache License, Version 2.0 (the "License");             #
+# you may not use this file except in compliance with the License.            #
+# You may obtain a copy of the License at                                     #
+#                                                                             #
+#    http://www.apache.org/licenses/LICENSE-2.0                               #
+#                                                                             #
+# Unless required by applicable law or agreed to in writing, software         #
+# distributed under the License is distributed on an "AS IS" BASIS,           #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.    #
+# See the License for the specific language governing permissions and         #
+# limitations under the License.                                              #
+################################################################################
+# File name: a1-policy-manager-vth.py                                         #
+# Description: vth for A1 service                                             #
+# Date created: 04/22/2020                                                    #
+# Last modified: 04/30/2020                                                   #
+# Python Version: 3.7.4                                                       #
+# Author: Raul Gomez (rg9907)                                                 #
+# Email: rg9907@att.com                                                       #
+################################################################################
+"""
+import logging
+from logging import FileHandler
+from flask import Flask
+from flask.logging import create_logger
+from app.routes import ROUTES, ERRORS
+#from dotenv import load_dotenv

+#load dev env vars
+#load_dotenv()
+# create the Flask application
+APP = Flask(__name__)
+LOG = create_logger(APP)
+
+# Werkzeug request logging; raise this to WARNING to silence per-request prints.
+logging.getLogger("werkzeug").setLevel(logging.DEBUG)
+#logging.getLogger("werkzeug").setLevel(logging.WARNING)
+APP.register_blueprint(ERRORS)
+APP.register_blueprint(ROUTES, url_prefix="/otf/vth/oran/a1/v1")
+
+if __name__ == '__main__':
+    LOG_HANDLER = FileHandler('a1-policy-manager.log', mode='a')
+    LOG_HANDLER.setLevel(logging.INFO)
+    LOG.setLevel(logging.INFO)
+    LOG.addHandler(LOG_HANDLER)
+    #context = ('opt/cert/otf.pem', 'opt/cert/privateKey.pem')
+    # app.run(debug = False, host = '0.0.0.0', port = 5000, ssl_context = context)
+    APP.run(debug=False, host='0.0.0.0', port=6000)
+    #APP.run(debug=False, host='0.0.0.0', port=6000, ssl_context = context)
diff --git a/a1-sdnc-vth/.environ b/a1-sdnc-vth/.environ
new file mode 100644
index 0000000..0b3fa87
--- /dev/null
+++ b/a1-sdnc-vth/.environ
@@ -0,0 +1,9 @@
+#if using dotenv, change file name to .env and set the env variables below
+USER=String
+PW=String
+AUTH=Boolean
+PROXY=Boolean
+HTTP=String
+HTTPS=String
+API_URL=String
+API_PORT=Int
diff --git a/a1-sdnc-vth/Jenkinsfile b/a1-sdnc-vth/Jenkinsfile
new file mode 100644
index 0000000..a9bfbaa
--- /dev/null
+++ b/a1-sdnc-vth/Jenkinsfile
@@ -0,0 +1,158 @@
+#!/usr/bin/env groovy

+/* Copyright (c) 2019 AT&T Intellectual Property.                             #
+#                                                                             #
+#   Licensed under the Apache License, Version 2.0 (the "License");           #
+#   you may not use this file except in compliance with the License. 
# +# You may obtain a copy of the License at # +# # +# http://www.apache.org/licenses/LICENSE-2.0 # +# # +# Unless required by applicable law or agreed to in writing, software # +# distributed under the License is distributed on an "AS IS" BASIS, # +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # +# See the License for the specific language governing permissions and # +# limitations under the License. # +##############################################################################*/ + + +properties([[$class: 'ParametersDefinitionProperty', parameterDefinitions: [ + [$class: 'hudson.model.StringParameterDefinition', name: 'PHASE', defaultValue: "BUILD"], + [$class: 'hudson.model.StringParameterDefinition', name: 'ENV', defaultValue: "dev"], + [$class: 'hudson.model.StringParameterDefinition', name: 'MECHID', defaultValue: "m13591_otf_dev"], + [$class: 'hudson.model.StringParameterDefinition', name: 'KUBE_CONFIG', defaultValue: "kubeConfig-dev"], + [$class: 'hudson.model.StringParameterDefinition', name: 'TILLER_NAMESPACE', defaultValue: "com-att-ecomp-otf-dev"] +]]]) + + + echo "Build branch: ${env.BRANCH_NAME}" + + node("docker"){ + stage 'Checkout' + checkout scm + PHASES=PHASE.tokenize( '_' ); + echo "PHASES : " + PHASES + + + ARTIFACT_ID="a1-sdnc-vth"; + VERSION="0.0.1-SNAPSHOT"; + NAMESPACE="com.att.ecomp.otf" //TODO change back to org-otf-oran when done testing + DOCKER_REGISTRY="dockercentral.it.att.com:5100" + + if( ENV.equalsIgnoreCase("dev") ){ + IMAGE_NAME=DOCKER_REGISTRY + "/" + NAMESPACE + ".dev" + "/" + ARTIFACT_ID + ":" + VERSION + + } + if( ENV.equalsIgnoreCase("prod") || ENV.equalsIgnoreCase("prod-dr")){ + IMAGE_NAME=DOCKER_REGISTRY + "/" + NAMESPACE + ".prod" + "/" + ARTIFACT_ID + ":" + VERSION + + } + + if( ENV.equalsIgnoreCase("st") ){ + IMAGE_NAME=DOCKER_REGISTRY + "/" + NAMESPACE + ".st" + "/" + ARTIFACT_ID + ":" + VERSION + + } + + echo "Artifact: " + IMAGE_NAME + + withEnv(["PATH=${env.PATH}:${env.WORKSPACE}/linux-amd64", "HELM_HOME=${env.WORKSPACE}"]) { + + echo "PATH=${env.PATH}" + echo "HELM_HOME=${env.HELM_HOME}" + + if (PHASES.contains("BUILD")){ + dir("./a1-sdnc-vth"){ + stage 'Publish Artifact' + + withCredentials([usernamePassword(credentialsId: MECHID, usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) { + + echo "Artifact: " + IMAGE_NAME + + sh """ + docker login $DOCKER_REGISTRY --username $USERNAME --password $PASSWORD + docker build -t $IMAGE_NAME . 
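+                        # IMAGE_NAME was computed above as registry/namespace.env/artifact:version,
+                        # so the push below lands in the repo for the ENV chosen at build time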
+ docker push $IMAGE_NAME + """ + } + } + + + } + + if (PHASES.contains("DEPLOY") || PHASES.contains("UNDEPLOY")) { + + stage 'Init Helm' + + //check if helm exists if not install + if(fileExists('linux-amd64/helm')){ + sh """ + echo "helm is already installed" + """ + } + else{ + //download helm + sh """ + echo "installing helm" + wget https://storage.googleapis.com/kubernetes-helm/helm-v2.14.3-linux-amd64.tar.gz + tar -xf helm-v2.14.3-linux-amd64.tar.gz + rm helm-v2.14.3-linux-amd64.tar.gz + """ + } + + withCredentials([file(credentialsId: KUBE_CONFIG, variable: 'KUBECONFIG')]) { + + dir('a1-sdnc-vth/helm'){ + //check if charts are valid, and then perform dry run, if successful then upgrade/install charts + + if (PHASES.contains("UNDEPLOY") ) { + stage 'Undeploy' + + sh """ + helm delete --tiller-namespace=$TILLER_NAMESPACE --purge $ARTIFACT_ID + """ + } + + //NOTE Double quotes are used below to access groovy variables like artifact_id and tiller_namespace + if (PHASES.contains("DEPLOY") ){ + stage 'Deploy' + withCredentials([usernamePassword(credentialsId: MECHID, usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) { + + sh """ + echo "Validate Yaml" + helm lint $ARTIFACT_ID + + echo "View Helm Templates" + helm template $ARTIFACT_ID --set appName=$ARTIFACT_ID \ + --set appName=$ARTIFACT_ID \ + --set version=$VERSION \ + --set env=$ENV \ + --set image=$IMAGE_NAME \ + --set namespace=$TILLER_NAMESPACE + + echo "Perform Dry Run Of Install" + helm upgrade --tiller-namespace=$TILLER_NAMESPACE --install --dry-run $ARTIFACT_ID $ARTIFACT_ID \ + --set appName=$ARTIFACT_ID \ + --set version=$VERSION \ + --set env=$ENV \ + --set image=$IMAGE_NAME \ + --set namespace=$TILLER_NAMESPACE + + + echo "Helm Install/Upgrade" + helm upgrade --tiller-namespace=$TILLER_NAMESPACE --install $ARTIFACT_ID $ARTIFACT_ID \ + --set appName=$ARTIFACT_ID \ + --set version=$VERSION \ + --set env=$ENV \ + --set image=$IMAGE_NAME \ + --set namespace=$TILLER_NAMESPACE + + """ + } + } + + } + } + } + + } + } diff --git a/a1-sdnc-vth/app/__init__.py b/a1-sdnc-vth/app/__init__.py new file mode 100644 index 0000000..14b5496 --- /dev/null +++ b/a1-sdnc-vth/app/__init__.py @@ -0,0 +1,11 @@ +""" + Module Info: + Anything imported to this file will be available to outside modules. + Import everything using star, methods or anything that should not be + used by the outside modules should not be imported on the nested + __init__ files. +""" +from .routes import * +from .errors import * +from .models import * +from .helpers import * diff --git a/a1-sdnc-vth/app/errors/__init__.py b/a1-sdnc-vth/app/errors/__init__.py new file mode 100644 index 0000000..b491f42 --- /dev/null +++ b/a1-sdnc-vth/app/errors/__init__.py @@ -0,0 +1,6 @@ +""" + Module Info: + Anything imported to this file will be available to outside modules. 
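+    (e.g. `from app.errors import BadRequestException` resolves through the re-export below)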
+    Only import methods that can be used and are used by outside modules
+"""
+from .bad_request_exception import BadRequestException
diff --git a/a1-sdnc-vth/app/errors/bad_request_exception.py b/a1-sdnc-vth/app/errors/bad_request_exception.py
new file mode 100644
index 0000000..a3e3d22
--- /dev/null
+++ b/a1-sdnc-vth/app/errors/bad_request_exception.py
@@ -0,0 +1,21 @@
+"""
+Args:
+Returns:
+Examples:
+"""
+class BadRequestException(Exception):
+    """
+    Args:
+    Returns:
+    Examples:
+    """
+    def __init__(self, status_code=406, message="Not Acceptable Response"):
+        cases = {
+            401:"Unauthorized",
+            403:"Forbidden",
+            404:"Not Found",
+            423:"Not Operational"
+        }
+        super().__init__(cases.get(status_code, message))
+        self.status_code = status_code
+        self.message = message
diff --git a/a1-sdnc-vth/app/helpers/__init__.py b/a1-sdnc-vth/app/helpers/__init__.py
new file mode 100644
index 0000000..3313af8
--- /dev/null
+++ b/a1-sdnc-vth/app/helpers/__init__.py
@@ -0,0 +1,12 @@
+"""
+    Module Info:
+    Anything imported to this file will be available to outside modules.
+    Only import methods that can be used and are used by outside modules
+"""
+from .error_helper import *
+from .response_helper import *
+from .time_helper import *
+from .policy_helper import *
+from .service_helper import *
+from .ric_helper import *
+from .action_helper import *
diff --git a/a1-sdnc-vth/app/helpers/action_helper.py b/a1-sdnc-vth/app/helpers/action_helper.py
new file mode 100644
index 0000000..a4f7a3a
--- /dev/null
+++ b/a1-sdnc-vth/app/helpers/action_helper.py
@@ -0,0 +1,42 @@
+import json
+import ast
+from app.helpers import response_helper as ResponseHelper
+from flask import current_app, jsonify
+from app.errors.bad_request_exception import BadRequestException
+import requests
+
+
+def execute_action(request, response_dict, config):
+    headers = ResponseHelper.create_headers()
+    request_data = request.json
+    action_request = request_data.get("action").lower()
+
+    creds = ResponseHelper.get_credentials(request_data, config)
+    proxies = ResponseHelper.get_proxies(config)
+    url = ResponseHelper.create_url(config=config, uri_path="/restconf/operations/A1-ADAPTER-API:"+action_request)
+#    ret_url = request.args.get('retURL')
+
+    json_req = ast.literal_eval(request_data["action_data"]["jsonBody"])
+    current_app.logger.info("Requesting Url: {}, body: {}, auth: {}, proxies: {}".format(url, json_req, creds, proxies))
+    try:
+        res = requests.post(url, proxies=proxies, auth=creds, headers=headers, json=json_req)
+        response = {
+            "status_code":res.status_code,
+            "result": res.json()
+        }
+    except(json.decoder.JSONDecodeError):
+        response = {
+            "status_code":res.status_code,
+            "result": res.reason
+        }
+    except requests.exceptions.RequestException:
+        response = {
+            "status_code":504,
+            "result": "Something Happened"
+        }
+    finally:
+        response_dict['vthResponse']['resultData'] = response
+#    if ret_url is not None:
+#        ResponseHelper.sendCallback(ret_url,response_dict)
+#        return '',200
+    return response_dict
diff --git a/a1-sdnc-vth/app/helpers/error_helper.py b/a1-sdnc-vth/app/helpers/error_helper.py
new file mode 100644
index 0000000..b34cedf
--- /dev/null
+++ b/a1-sdnc-vth/app/helpers/error_helper.py
@@ -0,0 +1,51 @@
+from flask import current_app
+import datetime
+"""
+Args:
+Returns:
+Examples:
+"""
+
+def error_dic(error, status_code, response_message="Something went wrong, vth encountered an error"):
+    """
+    Args:
+    Returns:
+    Examples:
+    """
+    message = [str(x) for x in error.args]
+    error_log={
+        "error":{
+            "type":
error.__class__.__name__, + "message": message + } + } + response_data = { + "vthResponse": { + "testDurationMS": 0, + 'dateTimeUTC': str(datetime.datetime.now()), + "abstractMessage": "Failed", + "error":response_message, + "status_code": status_code, + "resultData": {} + } + } + current_app.logger.error(error_log) + return response_data + +def error_dic2(error, status_code=500): + """ + Args: + Returns: + Examples: + """ + message = [str(x) for x in error.args] + response = { + "status_code" : status_code, + "success": False, + "error":{ + "type": error.__class__.__name__, + "message": message + } + } + return response + diff --git a/a1-sdnc-vth/app/helpers/policy_helper.py b/a1-sdnc-vth/app/helpers/policy_helper.py new file mode 100644 index 0000000..ea4fedc --- /dev/null +++ b/a1-sdnc-vth/app/helpers/policy_helper.py @@ -0,0 +1,163 @@ +from app.helpers import response_helper as ResponseHelper +from flask import current_app +from app.errors.bad_request_exception import BadRequestException +import requests + +def get_policy_using_get(request, response_dict, config): + json_data = request.get_json() + if 'id' not in json_data: raise BadRequestException(406, "Request is missing id") + param = {'id': json_data['id']} + creds = ResponseHelper.get_credentials(json_data, config) + url = ResponseHelper.create_url(config=config, uri_path="/policy") + res = requests.get(url, auth=creds, params=param) + response = { + "status_code":res.status_code, + "result": res.json() + } + response_dict['vthResponse']['resultData'] = response + + return response_dict +def put_policy_using_put(request, response_dict, config): + json_data = request.get_json() + creds = ResponseHelper.get_credentials(json_data, config) + + current_app.logger.info("creds: {}".format(creds)) + + required = {'id', 'jsonBody', 'ric', 'service'} + param_keys = {'id', 'ric', 'service'} + optional = {"type"} + data_keys = param_keys.copy() + keys = set(json_data.keys()) + if not required <= keys: + raise BadRequestException(406, "Request is missing required values {}".format(required)) + if optional <= keys: data_keys.update(optional) + param = {} + body = {} + for key in data_keys: + param[key] = json_data[key] + body['jsonBody'] = json_data['jsonBody'] + + url = ResponseHelper.create_url(config=config, uri_path="/policy") + res = requests.put(url, auth=creds, params=param, json=body) + response = { + "status_code":res.status_code, + "result": res.json() + } + response_dict['vthResponse']['resultData'] = response + return response_dict +def delete_policy_using_delete(request, response_dict, config): + json_data = request.get_json() + creds = ResponseHelper.get_credentials(json_data, config) + + current_app.logger.info("creds: {}".format(creds)) + + keys = set(json_data.keys()) + required = {'id'} + if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required)) + param = {'id': json_data['id']} + + url = ResponseHelper.create_url(config=config, uri_path="/policy") + res = requests.delete(url, auth=creds, params=param) + response = { + "status_code":res.status_code, + "result": res.json() + } + response_dict['vthResponse']['resultData'] = response + return response_dict + +def get_policy_ids_using_get(request, response_dict, config): + json_data = request.get_json() + creds = ResponseHelper.get_credentials(json_data, config) + current_app.logger.info("creds: {}".format(creds)) + + param = { + "ric":json_data["ric"] if "ric" in json_data else "", + "service":json_data["service"] if 
"service" in json_data else "", + "type":json_data["type"] if "type" in json_data else "" + } + + url = ResponseHelper.create_url(config=config, uri_path="/policy_ids") + res = requests.get(url, auth=creds, params=param) + response = { + "status_code":res.status_code, + "result": res.json() + } + response_dict['vthResponse']['resultData'] = response + return response_dict + +def get_policy_schema_using_get(request, response_dict, config): + json_data = request.get_json() + #username = config['auth']['username'] if 'username' not in json_data else json_data['username'] + #password = config['auth']['password'] if 'password' not in json_data else json_data['password'] + #creds = (username, password) + creds = ResponseHelper.get_credentials(json_data, config) + current_app.logger.info("creds: {}".format(creds)) + + keys = set(json_data.keys()) + required = {'id'} + if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required)) + param = {'id': json_data['id']} + + url = ResponseHelper.create_url(config=config, uri_path="/policy_schema") + res = requests.get(url, auth=creds, params=param) + response = { + "status_code":res.status_code, + "result": res.json() + } + response_dict['vthResponse']['resultData'] = response + return response_dict +def get_policy_schemas_using_get(request, response_dict, config): + json_data = request.get_json() + #username = config['auth']['username'] if 'username' not in json_data else json_data['username'] + #password = config['auth']['password'] if 'password' not in json_data else json_data['password'] + #creds = (username, password) + creds = ResponseHelper.get_credentials(json_data, config) + current_app.logger.info("creds: {}".format(creds)) + + param = { + "ric":json_data['ric'] if 'ric' in json_data else "" + } + #api_response = requests.put(url, credentials=creds, params=param) + + url = ResponseHelper.create_url(config=config, uri_path="/policy_schemas") + res = requests.get(url, auth=creds, params=param) + response = { + "status_code":res.status_code, + "result": res.json() + } + response_dict['vthResponse']['resultData'] = response + return response_dict +def get_policy_status_using_get(request, response_dict, config): + json_data = request.get_json() + #username = config['auth']['username'] if 'username' not in json_data else json_data['username'] + #password = config['auth']['password'] if 'password' not in json_data else json_data['password'] + #creds = (username, password) + creds = ResponseHelper.get_credentials(json_data, config) + current_app.logger.info("creds: {}".format(creds)) + + keys = set(json_data.keys()) + required = {'id'} + if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required)) + param = { + "id":json_data["id"] + } + + response_dict['vthResponse']['resultData'] = param + #api_response = requests.get(url, credentials=creds, params=param) + return response_dict +def get_policy_types_using_get(request, response_dict, config): + json_data = request.get_json() + creds = ResponseHelper.get_credentials(json_data, config) + param = { + 'ric': json_data['ric'] if 'ric' in json_data else "" + } + + url = ResponseHelper.create_url(config=config, uri_path="/a1-p/policytypes") + res = requests.get(url, auth=creds, params=param) + response = { + "status_code":res.status_code, + "result": res.json() + } + response_dict['vthResponse']['resultData'] = response + return response_dict + diff --git a/a1-sdnc-vth/app/helpers/response_helper.py 
b/a1-sdnc-vth/app/helpers/response_helper.py new file mode 100644 index 0000000..833598f --- /dev/null +++ b/a1-sdnc-vth/app/helpers/response_helper.py @@ -0,0 +1,218 @@ +import ast +import requests +from configparser import ConfigParser +import os +import datetime +import json +from flask import request, jsonify, current_app +from app.helpers.time_helper import unix_time_millis +from app.errors.bad_request_exception import BadRequestException + +""" + Module Info: +""" +def create_headers(enable_cache=True, content_type="application/json", connection="Keep-Alive"): + headers = {'Cache-Control':'no-cache, no-store, must-revalidate', "Pragma":"no-cache", "Expires":"0"} if not enable_cache else {} + headers['content-type'] = content_type + headers['connection'] = connection + return headers +def create_url(config=None, uri_path = "/", url_string=None): + return config['api']['base_url'] +":"+ config['api']['port']+uri_path if url_string is None else url_string + +def valid_string_json(string, response_message="Invalid json string in query or jsonBody, format requires quoted json object e.g. \"{'key':'value, key2:{'innerKey':'innerValue'}}\""): + try: + string_to_dict = ast.literal_eval(string) + except(Exception): + raise BadRequestException(406, response_message) + return True +def route_check(config=None, get_function=None, post_function=None, put_function=None, delete_function=None): + """ + Info: + Since all routes do the same pre-check and have a similar skeleton, this function just refactored the pre-check for code reuse + Arguments (**kwargs): pass in the specified key(s) and method(s) that handle the type of method, method must be allowed by route decorator + get_function => type: function + put_function => type: function + delete_function => type: function + Returns: + returns the return of the function call, typically a jsonified response. 
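+        Note: the incoming request must carry a JSON body shaped like (hypothetical values):
+            {"action": "getA1Policy", "auth": {}, "action_data": {"query": "{}", "jsonBody": "{}"}}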
+ you can capture response in a var and execute logic or you can just return the function call/response + E.G.: + response = route_check(post_function = handle_post) + return route_check(get_function = handle_get, post_function = handle_post) + """ + if not request.is_json: raise BadRequestException(406, "Invalid Json Request") + + response_dict = vth_response_dic() + start_time = unix_time_millis(datetime.datetime.now()) + status_code = 200 + ret_url = request.args.get('retURL') + + query = "" + json_body = "" + request_data = request.json + json_keys = set(request_data) + action_request = request_data.get("action").lower() + valid_actions = {"geta1policytype", "geta1policy", "puta1policy", "deletea1policy", "geta1policystatus"} + required_keys = {"action", "auth", "action_data"} + + #check for valid action and json request contains required keys + if not required_keys <= json_keys: raise BadRequestException(406, "Json request is missing required keys {}".format(required_keys)) + if not action_request in valid_actions: raise BadRequestException(406, "Action is not supported {}".format(action_request)) + #check request's action_data key contains required keys + if 'query' not in request.json['action_data']: raise BadRequestException(406, "action_data must contain query and jsonBody ") + if 'jsonBody' not in request.json['action_data']: raise BadRequestException(406, "action_data must contain query and jsonBody") + + query = request.json['action_data']['query'] if 'query' in request.json['action_data'] else "" + json_body = request.json['action_data']['jsonBody'] if 'jsonBody' in request.json['action_data'] else "" + + if valid_string_json(query) and valid_string_json(json_body): + if(request.method == 'GET'): + response_dict = get_function(request, response_dict, config) + elif(request.method == 'POST'): + response_dict = post_function(request, response_dict, config) + elif(request.method == 'PUT'): + response_dict = put_function(request, response_dict, config) + elif(request.method == 'DELETE'): + response_dict = delete_function(request, response_dict, config) + else: + raise BadRequestException(406, "Invalid JSON Strings") + end_time = unix_time_millis(datetime.datetime.now()) + response_dict['vthResponse']['testDurationMS'] = end_time-start_time + if ret_url is not None: + sendCallback(ret_url,response_dict) + return '',200 + return jsonify(response_dict), status_code + +def get_proxies(config): + proxy_enabled = config.getboolean('resource', 'proxy_enabled') + req_proxies = { + 'http': None, + 'https': None + } + if not proxy_enabled: + return None + else: + req_proxies['http'] = config['resource']['http_proxy'] + req_proxies['https'] = config['resource']['https_proxy'] + return req_proxies +def get_credentials(json_data, config): + auth_enabled = config.getboolean('auth', 'creds_enabled') + if not auth_enabled: + return None + else: + username = config['auth']['username'] if 'username' not in json_data['auth'] else json_data['auth']['username'] + password = config['auth']['password'] if 'password' not in json_data['auth'] else json_data['auth']['password'] + return (username, password) +def vth_response_dic(): + """ + Args: + Returns: + Examples: + """ + response_data = { + "vthResponse": { + "testDurationMS": "", + 'dateTimeUTC': str(datetime.datetime.now()), + "abstractMessage": "Success", + "resultData": {} + } + } + return response_data +#TODO data is data from callback and not my json response +def sendCallback(url, data): + try: + if type(data) is not dict: + data = {"msg": 
data} + current_app.logger.info("sending callback") + requests.post(url, json=data) + except Exception as e: + current_app.logger.info(e) + return + +def get_request_data(request): + if not request.is_json: + raise ValueError("request must be json") + requestData = request.get_json() + return requestData + + +def valid_json(data): + + try: + _ = json.loads(data) + except ValueError as e: + return False + return True +def get_config(config_file_name): + config = ConfigParser(os.environ) + config.read(config_file_name) + return config + +def validate_request(request_data, isPublish=True): + return + missing_params = [] + + if 'topic_name' not in request_data: + missing_params.append("topic_name") + if isPublish: + if 'data' not in request_data: + missing_params.append('data') + else: + if 'consumer_group' not in request_data: + missing_params.append('consumer_group') + if 'consumer_id' not in request_data: + missing_params.append('consumer_id') + + if missing_params: + err_msg = '{} request requires the following: '.format('publish' if isPublish else 'subscribe') + err_msg += ','.join(missing_params) + raise KeyError(err_msg) + + +def build_url(config, request_data, is_publish=True): + if is_publish: + base_path = config['resource']['base_address'] + config['resource']['publish'] + topic_name = request_data['topic_name'] + publish_address = base_path.format(topic_name=topic_name) + return publish_address + + base_path = config['resource']['base_address'] + config['resource']['subscribe'] + topic_name = request_data['topic_name'] + consumer_group = request_data['consumer_group'] + consumer_id = request_data['consumer_id'] + subscribe_address = base_path.format(topic_name=topic_name, consumer_group=consumer_group, consumer_id=consumer_id) + if ('timeout' in request_data): + subscribe_address = (subscribe_address + '?timeout={}').format(request_data['timeout']) + return subscribe_address + + +def send_request(url, config, is_subscribe_request=False, payload=None): + # setup default values + auth_enabled = config.getboolean('auth', 'auth_enabled') + proxy_enabled = config.getboolean('resource', 'proxy_enabled') + username = '' + password = '' + req_proxies = { + 'http': None, + 'https': None + } + # place proxy and authentication information + if auth_enabled: + username = config['auth']['username'] + password = config['auth']['password'] + if proxy_enabled: + req_proxies['http'] = config['resource']['http_proxy'] + req_proxies['https'] = config['resource']['https_proxy'] + + # for subscribe request + if is_subscribe_request: + return requests.get(url, + auth=(username, password) if auth_enabled else None, + proxies=req_proxies if proxy_enabled else None) + # for publish request + req_headers = {'Content-type': 'application/json'} + return requests.post(url, + json=payload, + auth=(username, password) if auth_enabled else None, + proxies=req_proxies if proxy_enabled else None, + headers=req_headers) diff --git a/a1-sdnc-vth/app/helpers/ric_helper.py b/a1-sdnc-vth/app/helpers/ric_helper.py new file mode 100644 index 0000000..47d55c2 --- /dev/null +++ b/a1-sdnc-vth/app/helpers/ric_helper.py @@ -0,0 +1,37 @@ +from app.helpers import response_helper as ResponseHelper +from flask import current_app +from app.errors.bad_request_exception import BadRequestException + +def get_ric_using_get(request, response_dict, config): + json_data = request.get_json() + #username = config['auth']['username'] if 'username' not in json_data else json_data['username'] + #password = config['auth']['password'] if 
'password' not in json_data else json_data['password'] + #creds = (username, password) + creds = ResponseHelper.get_credentials(json_data, config) + current_app.logger.info("creds: {}".format(creds)) + + keys = set(json_data.keys()) + required = {'managedElementId'} + if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required)) + + param = { + 'managedElementId': json_data['managedElementId'] + } + + response_dict['vthResponse']['resultData'] = param + #api_response = requests.get(url, credentials=creds, params=param) + return response_dict +def get_rics_using_get(request, response_dict, config): + json_data = request.get_json() + #username = config['auth']['username'] if 'username' not in json_data else json_data['username'] + #password = config['auth']['password'] if 'password' not in json_data else json_data['password'] + #creds = (username, password) + creds = ResponseHelper.get_credentials(json_data, config) + current_app.logger.info("creds: {}".format(creds)) + param = { + "policyType": json_data["policyType"] if "policyType" in json_data else "" + } + + response_dict['vthResponse']['resultData'] = param + #api_response = requests.get(url, credentials=creds, params=param) + return response_dict diff --git a/a1-sdnc-vth/app/helpers/service_helper.py b/a1-sdnc-vth/app/helpers/service_helper.py new file mode 100644 index 0000000..16d9b92 --- /dev/null +++ b/a1-sdnc-vth/app/helpers/service_helper.py @@ -0,0 +1,78 @@ + +from app.helpers import response_helper as ResponseHelper +from flask import current_app +from app.errors.bad_request_exception import BadRequestException + +def get_services_using_get(request, response_dict, config): + json_data = request.get_json() + #username = config['auth']['username'] if 'username' not in json_data else json_data['username'] + #password = config['auth']['password'] if 'password' not in json_data else json_data['password'] + #creds = (username, password) + creds = ResponseHelper.get_credentials(json_data, config) + current_app.logger.info("creds: {}".format(creds)) + + param = { + 'name': json_data['name'] if 'name' in json_data else "" + } + + response_dict['vthResponse']['resultData'] = param + #api_response = requests.get(url, credentials=creds, params=param) + return response_dict +def delete_services_using_delete(request, response_dict, config): + json_data = request.get_json() + #username = config['auth']['username'] if 'username' not in json_data else json_data['username'] + #password = config['auth']['password'] if 'password' not in json_data else json_data['password'] + #creds = (username, password) + creds = ResponseHelper.get_credentials(json_data, config) + current_app.logger.info("creds: {}".format(creds)) + + keys = set(json_data.keys()) + required = {'name'} + if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required)) + + param = { + 'name': json_data['name'] + } + + response_dict['vthResponse']['resultData'] = param + #api_response = requests.get(url, credentials=creds, params=param) + return response_dict +def put_service_using_put(request, response_dict, config): + json_data = request.get_json() + #username = config['auth']['username'] if 'username' not in json_data else json_data['username'] + #password = config['auth']['password'] if 'password' not in json_data else json_data['password'] + #creds = (username, password) + creds = ResponseHelper.get_credentials(json_data, config) + current_app.logger.info("creds: 
{}".format(creds)) + + keys = set(json_data.keys()) + required = {'registrationInfo'} + if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required)) + + param = { + 'registrationInfo': json_data['registrationInfo'] + } + + response_dict['vthResponse']['resultData'] = param + #api_response = requests.get(url, credentials=creds, params=param) + return response_dict + +def keep_alive_service_using_put(request, response_dict, config): + json_data = request.get_json() + #username = config['auth']['username'] if 'username' not in json_data else json_data['username'] + #password = config['auth']['password'] if 'password' not in json_data else json_data['password'] + #creds = (username, password) + creds = ResponseHelper.get_credentials(json_data, config) + current_app.logger.info("creds: {}".format(creds)) + + keys = set(json_data.keys()) + required = {'name'} + if not required <= keys: raise BadRequestException(406, "Request is missing required values {}".format(required)) + + param = { + 'name': json_data['name'] + } + + response_dict['vthResponse']['resultData'] = param + #api_response = requests.get(url, credentials=creds, params=param) + return response_dict diff --git a/a1-sdnc-vth/app/helpers/time_helper.py b/a1-sdnc-vth/app/helpers/time_helper.py new file mode 100644 index 0000000..b882d0b --- /dev/null +++ b/a1-sdnc-vth/app/helpers/time_helper.py @@ -0,0 +1,24 @@ +""" + Module Info: +""" +import datetime + +def unix_time_millis(d_time): + """ + Args: + Returns: + Examples: + """ + epoch = datetime.datetime.utcfromtimestamp(0) + return (d_time - epoch).total_seconds() * 1000.0 + +def timed_function(func): + """ + Args: + Returns: + Examples: + """ + start_time = unix_time_millis(datetime.datetime.now()) + func() + end_time = unix_time_millis(datetime.datetime.now()) + return end_time - start_time diff --git a/a1-sdnc-vth/app/models/__init__.py b/a1-sdnc-vth/app/models/__init__.py new file mode 100644 index 0000000..52319a0 --- /dev/null +++ b/a1-sdnc-vth/app/models/__init__.py @@ -0,0 +1,6 @@ + +""" + Module Info: + Anything imported to this file will be available to outside modules. + Only imort methods that can be used and are used by outside modules +""" diff --git a/a1-sdnc-vth/app/routes/__init__.py b/a1-sdnc-vth/app/routes/__init__.py new file mode 100644 index 0000000..89419e1 --- /dev/null +++ b/a1-sdnc-vth/app/routes/__init__.py @@ -0,0 +1,19 @@ +""" + Module Info: + Anything imported to this file will be available to outside modules. + Routes need to be exported to be usable, if removed, routes will not be found and response + will be a 500. 
+    ROUTE order matters: ROUTE acts as a shared global used by the route modules below,
+    so it must be defined before they are imported
+"""
+from flask import Blueprint
+from app.helpers.response_helper import get_config
+
+ROUTES = Blueprint('routes', __name__)
+config = get_config("config.ini")
+
+from .policy import *
+from .ric import *
+from .service import *
+from .info import *
+from .errors import ERRORS
diff --git a/a1-sdnc-vth/app/routes/errors.py b/a1-sdnc-vth/app/routes/errors.py
new file mode 100644
index 0000000..43e1ec1
--- /dev/null
+++ b/a1-sdnc-vth/app/routes/errors.py
@@ -0,0 +1,33 @@
+"""
+Module Info:
+"""
+from flask import jsonify, current_app, Blueprint
+from app.helpers.error_helper import error_dic
+from app.errors.bad_request_exception import BadRequestException
+import traceback
+
+ERRORS = Blueprint('errors', __name__)
+
+@ERRORS.app_errorhandler(BadRequestException)
+def handle_bad_request(error):
+    """
+    Args:
+    Returns:
+    Examples:
+    """
+    current_app.logger.info(error)
+    response = error_dic(error, error.status_code, error.message)
+    print(traceback.format_exc())
+    return jsonify(response), error.status_code
+
+@ERRORS.app_errorhandler(Exception)
+def handle_error(error):
+    """
+    Args:
+    Returns:
+    Examples:
+    """
+    status_code = 500
+    response = error_dic(error, status_code)
+    print(traceback.format_exc())
+    return jsonify(response), status_code
diff --git a/a1-sdnc-vth/app/routes/info.py b/a1-sdnc-vth/app/routes/info.py
new file mode 100644
index 0000000..7090cf3
--- /dev/null
+++ b/a1-sdnc-vth/app/routes/info.py
@@ -0,0 +1,76 @@
+"""
+Args:
+Returns:
+Examples:
+"""
+import json
+import datetime
+from flask import current_app, jsonify, request
+import time
+import requests
+from app.errors.bad_request_exception import BadRequestException
+from app.helpers.time_helper import unix_time_millis, timed_function
+from app.helpers.response_helper import vth_response_dic
+from app.helpers import response_helper as ResponseHelper
+from app.helpers import action_helper as Info
+from . import config, ROUTES
+
+
+@ROUTES.route("/handle_action", methods=['POST'])
+def handle_action_request():
+    return ResponseHelper.route_check(config=config, post_function = Info.execute_action)
+
+
+@ROUTES.route("/", methods=['GET'])
+def get_base():
+    """
+    Args:
+    Returns:
+    Examples:
+    """
+    current_app.logger.info(request.method)
+    response = vth_response_dic()
+    data = current_app.url_map
+    rules = []
+    methods_list = []
+    for rule in data.iter_rules():
+        ma = {rule.rule:[]}
+        for val in rule.methods:
+            if (val != "OPTIONS") and (val !="HEAD"):
+                #print(val)
+                ma[rule.rule].append(val)
+        rules.append(ma)
+
+#        methods_set.add(rule.methods)
+        #print(rule.methods)
+    #print(rules)
+    response["vthResponse"]["resultData"] = rules
+    #current_app.logger.info(current_app.url_map)
+    current_app.logger.debug("hit base route, returning the url map")
+    return jsonify(response)
+
+@ROUTES.route("/health", methods=['GET'])
+def get_health():
+    """
+    Args:
+    Returns:
+    Examples:
+    """
+    current_app.logger.debug("hit health point")
+    return "UP"
+
+@ROUTES.route("/status", methods=['GET'])
+def get_status():
+    """
+    Args:
+    Returns:
+    Examples:
+    """
+    suma = lambda: time.sleep(1)
+    #current_app.logger.info(current_app.url_map)
+    current_app.logger.info(unix_time_millis(datetime.datetime.now()))
+    current_app.logger.info(timed_function(suma))
+    current_app.logger.debug("some stuff")
+    #raise Exception("some error")
+    raise BadRequestException()
+    return "Running"
diff --git a/a1-sdnc-vth/app/routes/policy.py b/a1-sdnc-vth/app/routes/policy.py
new file mode 100644
index 0000000..588397f
--- /dev/null
+++ b/a1-sdnc-vth/app/routes/policy.py
@@ -0,0 +1,225 @@
+
+import datetime
+import json
+import logging
+from logging import FileHandler
+import os
+
+import requests
+from flask import Flask, request, jsonify, current_app
+from . import config, ROUTES
+from app.helpers import policy_helper as Policy
+from app.helpers import response_helper as ResponseHelper
+from app.errors.bad_request_exception import BadRequestException
+
+
+
+def sendCallback(url, data):
+    try:
+        if type(data) is not dict:
+            data = {"msg": data}
+        current_app.logger.info("sending callback")
+        requests.post(url, json=data)
+    except Exception as e:
+        current_app.logger.info(e)
+    return
+
+def unix_time_millis(dt):
+    epoch = datetime.datetime.utcfromtimestamp(0)
+    return (dt - epoch).total_seconds() * 1000.0
+
+
+def route_check2(get_function=None, post_function=None, put_function=None, delete_function=None):
+    """
+    Info:
+        Since all routes do the same pre-check and have a similar skeleton, this function just refactored the pre-check for code reuse
+    Arguments (**kwargs): pass in the specified key(s) and method(s) that handle the type of method, method must be allowed by route decorator
+        get_function => type: function
+        put_function => type: function
+        delete_function => type: function
+    Returns:
+        returns the return of the function call, typically a jsonified response.
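+        e.g. a successful call produces a body like (illustrative values):
+            {"vthResponse": {"testDurationMS": 12.0, "dateTimeUTC": "...", "abstractMessage": "Success", "resultData": {...}}}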
+        you can capture response in a var and execute logic or you can just return the function call/response
+    E.G.:
+        response = route_check(post_function = handle_post)
+        return route_check(get_function = handle_get, post_function = handle_post)
+    """
+    response_dict = ResponseHelper.vth_response_dic()
+    start_time = unix_time_millis(datetime.datetime.now())
+    status_code = 200
+    if request.is_json and ResponseHelper.valid_json(request.data):
+        if(request.method == 'GET'):
+            response_dict = get_function(request, response_dict, config)
+        elif(request.method == 'POST'):
+            response_dict = post_function(request, response_dict, config)
+        elif(request.method == 'PUT'):
+            response_dict = put_function(request, response_dict, config)
+        elif(request.method == 'DELETE'):
+            response_dict = delete_function(request, response_dict, config)
+    else:
+        raise BadRequestException(406, "Invalid Json")
+    end_time = unix_time_millis(datetime.datetime.now())
+    response_dict['vthResponse']['testDurationMS'] = end_time-start_time
+    return jsonify(response_dict), status_code
+
+
+@ROUTES.route("/policies", methods=['GET'])
+def policies():
+    pass
+
+@ROUTES.route("/policy", methods=['GET', 'PUT', 'DELETE'])
+def handle_policy():
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_using_get, put_function = Policy.put_policy_using_put, delete_function=Policy.delete_policy_using_delete)
+
+
+@ROUTES.route("/policy_ids", methods=['GET'])
+def handle_policy_ids():
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_ids_using_get)
+
+@ROUTES.route("/policy_schemas", methods=['GET'])
+def handle_policy_schemas():
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_schemas_using_get)
+
+@ROUTES.route("/policy_schema", methods=['GET'])
+def handle_policy_schema():
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_schema_using_get)
+
+@ROUTES.route("/policy_status", methods=['GET'])
+def handle_policy_status():
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_status_using_get)
+
+@ROUTES.route("/policy_types", methods=['GET'])
+def handle_policy_types():
+    return ResponseHelper.route_check(config=config, get_function = Policy.get_policy_types_using_get)
+
+
+@ROUTES.route("/", methods=['POST'])
+def executeRicRequest():
+    response_data = {
+        'vthResponse': {
+            'testDuration': '',
+            'dateTimeUTC': str(datetime.datetime.now()),
+            'abstractMessage': '',
+            'resultData': {}
+        }
+    }
+
+    startTime = unix_time_millis(datetime.datetime.now())
+    ret_url = request.args.get('retURL')
+    try:
+        if not request.is_json:
+            raise ValueError("request must be json")
+
+        requestData = request.get_json()
+
+        current_app.logger.info("A1 requestData:" + str(requestData))
+
+        action = requestData['action'].lower()
+        _check_incoming_request(requestData)
+
+        os.environ['NO_PROXY'] = '127.0.0.1'  # TODO: only needed for local testing with the mock server; remove in the final version
+        with open('config.json') as configFile:
+            config = json.load(configFile)
+
+        baseAddress = config['base_address']
+        if action == 'health_check' or action == 'list_policy':
+            res = requests.get(baseAddress + config['actions_path'][action])
+            response_data['vthResponse']['resultData']['statusCode'] = res.status_code
+            if action == 'health_check':
+                response_data['vthResponse']['resultData']['resultOutput'] = res.text
+            else:
+                response_data['vthResponse']['resultData']['resultOutput'] = res.json()
+        elif action == 'list_policy_instance':
+            res = requests.get(baseAddress + config['actions_path'][action]
+                               .format(policy_type_id=requestData['policy_type_id']))
+            response_data['vthResponse']['resultData']['statusCode'] = res.status_code
+            response_data['vthResponse']['resultData']['resultOutput'] = res.json()
+        elif action == 'get_policy_instance_status':
+            res = requests.get(baseAddress + config['actions_path'][action]
+                               .format(policy_type_id=requestData['policy_type_id'],
+                                       policy_instance_id=requestData['policy_instance_id']))
+            response_data['vthResponse']['resultData']['statusCode'] = res.status_code
+            response_data['vthResponse']['resultData']['resultOutput'] = res.json()
+        elif action == 'edit_policy':
+            res = _send_edit_request(requestData, config)
+            response_data['vthResponse']['resultData']['statusCode'] = res.status_code
+            if requestData['request_type'].lower() == 'get' and res.status_code == 200:
+                response_data['vthResponse']['resultData']['resultOutput'] = res.json()
+            else:
+                response_data['vthResponse']['resultData']['resultOutput'] = res.text
+        elif action == 'edit_policy_instance':
+            res = _send_edit_request(requestData, config)
+            response_data['vthResponse']['resultData']['statusCode'] = res.status_code
+            if requestData['request_type'].lower() == 'get' and res.status_code == 200:
+                response_data['vthResponse']['resultData']['resultOutput'] = res.json()
+            else:
+                response_data['vthResponse']['resultData']['resultOutput'] = res.text
+
+    except Exception as ex:
+        endTime = unix_time_millis(datetime.datetime.now())
+        totalTime = endTime - startTime
+        response_data['vthResponse']['testDuration'] = totalTime
+        response_data['vthResponse']['abstractMessage'] = str(ex)
+        return jsonify(response_data)
+
+    endTime = unix_time_millis(datetime.datetime.now())
+    totalTime = endTime - startTime
+
+    response_data['vthResponse']['testDuration'] = totalTime
+
+    if ret_url is not None:
+        sendCallback(ret_url, response_data)
+        return '', 200
+
+    return jsonify(response_data), 200
+
+
+def _send_edit_request(request_data, config):
+    baseAddress = config['base_address']
+    path = ''
+    action = request_data['action']
+    policy_type_id = request_data['policy_type_id']
+    request_type = request_data['request_type']
+    if action == "edit_policy":
+        path = baseAddress + config['actions_path'][action].format(policy_type_id=policy_type_id)
+    if action == 'edit_policy_instance':
+        instance_id = request_data['policy_instance_id']
+        path = baseAddress + config['actions_path'][action].format(policy_type_id=policy_type_id,
+                                                                   policy_instance_id=instance_id)
+    if request_type == 'get':
+        return requests.get(path)
+    if request_type == 'put':
+        payload = request_data['payload']
+        return requests.put(path, payload)
+    if request_type == 'delete':
+        return requests.delete(path)
+
+
+def _check_incoming_request(requestData):  # check if the request is valid
+    if 'action' not in requestData:
+        raise KeyError('no action was specified')
+
+    action = requestData['action'].lower()
+    edit_actions = ['edit_policy', 'edit_policy_instance']
+    requires_policy_id = ['edit_policy', 'list_policy_instance'
+        , 'edit_policy_instance', 'get_policy_instance_status']
+    requires_policy_instance_id = ['edit_policy_instance', 'get_policy_instance_status']
+    possible_actions = ['health_check', 'list_policy', 'edit_policy', 'list_policy_instance'
+        , 'edit_policy_instance', 'get_policy_instance_status']
+    possible_request_type = ['get', 'put', 'delete']
+
+    if action not in possible_actions:
+        raise KeyError("invalid action")
+    if action in edit_actions:  # request type is required
+        if 'request_type' not in requestData:
+            raise KeyError('this action: ' + action + ' requires a request type')
+        if requestData['request_type'] not in possible_request_type:
+            raise KeyError('this request_type: ' + requestData['request_type'] + ' is not valid')
+        if requestData['request_type'] == 'put' and 'payload' not in requestData:
+            raise KeyError('put request requires a payload')
+    if action in requires_policy_id:
+        if 'policy_type_id' not in requestData:
+            raise KeyError('this action: ' + action + ' requires a policy_type_id')
+    if action in requires_policy_instance_id:
+        if 'policy_instance_id' not in requestData:
+            raise KeyError('this action: ' + action + ' requires a policy_instance_id')
diff --git a/a1-sdnc-vth/app/routes/ric.py b/a1-sdnc-vth/app/routes/ric.py
new file mode 100644
index 0000000..8441ac6
--- /dev/null
+++ b/a1-sdnc-vth/app/routes/ric.py
@@ -0,0 +1,12 @@
+
+from app.helpers import response_helper as ResponseHelper
+from app.helpers import ric_helper as Ric
+from . import config, ROUTES
+
+@ROUTES.route("/ric", methods=['GET'])
+def handle_ric():
+    return ResponseHelper.route_check(config=config, get_function=Ric.get_ric_using_get)
+
+@ROUTES.route("/rics", methods=['GET'])
+def handle_rics():
+    return ResponseHelper.route_check(config=config, get_function=Ric.get_rics_using_get)
diff --git a/a1-sdnc-vth/app/routes/service.py b/a1-sdnc-vth/app/routes/service.py
new file mode 100644
index 0000000..e06bf94
--- /dev/null
+++ b/a1-sdnc-vth/app/routes/service.py
@@ -0,0 +1,16 @@
+from app.helpers import response_helper as ResponseHelper
+from app.helpers import service_helper as Service
+from . import config, ROUTES
+
+@ROUTES.route("/services", methods=['GET', 'DELETE'])
+def handle_services():
+    return ResponseHelper.route_check(config=config, get_function=Service.get_services_using_get, delete_function=Service.delete_services_using_delete)
+
+
+@ROUTES.route("/service", methods=['PUT'])
+def handle_service():
+    return ResponseHelper.route_check(config=config, put_function=Service.put_service_using_put)
+
+@ROUTES.route("/services/keepalive", methods=['PUT'])
+def handle_services_keepalive():
+    return ResponseHelper.route_check(config=config, put_function=Service.keep_alive_service_using_put)
diff --git a/a1-sdnc-vth/config.ini b/a1-sdnc-vth/config.ini
new file mode 100644
index 0000000..e9bc817
--- /dev/null
+++ b/a1-sdnc-vth/config.ini
@@ -0,0 +1,14 @@
+[auth]
+creds_enabled= %(USE_CRED)s
+username = %(USER)s
+password = %(PW)s
+[api]
+base_url= %(API_URL)s
+port= %(API_PORT)s
+[resource]
+proxy_enabled = %(USE_PROXY)s
+https_proxy= %(HTTPS)s
+http_proxy= %(HTTP)s
+base_address = %(API_URL)s
+publish = /{topic_name}
+subscribe = /{topic_name}/{consumer_group}/{consumer_id}
diff --git a/a1-sdnc-vth/doc/a1-documentation.docx b/a1-sdnc-vth/doc/a1-documentation.docx
new file mode 100644
index 0000000..335e2a0
Binary files /dev/null and b/a1-sdnc-vth/doc/a1-documentation.docx differ
diff --git a/a1-sdnc-vth/docker/Dockerfile b/a1-sdnc-vth/docker/Dockerfile
new file mode 100644
index 0000000..960368c
--- /dev/null
+++ b/a1-sdnc-vth/docker/Dockerfile
@@ -0,0 +1,18 @@
+FROM python:3.7.4
+
+RUN python --version
+
+ADD pip-requirements.txt pip-requirements.txt
+ADD app app
+ADD config.ini config.ini
+ADD run.py run.py
+
+RUN mkdir -p /otf/logs
+
+RUN python -m pip install --proxy http://one.proxy.att.com:8080 -r pip-requirements.txt
+
+ENV USER=default_user
+ENV PW=default_pass
+
+
+ENTRYPOINT ["python", "run.py"]
diff --git a/a1-sdnc-vth/docker/container-tag.yaml b/a1-sdnc-vth/docker/container-tag.yaml
new file mode 100644
index 0000000..ee078db
--- /dev/null
+++ b/a1-sdnc-vth/docker/container-tag.yaml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) 2019 AT&T Intellectual Property.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+tag: 0.0.1
diff --git a/a1-sdnc-vth/helm/a1-sdnc-vth/.helmignore b/a1-sdnc-vth/helm/a1-sdnc-vth/.helmignore
new file mode 100644
index 0000000..daebc7d
--- /dev/null
+++ b/a1-sdnc-vth/helm/a1-sdnc-vth/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
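The config.ini above takes every value from environment variables through ConfigParser interpolation; a minimal sketch of how get_config() in response_helper.py resolves it (the env values below are hypothetical):

    import os
    from configparser import ConfigParser

    # hypothetical values; in the pod these come from the Helm-injected env section
    os.environ["API_URL"] = "http://localhost"
    os.environ["API_PORT"] = "3000"
    os.environ["USE_CRED"] = "True"

    config = ConfigParser(os.environ)  # the environment becomes the interpolation defaults
    config.read("config.ini")
    print(config["api"]["base_url"])                   # -> http://localhost, from %(API_URL)s
    print(config.getboolean("auth", "creds_enabled"))  # -> True, from %(USE_CRED)s

This is why the Dockerfile only sets USER and PW defaults: the remaining keys are expected from the deployment environment.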
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/a1-sdnc-vth/helm/a1-sdnc-vth/Chart.yaml b/a1-sdnc-vth/helm/a1-sdnc-vth/Chart.yaml new file mode 100644 index 0000000..6c6bf9a --- /dev/null +++ b/a1-sdnc-vth/helm/a1-sdnc-vth/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +appVersion: "1.0" +description: A Helm chart for the a1 sdnc Virtual Test Head +name: a1-sdnc-vth +version: 0.0.1 diff --git a/a1-sdnc-vth/helm/a1-sdnc-vth/templates/deployment.yaml b/a1-sdnc-vth/helm/a1-sdnc-vth/templates/deployment.yaml new file mode 100644 index 0000000..ec08ac3 --- /dev/null +++ b/a1-sdnc-vth/helm/a1-sdnc-vth/templates/deployment.yaml @@ -0,0 +1,137 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ .Values.appName}} + namespace: {{.Values.namespace}} + labels: + app: {{ .Values.appName}} + version: {{.Values.version}} +spec: + revisionHistoryLimit: 1 + minReadySeconds: 10 + strategy: + # indicate which strategy we want for rolling update + type: RollingUpdate + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + replicas: {{ .Values.replicas}} + selector: + matchLabels: + app: {{ .Values.appName}} + version: {{.Values.version}} + template: + metadata: + labels: + app: {{ .Values.appName}} + version: {{.Values.version}} + spec: + serviceAccount: default + volumes: + - name: {{ .Values.appName}}-cert-volume + secret: + secretName: {{.Values.sharedCert}} + optional: true + items: + - key: PEM_CERT + path: otf.pem + - key: PEM_KEY + path: privateKey.pem +# {{ if or (eq .Values.env "st") (eq .Values.env "prod-dr")}} TODO UNCOMMENT WHEN PUSHING TO ORAN +# {{else}} +# - name: logging-pvc +# persistentVolumeClaim: +# {{if eq .Values.env "prod"}} +# claimName: {{ .Values.pvc.prod | quote }} +# {{ else }} +# claimName: {{ .Values.pvc.dev | quote }} +# {{ end }} +# {{end}} + containers: + - name: {{ .Values.appName}} + image: {{ .Values.image}} + imagePullPolicy: Always + ports: + - name: http + containerPort: 6001 + nodePort: {{.Values.nodePort}} + protocol: TCP +# {{ if eq .Values.env "st"}} TODO UNCOMMENT FOR ORAN? 
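+# (when re-enabled: st pods would get the smaller 512Mi/500m limits with 256Mi/100m requests below,
+#  every other env the flat 1Gi / 1-cpu pair)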
+# resources: +# limits: +# memory: "512Mi" +# cpu: "500m" +# requests: +# memory: "256Mi" +# cpu: "100m" +# {{else}} +# resources: +# limits: +# memory: "1Gi" +# cpu: "1" +# requests: +# memory: "1Gi" +# cpu: "1" +# {{end}} + env: + - name: NAMESPACE + value: {{.Values.namespace}} + - name: APP_NAME + value: {{ .Values.appName}} + - name: APP_VERSION + value: {{.Values.version}} + - name: USE_CRED + value: {{.Values.auth.enabled | quote }} + - name: USER + valueFrom: + secretKeyRef: + name: {{ .Values.appName}} + key: api_user + optional: true + - name: PW + valueFrom: + secretKeyRef: + name: {{ .Values.appName}} + key: api_pass + optional: true + - name: USE_PROXY + value: {{.Values.proxy.enabled | quote }} + - name: HTTPS + value: {{.Values.proxy.https | quote }} + - name: HTTP + value: {{.Values.proxy.http | quote }} + - name: API_URL + value: {{.Values.api.base_url}} + - name: API_PORT + value: {{.Values.api.port | quote }} + volumeMounts: + - name: {{.Values.appName}}-cert-volume + mountPath: /opt/cert +# {{ if or (eq .Values.env "st") (eq .Values.env "prod-dr")}} +# {{else}} +# - name: logging-pvc +# mountPath: "/otf/logs" +# {{end}} + livenessProbe: + httpGet: + path: {{.Values.health}} + port: http + scheme: HTTP + httpHeaders: + - name: X-Custom-Header + value: Alive + initialDelaySeconds: 30 + timeoutSeconds: 30 + periodSeconds: 30 + readinessProbe: + httpGet: + path: {{.Values.health}} + port: http + scheme: HTTP + httpHeaders: + - name: X-Custom-Header + value: Ready + initialDelaySeconds: 30 + timeoutSeconds: 30 + periodSeconds: 30 + restartPolicy: Always diff --git a/a1-sdnc-vth/helm/a1-sdnc-vth/templates/secret.yaml b/a1-sdnc-vth/helm/a1-sdnc-vth/templates/secret.yaml new file mode 100644 index 0000000..4a0aa24 --- /dev/null +++ b/a1-sdnc-vth/helm/a1-sdnc-vth/templates/secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.appName}} +type: Opaque +data: + api_user: {{ .Values.auth.user | b64enc }} + api_pass: {{ .Values.auth.pw | b64enc }} diff --git a/a1-sdnc-vth/helm/a1-sdnc-vth/templates/service.yaml b/a1-sdnc-vth/helm/a1-sdnc-vth/templates/service.yaml new file mode 100644 index 0000000..bcba0f4 --- /dev/null +++ b/a1-sdnc-vth/helm/a1-sdnc-vth/templates/service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.appName }} + namespace: {{ .Values.namespace}} + labels: + app: {{ .Values.appName }} + version: {{ .Values.version}} +spec: + type: NodePort + ports: + - name: http + port: 6001 + protocol: TCP + nodePort: {{ .Values.nodePort}} + selector: + app: {{ .Values.appName }} + version: {{ .Values.version}} diff --git a/a1-sdnc-vth/helm/a1-sdnc-vth/values.yaml b/a1-sdnc-vth/helm/a1-sdnc-vth/values.yaml new file mode 100644 index 0000000..fac619d --- /dev/null +++ b/a1-sdnc-vth/helm/a1-sdnc-vth/values.yaml @@ -0,0 +1,23 @@ +appName: a1-sdnc-vth +env: dev +version: 0.0.1-SNAPSHOT +image: dockercentral.it.att.com:5100/com.att.ecomp.otf.dev/a1-sdnc-vth:0.0.1-SNAPSHOT +namespace: com-att-ecomp-otf-dev #org-oran-otf +nodePort: 32331 +replicas: 1 +health : /otf/vth/oran/a1/v1/health +sharedCert: otf-cert-secret-builder +pvc: + dev: org-oran-otf-dev-logs-pv + prod: org-oran-otf-prod-logs-pv +auth: + enabled: true + user: user + pw: pw +proxy: + enabled: false + http: + https: +api: + base_url: http://njcdtl08rg9907.itservices.sbc.com + port: 3000 diff --git a/a1-sdnc-vth/pip-requirements.txt b/a1-sdnc-vth/pip-requirements.txt new file mode 100644 index 0000000..d25f478 --- /dev/null +++ 
b/a1-sdnc-vth/pip-requirements.txt
@@ -0,0 +1,4 @@
+flask
+flask-cors
+requests
+configparser
diff --git a/a1-sdnc-vth/run.py b/a1-sdnc-vth/run.py
new file mode 100644
index 0000000..da0f1c7
--- /dev/null
+++ b/a1-sdnc-vth/run.py
@@ -0,0 +1,51 @@
+"""
+# Copyright (c) 2019 AT&T Intellectual Property.                              #
+#                                                                             #
+# Licensed under the Apache License, Version 2.0 (the "License");             #
+# you may not use this file except in compliance with the License.            #
+# You may obtain a copy of the License at                                     #
+#                                                                             #
+#     http://www.apache.org/licenses/LICENSE-2.0                              #
+#                                                                             #
+# Unless required by applicable law or agreed to in writing, software         #
+# distributed under the License is distributed on an "AS IS" BASIS,           #
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.    #
+# See the License for the specific language governing permissions and         #
+# limitations under the License.                                              #
+################################################################################
+# File name: a1-sdnc-vth.py                                                   #
+# Description: vth for A1 service                                             #
+# Date created: 04/22/2020                                                    #
+# Last modified: 04/30/2020                                                   #
+# Python Version: 3.7.4                                                       #
+# Author: Raul Gomez (rg9907)                                                 #
+# Email: rg9907@att.com                                                       #
+################################################################################
+"""
+import logging
+from logging import FileHandler
+from flask import Flask
+from flask.logging import create_logger
+from app.routes import ROUTES, ERRORS
+#from dotenv import load_dotenv
+
+#load dev env vars
+#load_dotenv()
+APP = Flask(__name__)
+LOG = create_logger(APP)
+
+# Werkzeug logs every request at INFO; switch the level to WARNING to silence
+# those per-endpoint lines (left at DEBUG here for development).
+logging.getLogger("werkzeug").setLevel(logging.DEBUG)
+#logging.getLogger("werkzeug").setLevel(logging.WARNING)
+APP.register_blueprint(ERRORS)
+APP.register_blueprint(ROUTES, url_prefix="/otf/vth/oran/a1/v1")
+
+if __name__ == '__main__':
+    LOG_HANDLER = FileHandler('a1-sdnc-vth.log', mode='a')
+    LOG_HANDLER.setLevel(logging.INFO)
+    LOG.setLevel(logging.INFO)
+    LOG.addHandler(LOG_HANDLER)
+    # context = ('opt/cert/otf.pem', 'opt/cert/privateKey.pem')
+    # APP.run(debug=False, host='0.0.0.0', port=5000, ssl_context=context)
+    APP.run(debug=False, host='0.0.0.0', port=6001)
diff --git a/otf-helm/.gitignore b/otf-helm/.gitignore
new file mode 100644
index 0000000..f92f978
--- /dev/null
+++ b/otf-helm/.gitignore
@@ -0,0 +1 @@
+*.tgz
diff --git a/otf-helm/deploy.sh b/otf-helm/deploy.sh
new file mode 100644
index 0000000..e69de29
diff --git a/otf-helm/otf/.helmignore b/otf-helm/otf/.helmignore
new file mode 100644
index 0000000..05d5aab
--- /dev/null
+++ b/otf-helm/otf/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/otf-helm/otf/Chart.yaml b/otf-helm/otf/Chart.yaml
new file mode 100644
index 0000000..d1eda9c
--- /dev/null
+++ b/otf-helm/otf/Chart.yaml
@@ -0,0 +1,21 @@
+apiVersion: v2
+name: otf
+description: A Helm chart for OTF
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer.
diff --git a/otf-helm/.gitignore b/otf-helm/.gitignore
new file mode 100644
index 0000000..f92f978
--- /dev/null
+++ b/otf-helm/.gitignore
@@ -0,0 +1 @@
+*.tgz
diff --git a/otf-helm/deploy.sh b/otf-helm/deploy.sh
new file mode 100644
index 0000000..e69de29
diff --git a/otf-helm/otf/.helmignore b/otf-helm/otf/.helmignore
new file mode 100644
index 0000000..05d5aab
--- /dev/null
+++ b/otf-helm/otf/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/otf-helm/otf/Chart.yaml b/otf-helm/otf/Chart.yaml
new file mode 100644
index 0000000..d1eda9c
--- /dev/null
+++ b/otf-helm/otf/Chart.yaml
@@ -0,0 +1,21 @@
+apiVersion: v2
+name: otf
+description: A Helm chart for OTF
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+version: 1.0.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application.
+appVersion: Camille.2.1
diff --git a/otf-helm/otf/charts/databases/charts/mongodb/Chart.yaml b/otf-helm/otf/charts/databases/charts/mongodb/Chart.yaml
new file mode 100644
index 0000000..465f465
--- /dev/null
+++ b/otf-helm/otf/charts/databases/charts/mongodb/Chart.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+name: mongodb
+version: 7.8.10
+appVersion: 4.2.6
+# The mongodb chart is deprecated and no longer maintained. For details on the deprecation, see the PROCESSES.md file.
+deprecated: true
+description: DEPRECATED NoSQL document-oriented database that stores JSON-like documents with dynamic schemas, simplifying the integration of data in content-driven applications.
+keywords:
+- mongodb
+- database
+- nosql
+- cluster
+- replicaset
+- replication
+home: https://mongodb.org
+icon: https://bitnami.com/assets/stacks/mongodb/img/mongodb-stack-220x234.png
+sources:
+- https://github.com/bitnami/bitnami-docker-mongodb
+maintainers: []
+engine: gotpl
diff --git a/otf-helm/otf/charts/databases/charts/mongodb/scripts/groups.json b/otf-helm/otf/charts/databases/charts/mongodb/scripts/groups.json
new file mode 100644
index 0000000..257d37b
--- /dev/null
+++ b/otf-helm/otf/charts/databases/charts/mongodb/scripts/groups.json
@@ -0,0 +1,46 @@
+{
+    "_id" : ObjectId("5bdb2bdbd6b0d1f97953fbd7"),
+    "ownerId" : ObjectId("5b9bf50008a8133dc84c1496"),
+    "groupName" : "otf-public-dev",
+    "groupDescription" : "The OTF public group used in the dev environment.",
+    "parentGroupId" : null,
+    "members" : [
+        {
+            "roles" : [
+                "admin"
+            ],
+            "userId" : ObjectId("5b9bf50008a8133dc84c1496")
+        }
+    ],
+    "roles" : [
+        {
+            "permissions" : [
+                "read",
+                "write",
+                "execute",
+                "delete",
+                "management"
+            ],
+            "roleName" : "admin"
+        },
+        {
+            "permissions" : [
+                "read"
+            ],
+            "roleName" : "user"
+        },
+        {
+            "permissions" : [
+                "read",
+                "write",
+                "execute",
+                "delete"
+            ],
+            "roleName" : "developer"
+        }
+    ],
+    "mechanizedIds" : [
+    ],
+    "_class" : "com.att.otf.api.domain.Group",
+    "updatedAt" : ISODate("2020-05-05T21:58:56.381Z")
+}
\ No newline at end of file
diff --git a/otf-helm/otf/charts/databases/charts/mongodb/scripts/init_db.sh b/otf-helm/otf/charts/databases/charts/mongodb/scripts/init_db.sh
new file mode 100644
index 0000000..a3ebcde
--- /dev/null
+++ b/otf-helm/otf/charts/databases/charts/mongodb/scripts/init_db.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+sleep 10;
+mongoimport -c=users -d=otf --mode=upsert --username=otfuser --password=Today.123 --file=/data/scripts/users.json
+mongoimport -c=groups -d=otf --mode=upsert --username=otfuser --password=Today.123 --file=/data/scripts/groups.json
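+# NOTE: --mode=upsert matches documents on _id by default, so re-running these imports is
+# idempotent for this seed data; a retry loop against mongod would be more robust than the fixed sleep.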
"Admin", + "lastName" : "Admin", + "email" : "admin@test.com", + "password" : "$2a$13$TZQCQrG6LuNdHgpEXB9YgOfaYZC7xG2E3ICE9lO/0Y9rh5gPdbQWu", + "groups" : [ + { + "permissions" : [ + "admin" + ], + "groupId" : ObjectId("5bdb2bdbd6b0d1f97953fbd7") + } + ], + "createdAt" : ISODate("2020-05-05T12:13:05.176Z"), + "updatedAt" : ISODate("2020-05-05T20:40:16.591Z"), + "_class" : "com.att.otf.api.domain.User", + "favorites" : { + "testDefinitions" : [ + ] + }, + "enabled" : true, + "defaultGroup" : ObjectId("5bdb2bdbd6b0d1f97953fbd7"), + "defaultGroupEnabled" : true +} \ No newline at end of file diff --git a/otf-helm/otf/charts/databases/charts/mongodb/templates/configmap.yaml b/otf-helm/otf/charts/databases/charts/mongodb/templates/configmap.yaml new file mode 100644 index 0000000..eefbba9 --- /dev/null +++ b/otf-helm/otf/charts/databases/charts/mongodb/templates/configmap.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name : {{ .Values.global.mongodb.appName }}-init-scripts + +data: + {{- $files := .Files }} + {{- range $key, $value := .Files }} + {{- if hasPrefix "scripts/" $key }} {{/* only when in scripts/ */}} + {{ $key | trimPrefix "scripts/" }}: {{ $files.Get $key | quote }} {{/* adapt $key as desired */}} + {{- end }} + {{- end }} diff --git a/otf-helm/otf/charts/databases/charts/mongodb/templates/deployment.yaml b/otf-helm/otf/charts/databases/charts/mongodb/templates/deployment.yaml new file mode 100644 index 0000000..78f2e5c --- /dev/null +++ b/otf-helm/otf/charts/databases/charts/mongodb/templates/deployment.yaml @@ -0,0 +1,72 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ .Values.global.mongodb.appName }} + namespace: {{.Values.global.mongodb.namespace }} + labels: + app: {{ .Values.global.mongodb.appName }} + version: {{.Values.global.mongodb.version }} +spec: + revisionHistoryLimit: 1 # keep one replica set to allow rollback + minReadySeconds: 10 + strategy: + # indicate which strategy we want for rolling update + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + replicas: {{ .Values.global.mongodb.replicas }} + selector: + matchLabels: + app: {{ .Values.global.mongodb.appName }} + version: {{.Values.global.mongodb.version }} + template: + metadata: + labels: + app: {{ .Values.global.mongodb.appName }} + version: {{.Values.global.mongodb.version }} + spec: + serviceAccount: default + containers: + - name: {{ .Values.global.mongodb.appName }} + image: {{ .Values.global.mongodb.image.registry }}/{{ .Values.global.mongodb.image.repository }} + imagePullPolicy: Always + env: + - name: MONGODB_PASSWORD + valueFrom: + secretKeyRef: + name : {{ .Values.global.mongodb.appName }} + key: mongo_password + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name : {{ .Values.global.mongodb.appName }} + key: mongo_root_password + - name: MONGODB_USERNAME + value: {{ .Values.global.mongodb.mongodbUsername | quote }} + - name: MONGODB_DATABASE + value: {{ .Values.global.mongodb.mongodbDatabase | quote }} + ports: + - name: mongodb + containerPort: 27017 + hostPort: 27017 + resources: + limits: + memory: {{ .Values.global.mongodb.resources.limits.memory }} + cpu: {{ .Values.global.mongodb.resources.limits.cpu }} + requests: + memory: {{ .Values.global.mongodb.resources.requests.memory }} + cpu: {{ .Values.global.mongodb.resources.requests.cpu }} + lifecycle: + postStart: + exec: + command: ["/bin/bash", "-c", "cd data/scripts;./init_db.sh"] + volumeMounts: + - name: custom-init-scripts + mountPath: /data/scripts + 
+      volumes:
+      - name: custom-init-scripts
+        configMap:
+          name: {{ .Values.global.mongodb.appName }}-init-scripts
+          defaultMode: 0755
+      restartPolicy: Always
diff --git a/otf-helm/otf/charts/databases/charts/mongodb/templates/ingress.yaml b/otf-helm/otf/charts/databases/charts/mongodb/templates/ingress.yaml
new file mode 100644
index 0000000..6accedb
--- /dev/null
+++ b/otf-helm/otf/charts/databases/charts/mongodb/templates/ingress.yaml
@@ -0,0 +1,30 @@
+# Needs updates to configure connectivity when other microservices are deployed to connect to MongoDB
+
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: {{ .Values.global.mongodb.appName }}
+  namespace: {{ .Values.global.mongodb.namespace }}
+  labels:
+    app: {{ .Values.global.mongodb.appName }}
+    version: {{ .Values.global.mongodb.version }}
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/ssl-redirect: "true"
+    nginx.ingress.kubernetes.io/rewrite-target: /$1
+    nginx.ingress.kubernetes.io/configuration-snippet: |
+      proxy_set_header l5d-dst-override $service_name.$namespace.svc.cluster.local:$service_port;
+      grpc_set_header l5d-dst-override $service_name.$namespace.svc.cluster.local:$service_port;
+spec:
+  tls:
+  - hosts:
+    - {{ .Values.global.mongodb.nodeApi.host }}
+    secretName: {{ .Values.global.mongodb.certName }}
+  rules:
+  - host: {{ .Values.global.mongodb.nodeApi.host }}
+    http:
+      paths:
+      - path: /mongodb/(.*)
+        backend:
+          serviceName: {{ .Values.global.mongodb.appName }}
+          servicePort: {{ .Values.global.mongodb.port }}
diff --git a/otf-helm/otf/charts/databases/charts/mongodb/templates/secret.yaml b/otf-helm/otf/charts/databases/charts/mongodb/templates/secret.yaml
new file mode 100644
index 0000000..f450e74
--- /dev/null
+++ b/otf-helm/otf/charts/databases/charts/mongodb/templates/secret.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ .Values.global.mongodb.appName }}
+type: Opaque
+data:
+  mongo_root_password: {{ .Values.global.mongodb.mongodbRootPassword | b64enc }}
+  mongo_password: {{ .Values.global.mongodb.mongodbPassword | b64enc }}
diff --git a/otf-helm/otf/charts/databases/charts/mongodb/templates/service.yaml b/otf-helm/otf/charts/databases/charts/mongodb/templates/service.yaml
new file mode 100644
index 0000000..c72af9b
--- /dev/null
+++ b/otf-helm/otf/charts/databases/charts/mongodb/templates/service.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ .Values.global.mongodb.appName }}
+  namespace: {{ .Values.global.mongodb.namespace }}
+  labels:
+    app: {{ .Values.global.mongodb.appName }}
+    version: {{ .Values.global.mongodb.version }}
+  annotations:
+    service.beta.kubernetes.io/azure-load-balancer-internal: "true"
+spec:
+  type: LoadBalancer
+  ports:
+  - port: {{ .Values.global.mongodb.port }}
+    protocol: TCP
+    targetPort: {{ .Values.global.mongodb.targetPort }}
+  selector:
+    app: {{ .Values.global.mongodb.appName }}
+    version: {{ .Values.global.mongodb.version }}
diff --git a/otf-helm/otf/charts/databases/charts/mongodb/values.yaml b/otf-helm/otf/charts/databases/charts/mongodb/values.yaml
new file mode 100644
index 0000000..804031f
--- /dev/null
+++ b/otf-helm/otf/charts/databases/charts/mongodb/values.yaml
@@ -0,0 +1,530 @@
+# Reference values file from the upstream GitHub chart - currently not used.
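+# NOTE: the templates above read their settings from global.mongodb, not from this reference file.
+# A minimal sketch of that block, with key names taken from the templates; values are illustrative:
+# global:
+#   mongodb:
+#     appName: otf-mongodb            # hypothetical name
+#     namespace: com-att-ecomp-otf-dev
+#     version: 4.2.6
+#     replicas: 1
+#     port: 27017
+#     targetPort: 27017
+#     image:
+#       registry: docker.io
+#       repository: bitnami/mongodb
+#     mongodbUsername: otfuser
+#     mongodbDatabase: otf
+#     mongodbRootPassword: <root-password>
+#     mongodbPassword: <password>
+#     resources: { limits: { memory: 1Gi, cpu: "1" }, requests: { memory: 512Mi, cpu: 500m } }
+#     nodeApi: { host: <ingress-host> }
+#     certName: <tls-cert-secret-name>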
+
+## Global Docker image parameters
+## Please note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry and imagePullSecrets
+##
+# global:
+#   imageRegistry: myRegistryName
+#   imagePullSecrets:
+#     - myRegistryKeySecretName
+#   storageClass: myStorageClass
+
+image:
+  ## Bitnami MongoDB registry
+  ##
+  registry: docker.io
+  ## Bitnami MongoDB image name
+  ##
+  repository: bitnami/mongodb
+  ## Bitnami MongoDB image tag
+  ## ref: https://hub.docker.com/r/bitnami/mongodb/tags/
+  ##
+  tag: 4.2.6-debian-10-r18
+  ## Specify an imagePullPolicy
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+
+  ## Set to true if you would like to see extra information in the logs
+  ## It turns on Bitnami debugging in minideb-extras-base
+  ## ref: https://github.com/bitnami/minideb-extras-base
+  debug: false
+
+## String to partially override mongodb.fullname template (will maintain the release name)
+##
+# nameOverride: otf-mongo
+
+## String to fully override mongodb.fullname template
+##
+# fullnameOverride:
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
+##
+volumePermissions:
+  enabled: false
+  image:
+    registry: docker.io
+    repository: bitnami/minideb
+    tag: buster
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  resources: {}
+
+## Enable authentication
+## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/
+#
+usePassword: true
+# existingSecret: name-of-existing-secret
+
+## MongoDB admin password
+## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#setting-the-root-password-on-first-run
+##
+mongodbRootPassword: otf.123
+
+## MongoDB custom user and database
+## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run
+##
+mongodbUsername: otfuser
+mongodbPassword: Today.123
+mongodbDatabase: otf
+
+## Whether to enable/disable IPv6 on MongoDB
+## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-ipv6
+##
+mongodbEnableIPv6: false
+
+## Whether to enable/disable DirectoryPerDB on MongoDB
+## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#enabling/disabling-directoryperdb
+##
+mongodbDirectoryPerDB: false
+
+## MongoDB System Log configuration
+## ref: https://github.com/bitnami/bitnami-docker-mongodb#configuring-system-log-verbosity-level
+##
+mongodbSystemLogVerbosity: 0
+mongodbDisableSystemLog: false
+
+## MongoDB additional command line flags
+##
+## Can be used to specify command line flags, for example:
+##
+## mongodbExtraFlags:
+##  - "--wiredTigerCacheSizeGB=2"
+mongodbExtraFlags: []
+
+## Pod Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+##
+securityContext:
+  enabled: true
+  fsGroup: 1001
+  runAsUser: 1001
+
+## Kubernetes Cluster Domain
+clusterDomain: cluster.local
+
+## Kubernetes service type
+service:
+  ## Specify an explicit service name.
+  # name: svc-mongo
+  ## Provide any additional annotations which may be required.
+  ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
+  annotations: {}
+  type: ClusterIP
+  # clusterIP: None
+  port: 27017
+
+  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+  ##
+  # nodePort:
+
+  ## Specify the externalIPs value for the ClusterIP service type.
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
+  # externalIPs: []
+
+  ## Specify the loadBalancerIP value for LoadBalancer service types.
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer
+  ##
+  # loadBalancerIP:
+
+  ## Specify the loadBalancerSourceRanges value for LoadBalancer service types.
+  ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+  ##
+  # loadBalancerSourceRanges: []
+
+# Add custom extra environment variables to all the MongoDB containers
+# extraEnvVars:
+
+## Use StatefulSet instead of Deployment when deploying standalone
+useStatefulSet: false
+
+## Setting up replication
+## ref: https://github.com/bitnami/bitnami-docker-mongodb#setting-up-a-replication
+#
+replicaSet:
+  ## Whether to create a MongoDB replica set for high availability or not
+  enabled: false
+  useHostnames: true
+
+  ## Name of the replica set
+  ##
+  # name: mongoOTF
+
+  ## Key used for replica set authentication
+  ##
+  # key: key
+
+  ## Number of replicas per each node type
+  ##
+  replicas:
+    secondary: 1
+    arbiter: 1
+
+  ## Pod Disruption Budget
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  pdb:
+    enabled: true
+    minAvailable:
+      primary: 1
+      secondary: 1
+      arbiter: 1
+    # maxUnavailable:
+    #   primary: 1
+    #   secondary: 1
+    #   arbiter: 1
+
+# Annotations to be added to the deployment or statefulsets
+annotations: {}
+
+# Additional labels to apply to the deployment or statefulsets
+labels: {}
+
+# Annotations to be added to MongoDB pods
+podAnnotations: {}
+
+# Additional pod labels to apply
+podLabels: {}
+
+## Use an alternate scheduler, e.g. "stork".
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+# schedulerName:
+
+## Configure resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+##
+resources: {}
+# Define separate resources per arbiter, which are less than for the primary or secondary
+# used only when replica set is enabled
+resourcesArbiter: {}
+# limits:
+#   cpu: 500m
+#   memory: 512Mi
+# requests:
+#   cpu: 100m
+#   memory: 256Mi
+
+## Pod priority
+## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+# priorityClassName: ""
+
+## Node selector
+## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+nodeSelector: {}
+
+## Affinity
+## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+affinity: {}
+# Define separate affinity for the arbiter pod
+affinityArbiter: {}
+
+## Tolerations
+## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+tolerations: []
+
+## updateStrategy for MongoDB Primary, Secondary and Arbiter statefulsets
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+updateStrategy:
+  type: RollingUpdate
+
+## Add sidecars to the pod
+##
+## For example:
+## sidecars:
+##   - name: your-image-name
+##     image: your-image
+##     imagePullPolicy: Always
+##     ports:
+##       - name: portname
+##         containerPort: 1234
+sidecars: []
+## Array to add extra volumes
+##
+extraVolumes: []
+## Array to add extra mounts (normally used with extraVolumes)
+##
+extraVolumeMounts: []
+
+## Add sidecars to the arbiter pod
+# used only when replica set is enabled
+##
+## For example:
+## sidecars:
+##   - name: your-image-name
+##     image: your-image
+##     imagePullPolicy: Always
+##     ports:
+##       - name: portname
+##         containerPort: 1234
+sidecarsArbiter: []
+## Array to add extra volumes to the arbiter
+# used only when replica set is enabled
+##
+extraVolumesArbiter: []
+## Array to add extra mounts (normally used with extraVolumes) to the arbiter
+# used only when replica set is enabled
+##
+extraVolumeMountsArbiter: []
+
+## Enable persistence using Persistent Volume Claims
+## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+##
+persistence:
+  enabled: true
+  ## A manually managed Persistent Volume and Claim
+  ## Requires persistence.enabled: true
+  ## If defined, PVC must be created manually before volume will be bound
+  ##
+  # existingClaim:
+
+  ## The path the volume will be mounted at, useful when using different
+  ## MongoDB images.
+  ##
+  mountPath: /bitnami/mongodb
+
+  ## The subdirectory of the volume to mount to, useful in dev environments
+  ## and one PV for multiple services.
+  ##
+  subPath: ""
+
+  ## mongodb data Persistent Volume Storage Class
+  ## If defined, storageClassName: <storageClass>
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ##   set, choosing the default provisioner. (gp2 on AWS, standard on
+  ##   GKE, AWS & OpenStack)
+  ##
+  # storageClass: "-"
+  accessModes:
+    - ReadWriteOnce
+  size: 8Gi
+  annotations: {}
+
+## Configure the ingress resource that allows you to access the
+## MongoDB installation. Set up the URL
+## ref: http://kubernetes.io/docs/user-guide/ingress/
+##
+ingress:
+  ## Set to true to enable ingress record generation
+  enabled: false
+
+  ## Set this to true in order to add the corresponding annotations for cert-manager
+  certManager: false
+
+  ## Ingress annotations done as key:value pairs
+  ## For a full list of possible ingress annotations, please see
+  ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
+  ##
+  ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
+  ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set
+  annotations:
+  #  kubernetes.io/ingress.class: nginx
+
+  ## The list of hostnames to be covered with this ingress record.
+  ## Most likely this will be just one host, but in the event more hosts are needed, this is an array
+  hosts:
+  - name: mongodb.local
+    path: /
+
+  ## The tls configuration for the ingress
+  ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
+  tls:
+  - hosts:
+    - mongodb.local
+    secretName: mongodb.local-tls
+
+  secrets:
+  ## If you're providing your own certificates, please use this to add the certificates as secrets
+  ## key and certificate should start with -----BEGIN CERTIFICATE----- or
+  ## -----BEGIN RSA PRIVATE KEY-----
+  ##
+  ## name should line up with a tlsSecret set further up
+  ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set
+  ##
+  ## It is also possible to create and manage the certificates outside of this helm chart
+  ## Please see README.md for more information
+  # - name: mongodb.local-tls
+  #   key:
+  #   certificate:
+
+## Configure the options for init containers to be run before the main app containers
+## are started. All init containers are run sequentially and must exit without errors
+## for the next one to be started.
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ +# extraInitContainers: | +# - name: do-something +# image: busybox +# command: ['do', 'something'] + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +# Define custom config map with init scripts +initConfigMap: {} +# name: "init-config-map" + +## Entries for the MongoDB config file. For documentation of all options, see: +## http://docs.mongodb.org/manual/reference/configuration-options/ +## +configmap: +# # where and how to store data. +# storage: +# dbPath: /bitnami/mongodb/data/db +# journal: +# enabled: true +# directoryPerDB: false +# # where to write logging data. +# systemLog: +# destination: file +# quiet: false +# logAppend: true +# logRotate: reopen +# path: /opt/bitnami/mongodb/logs/mongodb.log +# verbosity: 0 +# # network interfaces +# net: +# port: 27017 +# unixDomainSocket: +# enabled: true +# pathPrefix: /opt/bitnami/mongodb/tmp +# ipv6: false +# bindIpAll: true +# # replica set options +# #replication: +# #replSetName: replicaset +# #enableMajorityReadConcern: true +# # process management options +# processManagement: +# fork: false +# pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +# # set parameter options +# setParameter: +# enableLocalhostAuthBypass: true +# # security options +# security: +# authorization: disabled +# #keyFile: /opt/bitnami/mongodb/conf/keyfile + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + image: + registry: docker.io + repository: bitnami/mongodb-exporter + tag: 0.10.0-debian-10-r41 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## String with extra arguments to the metrics exporter + ## ref: https://github.com/percona/mongodb_exporter/blob/master/mongodb_exporter.go + extraArgs: "" + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Metrics exporter liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + livenessProbe: + enabled: false + initialDelaySeconds: 15 + periodSeconds: 5 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + failureThreshold: 3 + successThreshold: 1 + + ## Metrics exporter pod Annotation + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9216" + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md + serviceMonitor: + ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry + enabled: false + + ## Specify a namespace if needed + # namespace: monitoring + + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + additionalLabels: {} + + ## Specify Metric Relabellings to add to the scrape endpoint + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + # relabellings: + + alerting: + ## Define individual alerting rules as required + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#rulegroup + ## https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ + rules: {} + + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Prometheus Rules to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + additionalLabels: {} diff --git a/otf-helm/otf/charts/databases/charts/mysqldb/Chart.yaml b/otf-helm/otf/charts/databases/charts/mysqldb/Chart.yaml new file mode 100644 index 0000000..e803431 --- /dev/null +++ b/otf-helm/otf/charts/databases/charts/mysqldb/Chart.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +name: mysql +version: 1.6.3 +appVersion: 5.7.28 +description: Fast, reliable, scalable, and easy to use open-source relational database + system. 
+keywords: +- mysql +- database +- sql +home: https://www.mysql.com/ +icon: https://www.mysql.com/common/logos/logo-mysql-170x115.png +sources: +- https://github.com/kubernetes/charts +- https://github.com/docker-library/mysql +maintainers: +engine: gotpl diff --git a/otf-helm/otf/charts/databases/charts/mysqldb/scripts/init_db.sh b/otf-helm/otf/charts/databases/charts/mysqldb/scripts/init_db.sh new file mode 100644 index 0000000..9b748ca --- /dev/null +++ b/otf-helm/otf/charts/databases/charts/mysqldb/scripts/init_db.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +sleep 20; +mysql -u otfuser otf_camunda -pToday.123 < /data/scripts/mysql_engine_7.10.0.sql +mysql -u otfuser otf_camunda -pToday.123 < /data/scripts/mysql_identity_7.10.0.sql + diff --git a/otf-helm/otf/charts/databases/charts/mysqldb/scripts/mysql_engine_7.10.0.sql b/otf-helm/otf/charts/databases/charts/mysqldb/scripts/mysql_engine_7.10.0.sql new file mode 100644 index 0000000..aefe0cb --- /dev/null +++ b/otf-helm/otf/charts/databases/charts/mysqldb/scripts/mysql_engine_7.10.0.sql @@ -0,0 +1,1298 @@ +-- +-- Copyright © 2012 - 2018 camunda services GmbH and various authors (info@camunda.com) +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +create table ACT_GE_PROPERTY ( + NAME_ varchar(64), + VALUE_ varchar(300), + REV_ integer, + primary key (NAME_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +insert into ACT_GE_PROPERTY +values ('schema.version', 'fox', 1); + +insert into ACT_GE_PROPERTY +values ('schema.history', 'create(fox)', 1); + +insert into ACT_GE_PROPERTY +values ('next.dbid', '1', 1); + +insert into ACT_GE_PROPERTY +values ('deployment.lock', '0', 1); + +insert into ACT_GE_PROPERTY +values ('history.cleanup.job.lock', '0', 1); + +insert into ACT_GE_PROPERTY +values ('startup.lock', '0', 1); + +create table ACT_GE_BYTEARRAY ( + ID_ varchar(64), + REV_ integer, + NAME_ varchar(255), + DEPLOYMENT_ID_ varchar(64), + BYTES_ LONGBLOB, + GENERATED_ TINYINT, + TENANT_ID_ varchar(64), + TYPE_ integer, + CREATE_TIME_ datetime, + ROOT_PROC_INST_ID_ varchar(64), + REMOVAL_TIME_ datetime, + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +create table ACT_RE_DEPLOYMENT ( + ID_ varchar(64), + NAME_ varchar(255), + DEPLOY_TIME_ timestamp, + SOURCE_ varchar(255), + TENANT_ID_ varchar(64), + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +create table ACT_RU_EXECUTION ( + ID_ varchar(64), + REV_ integer, + ROOT_PROC_INST_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + BUSINESS_KEY_ varchar(255), + PARENT_ID_ varchar(64), + PROC_DEF_ID_ varchar(64), + SUPER_EXEC_ varchar(64), + SUPER_CASE_EXEC_ varchar(64), + CASE_INST_ID_ varchar(64), + ACT_ID_ varchar(255), + ACT_INST_ID_ varchar(64), + IS_ACTIVE_ TINYINT, + IS_CONCURRENT_ TINYINT, + IS_SCOPE_ TINYINT, + IS_EVENT_SCOPE_ TINYINT, + SUSPENSION_STATE_ integer, + CACHED_ENT_STATE_ integer, + SEQUENCE_COUNTER_ bigint, + TENANT_ID_ varchar(64), + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE 
utf8_bin; + +create table ACT_RU_JOB ( + ID_ varchar(64) NOT NULL, + REV_ integer, + TYPE_ varchar(255) NOT NULL, + LOCK_EXP_TIME_ timestamp NULL, + LOCK_OWNER_ varchar(255), + EXCLUSIVE_ boolean, + EXECUTION_ID_ varchar(64), + PROCESS_INSTANCE_ID_ varchar(64), + PROCESS_DEF_ID_ varchar(64), + PROCESS_DEF_KEY_ varchar(255), + RETRIES_ integer, + EXCEPTION_STACK_ID_ varchar(64), + EXCEPTION_MSG_ varchar(4000), + DUEDATE_ timestamp NULL, + REPEAT_ varchar(255), + HANDLER_TYPE_ varchar(255), + HANDLER_CFG_ varchar(4000), + DEPLOYMENT_ID_ varchar(64), + SUSPENSION_STATE_ integer NOT NULL DEFAULT 1, + JOB_DEF_ID_ varchar(64), + PRIORITY_ bigint NOT NULL DEFAULT 0, + SEQUENCE_COUNTER_ bigint, + TENANT_ID_ varchar(64), + CREATE_TIME_ datetime, + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +create table ACT_RU_JOBDEF ( + ID_ varchar(64) NOT NULL, + REV_ integer, + PROC_DEF_ID_ varchar(64), + PROC_DEF_KEY_ varchar(255), + ACT_ID_ varchar(255), + JOB_TYPE_ varchar(255) NOT NULL, + JOB_CONFIGURATION_ varchar(255), + SUSPENSION_STATE_ integer, + JOB_PRIORITY_ bigint, + TENANT_ID_ varchar(64), + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +create table ACT_RE_PROCDEF ( + ID_ varchar(64) not null, + REV_ integer, + CATEGORY_ varchar(255), + NAME_ varchar(255), + KEY_ varchar(255) not null, + VERSION_ integer not null, + DEPLOYMENT_ID_ varchar(64), + RESOURCE_NAME_ varchar(4000), + DGRM_RESOURCE_NAME_ varchar(4000), + HAS_START_FORM_KEY_ TINYINT, + SUSPENSION_STATE_ integer, + TENANT_ID_ varchar(64), + VERSION_TAG_ varchar(64), + HISTORY_TTL_ integer, + STARTABLE_ boolean NOT NULL default TRUE, + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +create table ACT_RU_TASK ( + ID_ varchar(64), + REV_ integer, + EXECUTION_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + PROC_DEF_ID_ varchar(64), + CASE_EXECUTION_ID_ varchar(64), + CASE_INST_ID_ varchar(64), + CASE_DEF_ID_ varchar(64), + NAME_ varchar(255), + PARENT_TASK_ID_ varchar(64), + DESCRIPTION_ varchar(4000), + TASK_DEF_KEY_ varchar(255), + OWNER_ varchar(255), + ASSIGNEE_ varchar(255), + DELEGATION_ varchar(64), + PRIORITY_ integer, + CREATE_TIME_ timestamp, + DUE_DATE_ datetime, + FOLLOW_UP_DATE_ datetime, + SUSPENSION_STATE_ integer, + TENANT_ID_ varchar(64), + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +create table ACT_RU_IDENTITYLINK ( + ID_ varchar(64), + REV_ integer, + GROUP_ID_ varchar(255), + TYPE_ varchar(255), + USER_ID_ varchar(255), + TASK_ID_ varchar(64), + PROC_DEF_ID_ varchar(64), + TENANT_ID_ varchar(64), + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +create table ACT_RU_VARIABLE ( + ID_ varchar(64) not null, + REV_ integer, + TYPE_ varchar(255) not null, + NAME_ varchar(255) not null, + EXECUTION_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + CASE_EXECUTION_ID_ varchar(64), + CASE_INST_ID_ varchar(64), + TASK_ID_ varchar(64), + BYTEARRAY_ID_ varchar(64), + DOUBLE_ double, + LONG_ bigint, + TEXT_ varchar(4000), + TEXT2_ varchar(4000), + VAR_SCOPE_ varchar(64) not null, + SEQUENCE_COUNTER_ bigint, + IS_CONCURRENT_LOCAL_ TINYINT, + TENANT_ID_ varchar(64), + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +create table ACT_RU_EVENT_SUBSCR ( + ID_ varchar(64) not null, + REV_ integer, + EVENT_TYPE_ varchar(255) not null, + EVENT_NAME_ varchar(255), + EXECUTION_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + ACTIVITY_ID_ varchar(255), + CONFIGURATION_ 
varchar(255), + CREATED_ timestamp not null, + TENANT_ID_ varchar(64), + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +create table ACT_RU_INCIDENT ( + ID_ varchar(64) not null, + REV_ integer not null, + INCIDENT_TIMESTAMP_ timestamp not null, + INCIDENT_MSG_ varchar(4000), + INCIDENT_TYPE_ varchar(255) not null, + EXECUTION_ID_ varchar(64), + ACTIVITY_ID_ varchar(255), + PROC_INST_ID_ varchar(64), + PROC_DEF_ID_ varchar(64), + CAUSE_INCIDENT_ID_ varchar(64), + ROOT_CAUSE_INCIDENT_ID_ varchar(64), + CONFIGURATION_ varchar(255), + TENANT_ID_ varchar(64), + JOB_DEF_ID_ varchar(64), + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +create table ACT_RU_AUTHORIZATION ( + ID_ varchar(64) not null, + REV_ integer not null, + TYPE_ integer not null, + GROUP_ID_ varchar(255), + USER_ID_ varchar(255), + RESOURCE_TYPE_ integer not null, + RESOURCE_ID_ varchar(255), + PERMS_ integer, + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +create table ACT_RU_FILTER ( + ID_ varchar(64) not null, + REV_ integer not null, + RESOURCE_TYPE_ varchar(255) not null, + NAME_ varchar(255) not null, + OWNER_ varchar(255), + QUERY_ LONGTEXT not null, + PROPERTIES_ LONGTEXT, + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +create table ACT_RU_METER_LOG ( + ID_ varchar(64) not null, + NAME_ varchar(64) not null, + REPORTER_ varchar(255), + VALUE_ bigint, + TIMESTAMP_ timestamp, + MILLISECONDS_ bigint DEFAULT 0, + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +create table ACT_RU_EXT_TASK ( + ID_ varchar(64) not null, + REV_ integer not null, + WORKER_ID_ varchar(255), + TOPIC_NAME_ varchar(255), + RETRIES_ integer, + ERROR_MSG_ varchar(4000), + ERROR_DETAILS_ID_ varchar(64), + LOCK_EXP_TIME_ timestamp NULL, + SUSPENSION_STATE_ integer, + EXECUTION_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + PROC_DEF_ID_ varchar(64), + PROC_DEF_KEY_ varchar(255), + ACT_ID_ varchar(255), + ACT_INST_ID_ varchar(64), + TENANT_ID_ varchar(64), + PRIORITY_ bigint NOT NULL DEFAULT 0, + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +create table ACT_RU_BATCH ( + ID_ varchar(64) not null, + REV_ integer not null, + TYPE_ varchar(255), + TOTAL_JOBS_ integer, + JOBS_CREATED_ integer, + JOBS_PER_SEED_ integer, + INVOCATIONS_PER_JOB_ integer, + SEED_JOB_DEF_ID_ varchar(64), + BATCH_JOB_DEF_ID_ varchar(64), + MONITOR_JOB_DEF_ID_ varchar(64), + SUSPENSION_STATE_ integer, + CONFIGURATION_ varchar(255), + TENANT_ID_ varchar(64), + CREATE_USER_ID_ varchar(255), + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +create index ACT_IDX_EXEC_ROOT_PI on ACT_RU_EXECUTION(ROOT_PROC_INST_ID_); +create index ACT_IDX_EXEC_BUSKEY on ACT_RU_EXECUTION(BUSINESS_KEY_); +create index ACT_IDX_EXEC_TENANT_ID on ACT_RU_EXECUTION(TENANT_ID_); +create index ACT_IDX_TASK_CREATE on ACT_RU_TASK(CREATE_TIME_); +create index ACT_IDX_TASK_ASSIGNEE on ACT_RU_TASK(ASSIGNEE_); +create index ACT_IDX_TASK_TENANT_ID on ACT_RU_TASK(TENANT_ID_); +create index ACT_IDX_IDENT_LNK_USER on ACT_RU_IDENTITYLINK(USER_ID_); +create index ACT_IDX_IDENT_LNK_GROUP on ACT_RU_IDENTITYLINK(GROUP_ID_); +create index ACT_IDX_EVENT_SUBSCR_CONFIG_ on ACT_RU_EVENT_SUBSCR(CONFIGURATION_); +create index ACT_IDX_EVENT_SUBSCR_TENANT_ID on ACT_RU_EVENT_SUBSCR(TENANT_ID_); +create index ACT_IDX_VARIABLE_TASK_ID on ACT_RU_VARIABLE(TASK_ID_); +create index ACT_IDX_VARIABLE_TENANT_ID on 
ACT_RU_VARIABLE(TENANT_ID_); +create index ACT_IDX_ATHRZ_PROCEDEF on ACT_RU_IDENTITYLINK(PROC_DEF_ID_); +create index ACT_IDX_INC_CONFIGURATION on ACT_RU_INCIDENT(CONFIGURATION_); +create index ACT_IDX_INC_TENANT_ID on ACT_RU_INCIDENT(TENANT_ID_); +-- CAM-5914 +create index ACT_IDX_JOB_EXECUTION_ID on ACT_RU_JOB(EXECUTION_ID_); +-- this index needs to be limited in mysql see CAM-6938 +create index ACT_IDX_JOB_HANDLER on ACT_RU_JOB(HANDLER_TYPE_(100),HANDLER_CFG_(155)); +create index ACT_IDX_JOB_PROCINST on ACT_RU_JOB(PROCESS_INSTANCE_ID_); +create index ACT_IDX_JOB_TENANT_ID on ACT_RU_JOB(TENANT_ID_); +create index ACT_IDX_JOBDEF_TENANT_ID on ACT_RU_JOBDEF(TENANT_ID_); + +-- new metric milliseconds column +CREATE INDEX ACT_IDX_METER_LOG_MS ON ACT_RU_METER_LOG(MILLISECONDS_); +CREATE INDEX ACT_IDX_METER_LOG_NAME_MS ON ACT_RU_METER_LOG(NAME_, MILLISECONDS_); +CREATE INDEX ACT_IDX_METER_LOG_REPORT ON ACT_RU_METER_LOG(NAME_, REPORTER_, MILLISECONDS_); + +-- old metric timestamp column +CREATE INDEX ACT_IDX_METER_LOG_TIME ON ACT_RU_METER_LOG(TIMESTAMP_); +CREATE INDEX ACT_IDX_METER_LOG ON ACT_RU_METER_LOG(NAME_, TIMESTAMP_); + +create index ACT_IDX_EXT_TASK_TOPIC on ACT_RU_EXT_TASK(TOPIC_NAME_); +create index ACT_IDX_EXT_TASK_TENANT_ID on ACT_RU_EXT_TASK(TENANT_ID_); +create index ACT_IDX_EXT_TASK_PRIORITY ON ACT_RU_EXT_TASK(PRIORITY_); +create index ACT_IDX_EXT_TASK_ERR_DETAILS ON ACT_RU_EXT_TASK(ERROR_DETAILS_ID_); +create index ACT_IDX_AUTH_GROUP_ID on ACT_RU_AUTHORIZATION(GROUP_ID_); +create index ACT_IDX_JOB_JOB_DEF_ID on ACT_RU_JOB(JOB_DEF_ID_); + +alter table ACT_GE_BYTEARRAY + add constraint ACT_FK_BYTEARR_DEPL + foreign key (DEPLOYMENT_ID_) + references ACT_RE_DEPLOYMENT (ID_); + +alter table ACT_RU_EXECUTION + add constraint ACT_FK_EXE_PROCINST + foreign key (PROC_INST_ID_) + references ACT_RU_EXECUTION (ID_) on delete cascade on update cascade; + +alter table ACT_RU_EXECUTION + add constraint ACT_FK_EXE_PARENT + foreign key (PARENT_ID_) + references ACT_RU_EXECUTION (ID_); + +alter table ACT_RU_EXECUTION + add constraint ACT_FK_EXE_SUPER + foreign key (SUPER_EXEC_) + references ACT_RU_EXECUTION (ID_); + +alter table ACT_RU_EXECUTION + add constraint ACT_FK_EXE_PROCDEF + foreign key (PROC_DEF_ID_) + references ACT_RE_PROCDEF (ID_); + +alter table ACT_RU_IDENTITYLINK + add constraint ACT_FK_TSKASS_TASK + foreign key (TASK_ID_) + references ACT_RU_TASK (ID_); + +alter table ACT_RU_IDENTITYLINK + add constraint ACT_FK_ATHRZ_PROCEDEF + foreign key (PROC_DEF_ID_) + references ACT_RE_PROCDEF(ID_); + +alter table ACT_RU_TASK + add constraint ACT_FK_TASK_EXE + foreign key (EXECUTION_ID_) + references ACT_RU_EXECUTION (ID_); + +alter table ACT_RU_TASK + add constraint ACT_FK_TASK_PROCINST + foreign key (PROC_INST_ID_) + references ACT_RU_EXECUTION (ID_); + +alter table ACT_RU_TASK + add constraint ACT_FK_TASK_PROCDEF + foreign key (PROC_DEF_ID_) + references ACT_RE_PROCDEF (ID_); + +alter table ACT_RU_VARIABLE + add constraint ACT_FK_VAR_EXE + foreign key (EXECUTION_ID_) + references ACT_RU_EXECUTION (ID_); + +alter table ACT_RU_VARIABLE + add constraint ACT_FK_VAR_PROCINST + foreign key (PROC_INST_ID_) + references ACT_RU_EXECUTION(ID_); + +alter table ACT_RU_VARIABLE + add constraint ACT_FK_VAR_BYTEARRAY + foreign key (BYTEARRAY_ID_) + references ACT_GE_BYTEARRAY (ID_); + +alter table ACT_RU_JOB + add constraint ACT_FK_JOB_EXCEPTION + foreign key (EXCEPTION_STACK_ID_) + references ACT_GE_BYTEARRAY (ID_); + +alter table ACT_RU_EVENT_SUBSCR + add constraint ACT_FK_EVENT_EXEC + foreign key 
(EXECUTION_ID_) + references ACT_RU_EXECUTION(ID_); + +alter table ACT_RU_INCIDENT + add constraint ACT_FK_INC_EXE + foreign key (EXECUTION_ID_) + references ACT_RU_EXECUTION (ID_); + +alter table ACT_RU_INCIDENT + add constraint ACT_FK_INC_PROCINST + foreign key (PROC_INST_ID_) + references ACT_RU_EXECUTION (ID_); + +alter table ACT_RU_INCIDENT + add constraint ACT_FK_INC_PROCDEF + foreign key (PROC_DEF_ID_) + references ACT_RE_PROCDEF (ID_); + +alter table ACT_RU_INCIDENT + add constraint ACT_FK_INC_CAUSE + foreign key (CAUSE_INCIDENT_ID_) + references ACT_RU_INCIDENT (ID_) on delete cascade on update cascade; + +alter table ACT_RU_INCIDENT + add constraint ACT_FK_INC_RCAUSE + foreign key (ROOT_CAUSE_INCIDENT_ID_) + references ACT_RU_INCIDENT (ID_) on delete cascade on update cascade; + +alter table ACT_RU_EXT_TASK + add constraint ACT_FK_EXT_TASK_ERROR_DETAILS + foreign key (ERROR_DETAILS_ID_) + references ACT_GE_BYTEARRAY (ID_); + +create index ACT_IDX_INC_JOB_DEF on ACT_RU_INCIDENT(JOB_DEF_ID_); +alter table ACT_RU_INCIDENT + add constraint ACT_FK_INC_JOB_DEF + foreign key (JOB_DEF_ID_) + references ACT_RU_JOBDEF (ID_); + +alter table ACT_RU_AUTHORIZATION + add constraint ACT_UNIQ_AUTH_USER + unique (USER_ID_,TYPE_,RESOURCE_TYPE_,RESOURCE_ID_); + +alter table ACT_RU_AUTHORIZATION + add constraint ACT_UNIQ_AUTH_GROUP + unique (GROUP_ID_,TYPE_,RESOURCE_TYPE_,RESOURCE_ID_); + +alter table ACT_RU_VARIABLE + add constraint ACT_UNIQ_VARIABLE + unique (VAR_SCOPE_, NAME_); + +alter table ACT_RU_EXT_TASK + add constraint ACT_FK_EXT_TASK_EXE + foreign key (EXECUTION_ID_) + references ACT_RU_EXECUTION (ID_); + +create index ACT_IDX_BATCH_SEED_JOB_DEF ON ACT_RU_BATCH(SEED_JOB_DEF_ID_); +alter table ACT_RU_BATCH + add constraint ACT_FK_BATCH_SEED_JOB_DEF + foreign key (SEED_JOB_DEF_ID_) + references ACT_RU_JOBDEF (ID_); + +create index ACT_IDX_BATCH_MONITOR_JOB_DEF ON ACT_RU_BATCH(MONITOR_JOB_DEF_ID_); +alter table ACT_RU_BATCH + add constraint ACT_FK_BATCH_MONITOR_JOB_DEF + foreign key (MONITOR_JOB_DEF_ID_) + references ACT_RU_JOBDEF (ID_); + +create index ACT_IDX_BATCH_JOB_DEF ON ACT_RU_BATCH(BATCH_JOB_DEF_ID_); +alter table ACT_RU_BATCH + add constraint ACT_FK_BATCH_JOB_DEF + foreign key (BATCH_JOB_DEF_ID_) + references ACT_RU_JOBDEF (ID_); + +-- indexes for deadlock problems - https://app.camunda.com/jira/browse/CAM-2567 -- +create index ACT_IDX_INC_CAUSEINCID on ACT_RU_INCIDENT(CAUSE_INCIDENT_ID_); +create index ACT_IDX_INC_EXID on ACT_RU_INCIDENT(EXECUTION_ID_); +create index ACT_IDX_INC_PROCDEFID on ACT_RU_INCIDENT(PROC_DEF_ID_); +create index ACT_IDX_INC_PROCINSTID on ACT_RU_INCIDENT(PROC_INST_ID_); +create index ACT_IDX_INC_ROOTCAUSEINCID on ACT_RU_INCIDENT(ROOT_CAUSE_INCIDENT_ID_); +-- index for deadlock problem - https://app.camunda.com/jira/browse/CAM-4440 -- +create index ACT_IDX_AUTH_RESOURCE_ID on ACT_RU_AUTHORIZATION(RESOURCE_ID_); +-- index to prevent deadlock on fk constraint - https://app.camunda.com/jira/browse/CAM-5440 -- +create index ACT_IDX_EXT_TASK_EXEC on ACT_RU_EXT_TASK(EXECUTION_ID_); + +-- indexes to improve deployment +create index ACT_IDX_BYTEARRAY_ROOT_PI on ACT_GE_BYTEARRAY(ROOT_PROC_INST_ID_); +create index ACT_IDX_BYTEARRAY_RM_TIME on ACT_GE_BYTEARRAY(REMOVAL_TIME_); +create index ACT_IDX_BYTEARRAY_NAME on ACT_GE_BYTEARRAY(NAME_); +create index ACT_IDX_DEPLOYMENT_NAME on ACT_RE_DEPLOYMENT(NAME_); +create index ACT_IDX_DEPLOYMENT_TENANT_ID on ACT_RE_DEPLOYMENT(TENANT_ID_); +create index ACT_IDX_JOBDEF_PROC_DEF_ID ON ACT_RU_JOBDEF(PROC_DEF_ID_); +create index 
ACT_IDX_JOB_HANDLER_TYPE ON ACT_RU_JOB(HANDLER_TYPE_); +create index ACT_IDX_EVENT_SUBSCR_EVT_NAME ON ACT_RU_EVENT_SUBSCR(EVENT_NAME_); +create index ACT_IDX_PROCDEF_DEPLOYMENT_ID ON ACT_RE_PROCDEF(DEPLOYMENT_ID_); +create index ACT_IDX_PROCDEF_TENANT_ID ON ACT_RE_PROCDEF(TENANT_ID_); +create index ACT_IDX_PROCDEF_VER_TAG ON ACT_RE_PROCDEF(VERSION_TAG_); +-- +-- Copyright © 2012 - 2018 camunda services GmbH and various authors (info@camunda.com) +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +-- create case definition table -- +create table ACT_RE_CASE_DEF ( + ID_ varchar(64) not null, + REV_ integer, + CATEGORY_ varchar(255), + NAME_ varchar(255), + KEY_ varchar(255) not null, + VERSION_ integer not null, + DEPLOYMENT_ID_ varchar(64), + RESOURCE_NAME_ varchar(4000), + DGRM_RESOURCE_NAME_ varchar(4000), + TENANT_ID_ varchar(64), + HISTORY_TTL_ integer, + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +-- create case execution table -- +create table ACT_RU_CASE_EXECUTION ( + ID_ varchar(64) NOT NULL, + REV_ integer, + CASE_INST_ID_ varchar(64), + SUPER_CASE_EXEC_ varchar(64), + SUPER_EXEC_ varchar(64), + BUSINESS_KEY_ varchar(255), + PARENT_ID_ varchar(64), + CASE_DEF_ID_ varchar(64), + ACT_ID_ varchar(255), + PREV_STATE_ integer, + CURRENT_STATE_ integer, + REQUIRED_ boolean, + TENANT_ID_ varchar(64), + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +-- create case sentry part table -- + +create table ACT_RU_CASE_SENTRY_PART ( + ID_ varchar(64) NOT NULL, + REV_ integer, + CASE_INST_ID_ varchar(64), + CASE_EXEC_ID_ varchar(64), + SENTRY_ID_ varchar(255), + TYPE_ varchar(255), + SOURCE_CASE_EXEC_ID_ varchar(64), + STANDARD_EVENT_ varchar(255), + SOURCE_ varchar(255), + VARIABLE_EVENT_ varchar(255), + VARIABLE_NAME_ varchar(255), + SATISFIED_ boolean, + TENANT_ID_ varchar(64), + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +-- create index on business key -- +create index ACT_IDX_CASE_EXEC_BUSKEY on ACT_RU_CASE_EXECUTION(BUSINESS_KEY_); + +-- https://app.camunda.com/jira/browse/CAM-9165 +create index ACT_IDX_CASE_EXE_CASE_INST on ACT_RU_CASE_EXECUTION(CASE_INST_ID_); + +-- create foreign key constraints on ACT_RU_CASE_EXECUTION -- +alter table ACT_RU_CASE_EXECUTION + add constraint ACT_FK_CASE_EXE_CASE_INST + foreign key (CASE_INST_ID_) + references ACT_RU_CASE_EXECUTION(ID_) on delete cascade on update cascade; + +alter table ACT_RU_CASE_EXECUTION + add constraint ACT_FK_CASE_EXE_PARENT + foreign key (PARENT_ID_) + references ACT_RU_CASE_EXECUTION(ID_); + +alter table ACT_RU_CASE_EXECUTION + add constraint ACT_FK_CASE_EXE_CASE_DEF + foreign key (CASE_DEF_ID_) + references ACT_RE_CASE_DEF(ID_); + +-- create foreign key constraints on ACT_RU_VARIABLE -- +alter table ACT_RU_VARIABLE + add constraint ACT_FK_VAR_CASE_EXE + foreign key (CASE_EXECUTION_ID_) + references ACT_RU_CASE_EXECUTION(ID_); + +alter table ACT_RU_VARIABLE + add constraint ACT_FK_VAR_CASE_INST + foreign key (CASE_INST_ID_) + 
references ACT_RU_CASE_EXECUTION(ID_); + +-- create foreign key constraints on ACT_RU_TASK -- +alter table ACT_RU_TASK + add constraint ACT_FK_TASK_CASE_EXE + foreign key (CASE_EXECUTION_ID_) + references ACT_RU_CASE_EXECUTION(ID_); + +alter table ACT_RU_TASK + add constraint ACT_FK_TASK_CASE_DEF + foreign key (CASE_DEF_ID_) + references ACT_RE_CASE_DEF(ID_); + +-- create foreign key constraints on ACT_RU_CASE_SENTRY_PART -- +alter table ACT_RU_CASE_SENTRY_PART + add constraint ACT_FK_CASE_SENTRY_CASE_INST + foreign key (CASE_INST_ID_) + references ACT_RU_CASE_EXECUTION(ID_); + +alter table ACT_RU_CASE_SENTRY_PART + add constraint ACT_FK_CASE_SENTRY_CASE_EXEC + foreign key (CASE_EXEC_ID_) + references ACT_RU_CASE_EXECUTION(ID_); + +create index ACT_IDX_CASE_DEF_TENANT_ID on ACT_RE_CASE_DEF(TENANT_ID_); +create index ACT_IDX_CASE_EXEC_TENANT_ID on ACT_RU_CASE_EXECUTION(TENANT_ID_); +-- +-- Copyright © 2012 - 2018 camunda services GmbH and various authors (info@camunda.com) +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +-- create decision definition table -- +create table ACT_RE_DECISION_DEF ( + ID_ varchar(64) not null, + REV_ integer, + CATEGORY_ varchar(255), + NAME_ varchar(255), + KEY_ varchar(255) not null, + VERSION_ integer not null, + DEPLOYMENT_ID_ varchar(64), + RESOURCE_NAME_ varchar(4000), + DGRM_RESOURCE_NAME_ varchar(4000), + DEC_REQ_ID_ varchar(64), + DEC_REQ_KEY_ varchar(255), + TENANT_ID_ varchar(64), + HISTORY_TTL_ integer, + VERSION_TAG_ varchar(64), + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +-- create decision requirements definition table -- +create table ACT_RE_DECISION_REQ_DEF ( + ID_ varchar(64) NOT NULL, + REV_ integer, + CATEGORY_ varchar(255), + NAME_ varchar(255), + KEY_ varchar(255) NOT NULL, + VERSION_ integer NOT NULL, + DEPLOYMENT_ID_ varchar(64), + RESOURCE_NAME_ varchar(4000), + DGRM_RESOURCE_NAME_ varchar(4000), + TENANT_ID_ varchar(64), + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +alter table ACT_RE_DECISION_DEF + add constraint ACT_FK_DEC_REQ + foreign key (DEC_REQ_ID_) + references ACT_RE_DECISION_REQ_DEF(ID_); + +create index ACT_IDX_DEC_DEF_TENANT_ID on ACT_RE_DECISION_DEF(TENANT_ID_); +create index ACT_IDX_DEC_DEF_REQ_ID on ACT_RE_DECISION_DEF(DEC_REQ_ID_); +create index ACT_IDX_DEC_REQ_DEF_TENANT_ID on ACT_RE_DECISION_REQ_DEF(TENANT_ID_); + +-- +-- Copyright © 2012 - 2018 camunda services GmbH and various authors (info@camunda.com) +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +create table ACT_HI_PROCINST ( + ID_ varchar(64) not null, + PROC_INST_ID_ varchar(64) not null, + BUSINESS_KEY_ varchar(255), + PROC_DEF_KEY_ varchar(255), + PROC_DEF_ID_ varchar(64) not null, + START_TIME_ datetime not null, + END_TIME_ datetime, + REMOVAL_TIME_ datetime, + DURATION_ bigint, + START_USER_ID_ varchar(255), + START_ACT_ID_ varchar(255), + END_ACT_ID_ varchar(255), + SUPER_PROCESS_INSTANCE_ID_ varchar(64), + ROOT_PROC_INST_ID_ varchar(64), + SUPER_CASE_INSTANCE_ID_ varchar(64), + CASE_INST_ID_ varchar(64), + DELETE_REASON_ varchar(4000), + TENANT_ID_ varchar(64), + STATE_ varchar(255), + primary key (ID_), + unique (PROC_INST_ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +create table ACT_HI_ACTINST ( + ID_ varchar(64) not null, + PARENT_ACT_INST_ID_ varchar(64), + PROC_DEF_KEY_ varchar(255), + PROC_DEF_ID_ varchar(64) not null, + ROOT_PROC_INST_ID_ varchar(64), + PROC_INST_ID_ varchar(64) not null, + EXECUTION_ID_ varchar(64) not null, + ACT_ID_ varchar(255) not null, + TASK_ID_ varchar(64), + CALL_PROC_INST_ID_ varchar(64), + CALL_CASE_INST_ID_ varchar(64), + ACT_NAME_ varchar(255), + ACT_TYPE_ varchar(255) not null, + ASSIGNEE_ varchar(64), + START_TIME_ datetime not null, + END_TIME_ datetime, + DURATION_ bigint, + ACT_INST_STATE_ integer, + SEQUENCE_COUNTER_ bigint, + TENANT_ID_ varchar(64), + REMOVAL_TIME_ datetime, + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +create table ACT_HI_TASKINST ( + ID_ varchar(64) not null, + TASK_DEF_KEY_ varchar(255), + PROC_DEF_KEY_ varchar(255), + PROC_DEF_ID_ varchar(64), + ROOT_PROC_INST_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + EXECUTION_ID_ varchar(64), + CASE_DEF_KEY_ varchar(255), + CASE_DEF_ID_ varchar(64), + CASE_INST_ID_ varchar(64), + CASE_EXECUTION_ID_ varchar(64), + ACT_INST_ID_ varchar(64), + NAME_ varchar(255), + PARENT_TASK_ID_ varchar(64), + DESCRIPTION_ varchar(4000), + OWNER_ varchar(255), + ASSIGNEE_ varchar(255), + START_TIME_ datetime not null, + END_TIME_ datetime, + DURATION_ bigint, + DELETE_REASON_ varchar(4000), + PRIORITY_ integer, + DUE_DATE_ datetime, + FOLLOW_UP_DATE_ datetime, + TENANT_ID_ varchar(64), + REMOVAL_TIME_ datetime, + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +create table ACT_HI_VARINST ( + ID_ varchar(64) not null, + PROC_DEF_KEY_ varchar(255), + PROC_DEF_ID_ varchar(64), + ROOT_PROC_INST_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + EXECUTION_ID_ varchar(64), + ACT_INST_ID_ varchar(64), + CASE_DEF_KEY_ varchar(255), + CASE_DEF_ID_ varchar(64), + CASE_INST_ID_ varchar(64), + CASE_EXECUTION_ID_ varchar(64), + TASK_ID_ varchar(64), + NAME_ varchar(255) not null, + VAR_TYPE_ varchar(100), + CREATE_TIME_ datetime, + REV_ integer, + BYTEARRAY_ID_ varchar(64), + DOUBLE_ double, + LONG_ bigint, + TEXT_ varchar(4000), + TEXT2_ varchar(4000), + TENANT_ID_ varchar(64), + STATE_ varchar(20), + REMOVAL_TIME_ datetime, + primary key (ID_) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin; + +create table ACT_HI_DETAIL ( + ID_ varchar(64) not null, + TYPE_ varchar(255) not null, + PROC_DEF_KEY_ varchar(255), + PROC_DEF_ID_ varchar(64), + ROOT_PROC_INST_ID_ varchar(64), + PROC_INST_ID_ varchar(64), + EXECUTION_ID_ varchar(64), + CASE_DEF_KEY_ varchar(255), + CASE_DEF_ID_ varchar(64), + CASE_INST_ID_ varchar(64), + CASE_EXECUTION_ID_ varchar(64), + TASK_ID_ varchar(64), + ACT_INST_ID_ varchar(64), + 
VAR_INST_ID_ varchar(64),
+  NAME_ varchar(255) not null,
+  VAR_TYPE_ varchar(255),
+  REV_ integer,
+  TIME_ datetime not null,
+  BYTEARRAY_ID_ varchar(64),
+  DOUBLE_ double,
+  LONG_ bigint,
+  TEXT_ varchar(4000),
+  TEXT2_ varchar(4000),
+  SEQUENCE_COUNTER_ bigint,
+  TENANT_ID_ varchar(64),
+  OPERATION_ID_ varchar(64),
+  REMOVAL_TIME_ datetime,
+  primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_IDENTITYLINK (
+  ID_ varchar(64) not null,
+  TIMESTAMP_ timestamp not null,
+  TYPE_ varchar(255),
+  USER_ID_ varchar(255),
+  GROUP_ID_ varchar(255),
+  TASK_ID_ varchar(64),
+  ROOT_PROC_INST_ID_ varchar(64),
+  PROC_DEF_ID_ varchar(64),
+  OPERATION_TYPE_ varchar(64),
+  ASSIGNER_ID_ varchar(64),
+  PROC_DEF_KEY_ varchar(255),
+  TENANT_ID_ varchar(64),
+  REMOVAL_TIME_ datetime,
+  primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_COMMENT (
+  ID_ varchar(64) not null,
+  TYPE_ varchar(255),
+  TIME_ datetime not null,
+  USER_ID_ varchar(255),
+  TASK_ID_ varchar(64),
+  ROOT_PROC_INST_ID_ varchar(64),
+  PROC_INST_ID_ varchar(64),
+  ACTION_ varchar(255),
+  MESSAGE_ varchar(4000),
+  FULL_MSG_ LONGBLOB,
+  TENANT_ID_ varchar(64),
+  REMOVAL_TIME_ datetime,
+  primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_ATTACHMENT (
+  ID_ varchar(64) not null,
+  REV_ integer,
+  USER_ID_ varchar(255),
+  NAME_ varchar(255),
+  DESCRIPTION_ varchar(4000),
+  TYPE_ varchar(255),
+  TASK_ID_ varchar(64),
+  ROOT_PROC_INST_ID_ varchar(64),
+  PROC_INST_ID_ varchar(64),
+  URL_ varchar(4000),
+  CONTENT_ID_ varchar(64),
+  TENANT_ID_ varchar(64),
+  CREATE_TIME_ datetime,
+  REMOVAL_TIME_ datetime,
+  primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_OP_LOG (
+  ID_ varchar(64) not null,
+  DEPLOYMENT_ID_ varchar(64),
+  PROC_DEF_ID_ varchar(64),
+  PROC_DEF_KEY_ varchar(255),
+  ROOT_PROC_INST_ID_ varchar(64),
+  PROC_INST_ID_ varchar(64),
+  EXECUTION_ID_ varchar(64),
+  CASE_DEF_ID_ varchar(64),
+  CASE_INST_ID_ varchar(64),
+  CASE_EXECUTION_ID_ varchar(64),
+  TASK_ID_ varchar(64),
+  JOB_ID_ varchar(64),
+  JOB_DEF_ID_ varchar(64),
+  BATCH_ID_ varchar(64),
+  USER_ID_ varchar(255),
+  TIMESTAMP_ timestamp not null,
+  OPERATION_TYPE_ varchar(64),
+  OPERATION_ID_ varchar(64),
+  ENTITY_TYPE_ varchar(30),
+  PROPERTY_ varchar(64),
+  ORG_VALUE_ varchar(4000),
+  NEW_VALUE_ varchar(4000),
+  TENANT_ID_ varchar(64),
+  REMOVAL_TIME_ datetime,
+  primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_INCIDENT (
+  ID_ varchar(64) not null,
+  PROC_DEF_KEY_ varchar(255),
+  PROC_DEF_ID_ varchar(64),
+  ROOT_PROC_INST_ID_ varchar(64),
+  PROC_INST_ID_ varchar(64),
+  EXECUTION_ID_ varchar(64),
+  CREATE_TIME_ timestamp not null,
+  END_TIME_ timestamp null,
+  INCIDENT_MSG_ varchar(4000),
+  INCIDENT_TYPE_ varchar(255) not null,
+  ACTIVITY_ID_ varchar(255),
+  CAUSE_INCIDENT_ID_ varchar(64),
+  ROOT_CAUSE_INCIDENT_ID_ varchar(64),
+  CONFIGURATION_ varchar(255),
+  INCIDENT_STATE_ integer,
+  TENANT_ID_ varchar(64),
+  JOB_DEF_ID_ varchar(64),
+  REMOVAL_TIME_ datetime,
+  primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_JOB_LOG (
+  ID_ varchar(64) not null,
+  TIMESTAMP_ timestamp not null,
+  JOB_ID_ varchar(64) not null,
+  JOB_DUEDATE_ timestamp NULL,
+  JOB_RETRIES_ integer,
+  JOB_PRIORITY_ bigint NOT NULL DEFAULT 0,
+  JOB_EXCEPTION_MSG_ varchar(4000),
+  JOB_EXCEPTION_STACK_ID_ varchar(64),
+  JOB_STATE_ integer,
+  JOB_DEF_ID_ varchar(64),
+  JOB_DEF_TYPE_ varchar(255),
+  JOB_DEF_CONFIGURATION_ varchar(255),
+  ACT_ID_ varchar(255),
+  EXECUTION_ID_ varchar(64),
+  ROOT_PROC_INST_ID_ varchar(64),
+  PROCESS_INSTANCE_ID_ varchar(64),
+  PROCESS_DEF_ID_ varchar(64),
+  PROCESS_DEF_KEY_ varchar(255),
+  DEPLOYMENT_ID_ varchar(64),
+  SEQUENCE_COUNTER_ bigint,
+  TENANT_ID_ varchar(64),
+  REMOVAL_TIME_ datetime,
+  primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_BATCH (
+  ID_ varchar(64) not null,
+  TYPE_ varchar(255),
+  TOTAL_JOBS_ integer,
+  JOBS_PER_SEED_ integer,
+  INVOCATIONS_PER_JOB_ integer,
+  SEED_JOB_DEF_ID_ varchar(64),
+  MONITOR_JOB_DEF_ID_ varchar(64),
+  BATCH_JOB_DEF_ID_ varchar(64),
+  TENANT_ID_ varchar(64),
+  CREATE_USER_ID_ varchar(255),
+  START_TIME_ datetime not null,
+  END_TIME_ datetime,
+  REMOVAL_TIME_ datetime,
+  primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_EXT_TASK_LOG (
+  ID_ varchar(64) not null,
+  TIMESTAMP_ timestamp not null,
+  EXT_TASK_ID_ varchar(64) not null,
+  RETRIES_ integer,
+  TOPIC_NAME_ varchar(255),
+  WORKER_ID_ varchar(255),
+  PRIORITY_ bigint not null default 0,
+  ERROR_MSG_ varchar(4000),
+  ERROR_DETAILS_ID_ varchar(64),
+  ACT_ID_ varchar(255),
+  ACT_INST_ID_ varchar(64),
+  EXECUTION_ID_ varchar(64),
+  ROOT_PROC_INST_ID_ varchar(64),
+  PROC_INST_ID_ varchar(64),
+  PROC_DEF_ID_ varchar(64),
+  PROC_DEF_KEY_ varchar(255),
+  TENANT_ID_ varchar(64),
+  STATE_ integer,
+  REV_ integer,
+  REMOVAL_TIME_ datetime,
+  primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create index ACT_IDX_HI_PRO_INST_END on ACT_HI_PROCINST(END_TIME_);
+create index ACT_IDX_HI_PRO_I_BUSKEY on ACT_HI_PROCINST(BUSINESS_KEY_);
+create index ACT_IDX_HI_PRO_INST_TENANT_ID on ACT_HI_PROCINST(TENANT_ID_);
+create index ACT_IDX_HI_PRO_INST_PROC_DEF_KEY on ACT_HI_PROCINST(PROC_DEF_KEY_);
+create index ACT_IDX_HI_PRO_INST_PROC_TIME on ACT_HI_PROCINST(START_TIME_, END_TIME_);
+create index ACT_IDX_HI_PI_PDEFID_END_TIME on ACT_HI_PROCINST(PROC_DEF_ID_, END_TIME_);
+create index ACT_IDX_HI_PRO_INST_ROOT_PI on ACT_HI_PROCINST(ROOT_PROC_INST_ID_);
+create index ACT_IDX_HI_PRO_INST_RM_TIME on ACT_HI_PROCINST(REMOVAL_TIME_);
+
+create index ACT_IDX_HI_ACTINST_ROOT_PI on ACT_HI_ACTINST(ROOT_PROC_INST_ID_);
+create index ACT_IDX_HI_ACT_INST_START on ACT_HI_ACTINST(START_TIME_);
+create index ACT_IDX_HI_ACT_INST_END on ACT_HI_ACTINST(END_TIME_);
+create index ACT_IDX_HI_ACT_INST_PROCINST on ACT_HI_ACTINST(PROC_INST_ID_, ACT_ID_);
+create index ACT_IDX_HI_ACT_INST_COMP on ACT_HI_ACTINST(EXECUTION_ID_, ACT_ID_, END_TIME_, ID_);
+create index ACT_IDX_HI_ACT_INST_STATS on ACT_HI_ACTINST(PROC_DEF_ID_, PROC_INST_ID_, ACT_ID_, END_TIME_, ACT_INST_STATE_);
+create index ACT_IDX_HI_ACT_INST_TENANT_ID on ACT_HI_ACTINST(TENANT_ID_);
+create index ACT_IDX_HI_ACT_INST_PROC_DEF_KEY on ACT_HI_ACTINST(PROC_DEF_KEY_);
+create index ACT_IDX_HI_AI_PDEFID_END_TIME on ACT_HI_ACTINST(PROC_DEF_ID_, END_TIME_);
+create index ACT_IDX_HI_ACT_INST_RM_TIME on ACT_HI_ACTINST(REMOVAL_TIME_);
+
+create index ACT_IDX_HI_TASKINST_ROOT_PI on ACT_HI_TASKINST(ROOT_PROC_INST_ID_);
+create index ACT_IDX_HI_TASK_INST_TENANT_ID on ACT_HI_TASKINST(TENANT_ID_);
+create index ACT_IDX_HI_TASK_INST_PROC_DEF_KEY on ACT_HI_TASKINST(PROC_DEF_KEY_);
+create index ACT_IDX_HI_TASKINST_PROCINST on ACT_HI_TASKINST(PROC_INST_ID_);
+create index ACT_IDX_HI_TASKINSTID_PROCINST on ACT_HI_TASKINST(ID_,PROC_INST_ID_);
+create index ACT_IDX_HI_TASK_INST_RM_TIME on ACT_HI_TASKINST(REMOVAL_TIME_);
+create index ACT_IDX_HI_TASK_INST_START on ACT_HI_TASKINST(START_TIME_);
+create index ACT_IDX_HI_TASK_INST_END on ACT_HI_TASKINST(END_TIME_);
+
+create index ACT_IDX_HI_DETAIL_ROOT_PI on ACT_HI_DETAIL(ROOT_PROC_INST_ID_);
+create index ACT_IDX_HI_DETAIL_PROC_INST on ACT_HI_DETAIL(PROC_INST_ID_);
+create index ACT_IDX_HI_DETAIL_ACT_INST on ACT_HI_DETAIL(ACT_INST_ID_);
+create index ACT_IDX_HI_DETAIL_CASE_INST on ACT_HI_DETAIL(CASE_INST_ID_);
+create index ACT_IDX_HI_DETAIL_CASE_EXEC on ACT_HI_DETAIL(CASE_EXECUTION_ID_);
+create index ACT_IDX_HI_DETAIL_TIME on ACT_HI_DETAIL(TIME_);
+create index ACT_IDX_HI_DETAIL_NAME on ACT_HI_DETAIL(NAME_);
+create index ACT_IDX_HI_DETAIL_TASK_ID on ACT_HI_DETAIL(TASK_ID_);
+create index ACT_IDX_HI_DETAIL_TENANT_ID on ACT_HI_DETAIL(TENANT_ID_);
+create index ACT_IDX_HI_DETAIL_PROC_DEF_KEY on ACT_HI_DETAIL(PROC_DEF_KEY_);
+create index ACT_IDX_HI_DETAIL_BYTEAR on ACT_HI_DETAIL(BYTEARRAY_ID_);
+create index ACT_IDX_HI_DETAIL_RM_TIME on ACT_HI_DETAIL(REMOVAL_TIME_);
+create index ACT_IDX_HI_DETAIL_TASK_BYTEAR on ACT_HI_DETAIL(BYTEARRAY_ID_, TASK_ID_);
+
+create index ACT_IDX_HI_IDENT_LNK_ROOT_PI on ACT_HI_IDENTITYLINK(ROOT_PROC_INST_ID_);
+create index ACT_IDX_HI_IDENT_LNK_USER on ACT_HI_IDENTITYLINK(USER_ID_);
+create index ACT_IDX_HI_IDENT_LNK_GROUP on ACT_HI_IDENTITYLINK(GROUP_ID_);
+create index ACT_IDX_HI_IDENT_LNK_TENANT_ID on ACT_HI_IDENTITYLINK(TENANT_ID_);
+create index ACT_IDX_HI_IDENT_LNK_PROC_DEF_KEY on ACT_HI_IDENTITYLINK(PROC_DEF_KEY_);
+create index ACT_IDX_HI_IDENT_LINK_TASK on ACT_HI_IDENTITYLINK(TASK_ID_);
+create index ACT_IDX_HI_IDENT_LINK_RM_TIME on ACT_HI_IDENTITYLINK(REMOVAL_TIME_);
+
+create index ACT_IDX_HI_VARINST_ROOT_PI on ACT_HI_VARINST(ROOT_PROC_INST_ID_);
+create index ACT_IDX_HI_PROCVAR_PROC_INST on ACT_HI_VARINST(PROC_INST_ID_);
+create index ACT_IDX_HI_PROCVAR_NAME_TYPE on ACT_HI_VARINST(NAME_, VAR_TYPE_);
+create index ACT_IDX_HI_CASEVAR_CASE_INST on ACT_HI_VARINST(CASE_INST_ID_);
+create index ACT_IDX_HI_VAR_INST_TENANT_ID on ACT_HI_VARINST(TENANT_ID_);
+create index ACT_IDX_HI_VAR_INST_PROC_DEF_KEY on ACT_HI_VARINST(PROC_DEF_KEY_);
+create index ACT_IDX_HI_VARINST_BYTEAR on ACT_HI_VARINST(BYTEARRAY_ID_);
+create index ACT_IDX_HI_VARINST_RM_TIME on ACT_HI_VARINST(REMOVAL_TIME_);
+
+create index ACT_IDX_HI_INCIDENT_TENANT_ID on ACT_HI_INCIDENT(TENANT_ID_);
+create index ACT_IDX_HI_INCIDENT_PROC_DEF_KEY on ACT_HI_INCIDENT(PROC_DEF_KEY_);
+create index ACT_IDX_HI_INCIDENT_ROOT_PI on ACT_HI_INCIDENT(ROOT_PROC_INST_ID_);
+create index ACT_IDX_HI_INCIDENT_PROCINST on ACT_HI_INCIDENT(PROC_INST_ID_);
+create index ACT_IDX_HI_INCIDENT_RM_TIME on ACT_HI_INCIDENT(REMOVAL_TIME_);
+
+create index ACT_IDX_HI_JOB_LOG_ROOT_PI on ACT_HI_JOB_LOG(ROOT_PROC_INST_ID_);
+create index ACT_IDX_HI_JOB_LOG_PROCINST on ACT_HI_JOB_LOG(PROCESS_INSTANCE_ID_);
+create index ACT_IDX_HI_JOB_LOG_PROCDEF on ACT_HI_JOB_LOG(PROCESS_DEF_ID_);
+create index ACT_IDX_HI_JOB_LOG_TENANT_ID on ACT_HI_JOB_LOG(TENANT_ID_);
+create index ACT_IDX_HI_JOB_LOG_JOB_DEF_ID on ACT_HI_JOB_LOG(JOB_DEF_ID_);
+create index ACT_IDX_HI_JOB_LOG_PROC_DEF_KEY on ACT_HI_JOB_LOG(PROCESS_DEF_KEY_);
+create index ACT_IDX_HI_JOB_LOG_EX_STACK on ACT_HI_JOB_LOG(JOB_EXCEPTION_STACK_ID_);
+create index ACT_IDX_HI_JOB_LOG_RM_TIME on ACT_HI_JOB_LOG(REMOVAL_TIME_);
+
+create index ACT_HI_BAT_RM_TIME on ACT_HI_BATCH(REMOVAL_TIME_);
+
+create index ACT_HI_EXT_TASK_LOG_ROOT_PI on ACT_HI_EXT_TASK_LOG(ROOT_PROC_INST_ID_);
+create index ACT_HI_EXT_TASK_LOG_PROCINST on ACT_HI_EXT_TASK_LOG(PROC_INST_ID_);
+create index ACT_HI_EXT_TASK_LOG_PROCDEF on ACT_HI_EXT_TASK_LOG(PROC_DEF_ID_);
+create index ACT_HI_EXT_TASK_LOG_PROC_DEF_KEY on ACT_HI_EXT_TASK_LOG(PROC_DEF_KEY_);
+create index ACT_HI_EXT_TASK_LOG_TENANT_ID on ACT_HI_EXT_TASK_LOG(TENANT_ID_);
+create index ACT_IDX_HI_EXTTASKLOG_ERRORDET on ACT_HI_EXT_TASK_LOG(ERROR_DETAILS_ID_);
+create index ACT_HI_EXT_TASK_LOG_RM_TIME on ACT_HI_EXT_TASK_LOG(REMOVAL_TIME_);
+
+create index ACT_IDX_HI_OP_LOG_ROOT_PI on ACT_HI_OP_LOG(ROOT_PROC_INST_ID_);
+create index ACT_IDX_HI_OP_LOG_PROCINST on ACT_HI_OP_LOG(PROC_INST_ID_);
+create index ACT_IDX_HI_OP_LOG_PROCDEF on ACT_HI_OP_LOG(PROC_DEF_ID_);
+create index ACT_IDX_HI_OP_LOG_TASK on ACT_HI_OP_LOG(TASK_ID_);
+create index ACT_IDX_HI_OP_LOG_RM_TIME on ACT_HI_OP_LOG(REMOVAL_TIME_);
+create index ACT_IDX_HI_OP_LOG_TIMESTAMP on ACT_HI_OP_LOG(TIMESTAMP_);
+
+create index ACT_IDX_HI_ATTACHMENT_CONTENT on ACT_HI_ATTACHMENT(CONTENT_ID_);
+create index ACT_IDX_HI_ATTACHMENT_ROOT_PI on ACT_HI_ATTACHMENT(ROOT_PROC_INST_ID_);
+create index ACT_IDX_HI_ATTACHMENT_PROCINST on ACT_HI_ATTACHMENT(PROC_INST_ID_);
+create index ACT_IDX_HI_ATTACHMENT_TASK on ACT_HI_ATTACHMENT(TASK_ID_);
+create index ACT_IDX_HI_ATTACHMENT_RM_TIME on ACT_HI_ATTACHMENT(REMOVAL_TIME_);
+
+create index ACT_IDX_HI_COMMENT_TASK on ACT_HI_COMMENT(TASK_ID_);
+create index ACT_IDX_HI_COMMENT_ROOT_PI on ACT_HI_COMMENT(ROOT_PROC_INST_ID_);
+create index ACT_IDX_HI_COMMENT_PROCINST on ACT_HI_COMMENT(PROC_INST_ID_);
+create index ACT_IDX_HI_COMMENT_RM_TIME on ACT_HI_COMMENT(REMOVAL_TIME_);
+--
+-- Copyright © 2012 - 2018 camunda services GmbH and various authors (info@camunda.com)
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+create table ACT_HI_CASEINST (
+  ID_ varchar(64) not null,
+  CASE_INST_ID_ varchar(64) not null,
+  BUSINESS_KEY_ varchar(255),
+  CASE_DEF_ID_ varchar(64) not null,
+  CREATE_TIME_ datetime not null,
+  CLOSE_TIME_ datetime,
+  DURATION_ bigint,
+  STATE_ integer,
+  CREATE_USER_ID_ varchar(255),
+  SUPER_CASE_INSTANCE_ID_ varchar(64),
+  SUPER_PROCESS_INSTANCE_ID_ varchar(64),
+  TENANT_ID_ varchar(64),
+  primary key (ID_),
+  unique (CASE_INST_ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_CASEACTINST (
+  ID_ varchar(64) not null,
+  PARENT_ACT_INST_ID_ varchar(64),
+  CASE_DEF_ID_ varchar(64) not null,
+  CASE_INST_ID_ varchar(64) not null,
+  CASE_ACT_ID_ varchar(255) not null,
+  TASK_ID_ varchar(64),
+  CALL_PROC_INST_ID_ varchar(64),
+  CALL_CASE_INST_ID_ varchar(64),
+  CASE_ACT_NAME_ varchar(255),
+  CASE_ACT_TYPE_ varchar(255),
+  CREATE_TIME_ datetime not null,
+  END_TIME_ datetime,
+  DURATION_ bigint,
+  STATE_ integer,
+  REQUIRED_ boolean,
+  TENANT_ID_ varchar(64),
+  primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create index ACT_IDX_HI_CAS_I_CLOSE on ACT_HI_CASEINST(CLOSE_TIME_);
+create index ACT_IDX_HI_CAS_I_BUSKEY on ACT_HI_CASEINST(BUSINESS_KEY_);
+create index ACT_IDX_HI_CAS_I_TENANT_ID on ACT_HI_CASEINST(TENANT_ID_);
+create index ACT_IDX_HI_CAS_A_I_CREATE on ACT_HI_CASEACTINST(CREATE_TIME_);
+create index ACT_IDX_HI_CAS_A_I_END on ACT_HI_CASEACTINST(END_TIME_);
+create index ACT_IDX_HI_CAS_A_I_COMP on ACT_HI_CASEACTINST(CASE_ACT_ID_, END_TIME_, ID_);
+create index ACT_IDX_HI_CAS_A_I_CASEINST on ACT_HI_CASEACTINST(CASE_INST_ID_, CASE_ACT_ID_);
+create index ACT_IDX_HI_CAS_A_I_TENANT_ID on ACT_HI_CASEACTINST(TENANT_ID_);
+--
+-- Copyright © 2012 - 2018 camunda services GmbH and various authors (info@camunda.com)
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+-- create history decision instance table --
+create table ACT_HI_DECINST (
+  ID_ varchar(64) NOT NULL,
+  DEC_DEF_ID_ varchar(64) NOT NULL,
+  DEC_DEF_KEY_ varchar(255) NOT NULL,
+  DEC_DEF_NAME_ varchar(255),
+  PROC_DEF_KEY_ varchar(255),
+  PROC_DEF_ID_ varchar(64),
+  PROC_INST_ID_ varchar(64),
+  CASE_DEF_KEY_ varchar(255),
+  CASE_DEF_ID_ varchar(64),
+  CASE_INST_ID_ varchar(64),
+  ACT_INST_ID_ varchar(64),
+  ACT_ID_ varchar(255),
+  EVAL_TIME_ datetime not null,
+  REMOVAL_TIME_ datetime,
+  COLLECT_VALUE_ double,
+  USER_ID_ varchar(255),
+  ROOT_DEC_INST_ID_ varchar(64),
+  ROOT_PROC_INST_ID_ varchar(64),
+  DEC_REQ_ID_ varchar(64),
+  DEC_REQ_KEY_ varchar(255),
+  TENANT_ID_ varchar(64),
+  primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+-- create history decision input table --
+create table ACT_HI_DEC_IN (
+  ID_ varchar(64) NOT NULL,
+  DEC_INST_ID_ varchar(64) NOT NULL,
+  CLAUSE_ID_ varchar(64),
+  CLAUSE_NAME_ varchar(255),
+  VAR_TYPE_ varchar(100),
+  BYTEARRAY_ID_ varchar(64),
+  DOUBLE_ double,
+  LONG_ bigint,
+  TEXT_ varchar(4000),
+  TEXT2_ varchar(4000),
+  TENANT_ID_ varchar(64),
+  CREATE_TIME_ datetime,
+  ROOT_PROC_INST_ID_ varchar(64),
+  REMOVAL_TIME_ datetime,
+  primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+-- create history decision output table --
+create table ACT_HI_DEC_OUT (
+  ID_ varchar(64) NOT NULL,
+  DEC_INST_ID_ varchar(64) NOT NULL,
+  CLAUSE_ID_ varchar(64),
+  CLAUSE_NAME_ varchar(255),
+  RULE_ID_ varchar(64),
+  RULE_ORDER_ integer,
+  VAR_NAME_ varchar(255),
+  VAR_TYPE_ varchar(100),
+  BYTEARRAY_ID_ varchar(64),
+  DOUBLE_ double,
+  LONG_ bigint,
+  TEXT_ varchar(4000),
+  TEXT2_ varchar(4000),
+  TENANT_ID_ varchar(64),
+  CREATE_TIME_ datetime,
+  ROOT_PROC_INST_ID_ varchar(64),
+  REMOVAL_TIME_ datetime,
+  primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+
+create index ACT_IDX_HI_DEC_INST_ID on ACT_HI_DECINST(DEC_DEF_ID_);
+create index ACT_IDX_HI_DEC_INST_KEY on ACT_HI_DECINST(DEC_DEF_KEY_);
+create index ACT_IDX_HI_DEC_INST_PI on ACT_HI_DECINST(PROC_INST_ID_);
+create index ACT_IDX_HI_DEC_INST_CI on ACT_HI_DECINST(CASE_INST_ID_);
+create index ACT_IDX_HI_DEC_INST_ACT on ACT_HI_DECINST(ACT_ID_);
+create index ACT_IDX_HI_DEC_INST_ACT_INST on ACT_HI_DECINST(ACT_INST_ID_);
+create index ACT_IDX_HI_DEC_INST_TIME on ACT_HI_DECINST(EVAL_TIME_);
+create index ACT_IDX_HI_DEC_INST_TENANT_ID on ACT_HI_DECINST(TENANT_ID_);
+create index ACT_IDX_HI_DEC_INST_ROOT_ID on ACT_HI_DECINST(ROOT_DEC_INST_ID_);
+create index ACT_IDX_HI_DEC_INST_REQ_ID on ACT_HI_DECINST(DEC_REQ_ID_);
+create index ACT_IDX_HI_DEC_INST_REQ_KEY on ACT_HI_DECINST(DEC_REQ_KEY_);
+create index ACT_IDX_HI_DEC_INST_ROOT_PI on ACT_HI_DECINST(ROOT_PROC_INST_ID_);
+create index ACT_IDX_HI_DEC_INST_RM_TIME on ACT_HI_DECINST(REMOVAL_TIME_);
+
+create index ACT_IDX_HI_DEC_IN_INST on ACT_HI_DEC_IN(DEC_INST_ID_);
+create index ACT_IDX_HI_DEC_IN_CLAUSE on ACT_HI_DEC_IN(DEC_INST_ID_, CLAUSE_ID_);
+create index ACT_IDX_HI_DEC_IN_ROOT_PI on ACT_HI_DEC_IN(ROOT_PROC_INST_ID_);
+create index ACT_IDX_HI_DEC_IN_RM_TIME on ACT_HI_DEC_IN(REMOVAL_TIME_);
+
+create index ACT_IDX_HI_DEC_OUT_INST on ACT_HI_DEC_OUT(DEC_INST_ID_);
+create index ACT_IDX_HI_DEC_OUT_RULE on ACT_HI_DEC_OUT(RULE_ORDER_, CLAUSE_ID_);
+create index ACT_IDX_HI_DEC_OUT_ROOT_PI on ACT_HI_DEC_OUT(ROOT_PROC_INST_ID_);
+create index ACT_IDX_HI_DEC_OUT_RM_TIME on ACT_HI_DEC_OUT(REMOVAL_TIME_);
diff --git a/otf-helm/otf/charts/databases/charts/mysqldb/scripts/mysql_identity_7.10.0.sql b/otf-helm/otf/charts/databases/charts/mysqldb/scripts/mysql_identity_7.10.0.sql
new file mode 100644
index 0000000..113240d
--- /dev/null
+++ b/otf-helm/otf/charts/databases/charts/mysqldb/scripts/mysql_identity_7.10.0.sql
@@ -0,0 +1,103 @@
+--
+-- Copyright © 2012 - 2018 camunda services GmbH and various authors (info@camunda.com)
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+create table ACT_ID_GROUP (
+  ID_ varchar(64),
+  REV_ integer,
+  NAME_ varchar(255),
+  TYPE_ varchar(255),
+  primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_ID_MEMBERSHIP (
+  USER_ID_ varchar(64),
+  GROUP_ID_ varchar(64),
+  primary key (USER_ID_, GROUP_ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_ID_USER (
+  ID_ varchar(64),
+  REV_ integer,
+  FIRST_ varchar(255),
+  LAST_ varchar(255),
+  EMAIL_ varchar(255),
+  PWD_ varchar(255),
+  SALT_ varchar(255),
+  LOCK_EXP_TIME_ timestamp NULL,
+  ATTEMPTS_ integer,
+  PICTURE_ID_ varchar(64),
+  primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_ID_INFO (
+  ID_ varchar(64),
+  REV_ integer,
+  USER_ID_ varchar(64),
+  TYPE_ varchar(64),
+  KEY_ varchar(255),
+  VALUE_ varchar(255),
+  PASSWORD_ LONGBLOB,
+  PARENT_ID_ varchar(255),
+  primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_ID_TENANT (
+  ID_ varchar(64),
+  REV_ integer,
+  NAME_ varchar(255),
+  primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_ID_TENANT_MEMBER (
+  ID_ varchar(64) not null,
+  TENANT_ID_ varchar(64) not null,
+  USER_ID_ varchar(64),
+  GROUP_ID_ varchar(64),
+  primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+alter table ACT_ID_MEMBERSHIP
+  add constraint ACT_FK_MEMB_GROUP
+  foreign key (GROUP_ID_)
+  references ACT_ID_GROUP (ID_);
+
+alter table ACT_ID_MEMBERSHIP
+  add constraint ACT_FK_MEMB_USER
+  foreign key (USER_ID_)
+  references ACT_ID_USER (ID_);
+
+alter table ACT_ID_TENANT_MEMBER
+  add constraint ACT_UNIQ_TENANT_MEMB_USER
+  unique (TENANT_ID_, USER_ID_);
+
+alter table ACT_ID_TENANT_MEMBER
+  add constraint ACT_UNIQ_TENANT_MEMB_GROUP
+  unique (TENANT_ID_, GROUP_ID_);
+
+alter table ACT_ID_TENANT_MEMBER
+  add constraint ACT_FK_TENANT_MEMB
+  foreign key (TENANT_ID_)
+  references ACT_ID_TENANT (ID_);
+
+alter table ACT_ID_TENANT_MEMBER
+  add constraint ACT_FK_TENANT_MEMB_USER
+  foreign key (USER_ID_)
+  references ACT_ID_USER (ID_);
+
+alter table ACT_ID_TENANT_MEMBER
+  add constraint ACT_FK_TENANT_MEMB_GROUP
+  foreign key (GROUP_ID_)
+  references ACT_ID_GROUP (ID_);
diff --git a/otf-helm/otf/charts/databases/charts/mysqldb/templates/configmap.yaml b/otf-helm/otf/charts/databases/charts/mysqldb/templates/configmap.yaml
new file mode 100644
index 0000000..858345f
--- /dev/null
+++ b/otf-helm/otf/charts/databases/charts/mysqldb/templates/configmap.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ .Values.global.mysqldb.appName }}-init-scripts
+
+data:
+  {{- $files := .Files }}
+  {{- range $key, $value := .Files }}
+  {{- if hasPrefix "scripts/" $key }} {{/* only when in scripts/ */}}
+  {{ $key | trimPrefix "scripts/" }}: {{ $files.Get $key | quote }} {{/* adapt $key as desired */}}
+  {{- end }}
+  {{- end }}
diff --git a/otf-helm/otf/charts/databases/charts/mysqldb/templates/deployment.yaml b/otf-helm/otf/charts/databases/charts/mysqldb/templates/deployment.yaml
new file mode 100644
index 0000000..41f8d33
--- /dev/null
+++ b/otf-helm/otf/charts/databases/charts/mysqldb/templates/deployment.yaml
@@ -0,0 +1,97 @@
+apiVersion: apps/v1
+kind: Deployment
+
+metadata:
+  name: {{ .Values.global.mysqldb.appName }}
+  namespace: {{ .Values.global.mysqldb.namespace }}
+  labels:
+    app: {{ .Values.global.mysqldb.appName }}
+    version: {{ .Values.global.mysqldb.version }}
+
+spec:
+  strategy:
+    # indicate which strategy we want for rolling update
+    type: RollingUpdate
+    rollingUpdate:
+      maxSurge: 1
+      maxUnavailable: 1
+  replicas: {{ .Values.global.mysqldb.replicas }}
+  selector:
+    matchLabels:
+      app: {{ .Values.global.mysqldb.appName }}
+      version: {{ .Values.global.mysqldb.version }}
+  template:
+    metadata:
+      labels:
+        app: {{ .Values.global.mysqldb.appName }}
+        version: {{ .Values.global.mysqldb.version }}
+    spec:
+      serviceAccountName: default
+      containers:
+        - name: {{ .Values.global.mysqldb.appName }}
+          image: {{ .Values.global.mysqldb.image.image }}:{{ .Values.global.mysqldb.image.tag }}
+          imagePullPolicy: Always
+          resources:
+            limits:
+              memory: {{ .Values.global.mysqldb.resources.limits.memory }}
+              cpu: {{ .Values.global.mysqldb.resources.limits.cpu }}
+            requests:
+              memory: {{ .Values.global.mysqldb.resources.requests.memory }}
+              cpu: {{ .Values.global.mysqldb.resources.requests.cpu }}
+          env:
+            - name: MYSQL_ROOT_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Values.global.mysqldb.appName }}
+                  key: mysql_root_password
+            - name: MYSQL_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Values.global.mysqldb.appName }}
+                  key: mysql_password
+            - name: MYSQL_USER
+              value: {{ .Values.global.mysqldb.mysqlUser | quote }}
+            - name: MYSQL_DATABASE
+              value: {{ .Values.global.mysqldb.mysqlDatabase | quote }}
+            - name: TZ
+              value: {{ .Values.timezone }}
+          ports:
+            - name: {{ .Values.global.mysqldb.appName }}
+              containerPort: 3306
+          livenessProbe:
+            exec:
+              command:
+                - sh
+                - -c
+                - "mysqladmin ping -u root -p${MYSQL_ROOT_PASSWORD}"
+            initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.livenessProbe.successThreshold }}
+            failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
+          readinessProbe:
+            exec:
+              command:
+                - sh
+                - -c
+                - "mysqladmin ping -u root -p${MYSQL_ROOT_PASSWORD}"
+            initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.readinessProbe.successThreshold }}
+            failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
+          lifecycle:
+            postStart:
+              exec:
+                command: ["/bin/bash", "-c", "cd /data/scripts && ./init_db.sh"]
+          volumeMounts:
+            - name: custom-init-scripts
+              mountPath: /data/scripts
+      volumes:
+        - name: custom-init-scripts
+          configMap:
+            name: {{ .Values.global.mysqldb.appName }}-init-scripts
+            defaultMode: 0755
+      restartPolicy: Always
+
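For reference, the configmap, deployment, ingress, secret, and service templates in this commit resolve every setting from .Values.global.mysqldb. A minimal values block satisfying them might look like the sketch below; the key names are taken from the templates themselves, while all sample values are placeholders and not part of this commit:

    global:
      mysqldb:
        appName: otf-mysqldb          # placeholder name
        namespace: otf                # placeholder namespace
        version: "0.0.1"
        replicas: 1
        image:
          image: mysql
          tag: "5.7.26"
        mysqlUser: otfuser            # placeholder credentials
        mysqlDatabase: otf
        mysqlRootPassword: changeme
        mysqlPassword: changeme
        port: 3306
        targetPort: 3306
        certName: otf-tls-cert        # placeholder TLS secret name
        nodeApi:
          host: otf.example.com       # placeholder ingress host
        resources:
          limits:
            memory: 512Mi
            cpu: 500m
          requests:
            memory: 256Mi
            cpu: 100m
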
diff --git a/otf-helm/otf/charts/databases/charts/mysqldb/templates/ingress.yaml b/otf-helm/otf/charts/databases/charts/mysqldb/templates/ingress.yaml
new file mode 100644
index 0000000..4abb02c
--- /dev/null
+++ b/otf-helm/otf/charts/databases/charts/mysqldb/templates/ingress.yaml
@@ -0,0 +1,30 @@
+# Needs updates to configure connectivity once other microservices are deployed that connect to mysqldb
+
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: {{ .Values.global.mysqldb.appName }}
+  namespace: {{ .Values.global.mysqldb.namespace }}
+  labels:
+    app: {{ .Values.global.mysqldb.appName }}
+    version: {{ .Values.global.mysqldb.version }}
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/ssl-redirect: "true"
+    nginx.ingress.kubernetes.io/rewrite-target: /$1
+    nginx.ingress.kubernetes.io/configuration-snippet: |
+      proxy_set_header l5d-dst-override $service_name.$namespace.svc.cluster.local:$service_port;
+      grpc_set_header l5d-dst-override $service_name.$namespace.svc.cluster.local:$service_port;
+spec:
+  tls:
+    - hosts:
+        - {{ .Values.global.mysqldb.nodeApi.host }}
+      secretName: {{ .Values.global.mysqldb.certName }}
+  rules:
+    - host: {{ .Values.global.mysqldb.nodeApi.host }}
+      http:
+        paths:
+          - path: /mysqldb/(.*)
+            backend:
+              serviceName: {{ .Values.global.mysqldb.appName }}
+              servicePort: {{ .Values.global.mysqldb.port }}
\ No newline at end of file
diff --git a/otf-helm/otf/charts/databases/charts/mysqldb/templates/secret.yaml b/otf-helm/otf/charts/databases/charts/mysqldb/templates/secret.yaml
new file mode 100644
index 0000000..80834a7
--- /dev/null
+++ b/otf-helm/otf/charts/databases/charts/mysqldb/templates/secret.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ .Values.global.mysqldb.appName }}
+type: Opaque
+data:
+  mysql_root_password: {{ .Values.global.mysqldb.mysqlRootPassword | b64enc }}
+  mysql_password: {{ .Values.global.mysqldb.mysqlPassword | b64enc }}
diff --git a/otf-helm/otf/charts/databases/charts/mysqldb/templates/service.yaml b/otf-helm/otf/charts/databases/charts/mysqldb/templates/service.yaml
new file mode 100644
index 0000000..7a9c79a
--- /dev/null
+++ b/otf-helm/otf/charts/databases/charts/mysqldb/templates/service.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ .Values.global.mysqldb.appName }}
+  namespace: {{ .Values.global.mysqldb.namespace }}
+  labels:
+    app: {{ .Values.global.mysqldb.appName }}
+    version: {{ .Values.global.mysqldb.version }}
+  annotations:
+    service.beta.kubernetes.io/azure-load-balancer-internal: "true"
+spec:
+  type: LoadBalancer
+  ports:
+    - port: {{ .Values.global.mysqldb.port }}
+      protocol: TCP
+      targetPort: {{ .Values.global.mysqldb.targetPort }}
+  selector:
+    app: {{ .Values.global.mysqldb.appName }}
+    version: {{ .Values.global.mysqldb.version }}
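Note that secret.yaml above base64-encodes global.mysqldb.mysqlRootPassword and global.mysqldb.mysqlPassword with b64enc, so both values must be set at render time. One way to supply them without committing plaintext to the chart is at install time (a sketch; the release name is a placeholder and the --name flag assumes Helm 2):

    helm install ./otf --name otf \
      --set global.mysqldb.mysqlRootPassword=<root-password> \
      --set global.mysqldb.mysqlPassword=<user-password>
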
diff --git a/otf-helm/otf/charts/databases/charts/mysqldb/values.yaml b/otf-helm/otf/charts/databases/charts/mysqldb/values.yaml
new file mode 100644
index 0000000..1c0457b
--- /dev/null
+++ b/otf-helm/otf/charts/databases/charts/mysqldb/values.yaml
@@ -0,0 +1,233 @@
+# Values file kept for reference from the upstream GitHub mysql chart - currently not used.
+
+## mysql image version
+## ref: https://hub.docker.com/r/library/mysql/tags/
+##
+image: "mysql"
+imageTag: "5.7.26"
+
+strategy:
+  type: Recreate
+
+busybox:
+  image: "busybox"
+  tag: "1.29.3"
+
+testFramework:
+  enabled: true
+  image: "dduportal/bats"
+  tag: "0.4.0"
+
+## Specify password for root user
+##
+## Default: random 10 character string
+# mysqlRootPassword: testing
+
+## Create a database user
+##
+# mysqlUser:
+## Default: random 10 character string
+# mysqlPassword:
+
+## Allow unauthenticated access, uncomment to enable
+##
+# mysqlAllowEmptyPassword: true
+
+## Create a database
+##
+# mysqlDatabase:
+
+## Specify an imagePullPolicy (Required)
+## It's recommended to change this to 'Always' if the image tag is 'latest'
+## ref: http://kubernetes.io/docs/user-guide/images/#updating-images
+##
+imagePullPolicy: IfNotPresent
+
+## Additional arguments that are passed to the MySQL container.
+## For example use --default-authentication-plugin=mysql_native_password if older clients need to
+## connect to a MySQL 8 instance.
+args: []
+
+extraVolumes: |
+  # - name: extras
+  #   emptyDir: {}
+
+extraVolumeMounts: |
+  # - name: extras
+  #   mountPath: /usr/share/extras
+  #   readOnly: true
+
+extraInitContainers: |
+  # - name: do-something
+  #   image: busybox
+  #   command: ['do', 'something']
+
+# Optionally specify an array of imagePullSecrets.
+# Secrets must be manually created in the namespace.
+# ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+# imagePullSecrets:
+  # - name: myRegistryKeySecretName
+
+## Node selector
+## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+nodeSelector: {}
+
+## Affinity
+## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+affinity: {}
+
+## Tolerations for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+
+livenessProbe:
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  successThreshold: 1
+  failureThreshold: 3
+
+readinessProbe:
+  initialDelaySeconds: 5
+  periodSeconds: 10
+  timeoutSeconds: 1
+  successThreshold: 1
+  failureThreshold: 3
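+# Note: although this file is described above as reference-only, the mysqldb
+# deployment template does read the livenessProbe and readinessProbe blocks
+# defined here (as .Values.livenessProbe.* and .Values.readinessProbe.*).
+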
+## Persist data to a persistent volume
+persistence:
+  enabled: true
+  ## database data Persistent Volume Storage Class
+  ## If defined, storageClassName: <storageClass>
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
+  ##   GKE, AWS & OpenStack)
+  ##
+  # storageClass: "-"
+  accessMode: ReadWriteOnce
+  size: 8Gi
+  annotations: {}
+
+## Use an alternate scheduler, e.g. "stork".
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+# schedulerName:
+
+## Security context
+securityContext:
+  enabled: false
+  runAsUser: 999
+  fsGroup: 999
+
+## Configure resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+##
+resources:
+  requests:
+    memory: 256Mi
+    cpu: 100m
+
+# Custom mysql configuration files path
+configurationFilesPath: /etc/mysql/conf.d/
+
+# Custom mysql configuration files used to override default mysql settings
+configurationFiles: {}
+#  mysql.cnf: |-
+#    [mysqld]
+#    skip-name-resolve
+#    ssl-ca=/ssl/ca.pem
+#    ssl-cert=/ssl/server-cert.pem
+#    ssl-key=/ssl/server-key.pem
+
+# Custom mysql init SQL files used to initialize the database
+initializationFiles: {}
+#  first-db.sql: |-
+#    CREATE DATABASE IF NOT EXISTS first DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
+#  second-db.sql: |-
+#    CREATE DATABASE IF NOT EXISTS second DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
+
+metrics:
+  enabled: false
+  image: prom/mysqld-exporter
+  imageTag: v0.10.0
+  imagePullPolicy: IfNotPresent
+  resources: {}
+  annotations: {}
+    # prometheus.io/scrape: "true"
+    # prometheus.io/port: "9104"
+  livenessProbe:
+    initialDelaySeconds: 15
+    timeoutSeconds: 5
+  readinessProbe:
+    initialDelaySeconds: 5
+    timeoutSeconds: 1
+  flags: []
+  serviceMonitor:
+    enabled: false
+    additionalLabels: {}
+
+## Configure the service
+## ref: http://kubernetes.io/docs/user-guide/services/
+service:
+  annotations: {}
+  ## Specify a service type
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types
+  type: ClusterIP
+  port: 3306
+  # nodePort: 32000
+  # loadBalancerIP:
+
+## Pods Service Account
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+serviceAccount:
+  ## Specifies whether a ServiceAccount should be created
+  ##
+  create: false
+  ## The name of the ServiceAccount to use.
+  ## If not set and create is true, a name is generated using the mariadb.fullname template
+  # name:
+
+ssl:
+  enabled: false
+  secret: mysql-ssl-certs
+  certificates:
+#  - name: mysql-ssl-certs
+#    ca: |-
+#      -----BEGIN CERTIFICATE-----
+#      ...
+#      -----END CERTIFICATE-----
+#    cert: |-
+#      -----BEGIN CERTIFICATE-----
+#      ...
+#      -----END CERTIFICATE-----
+#    key: |-
+#      -----BEGIN RSA PRIVATE KEY-----
+#      ...
+#      -----END RSA PRIVATE KEY-----
+
+## Populates the 'TZ' system timezone environment variable
+## ref: https://dev.mysql.com/doc/refman/5.7/en/time-zone-support.html
+##
+## Default: nil (mysql will use image's default timezone, normally UTC)
+## Example: 'Australia/Sydney'
+# timezone:
+
+# Deployment Annotations
+deploymentAnnotations: {}
+
+# To be added to the database server pod(s)
+podAnnotations: {}
+podLabels: {}
+
+## Set pod priorityClassName
+# priorityClassName: {}
+
+
+## Init container resources defaults
+initContainer:
+  resources:
+    requests:
+      memory: 10Mi
+      cpu: 10m
diff --git a/otf-helm/otf/values/development.yaml b/otf-helm/otf/values/development.yaml
new file mode 100644
index 0000000..894d035
--- /dev/null
+++ b/otf-helm/otf/values/development.yaml
@@ -0,0 +1 @@
+# These are the development environment specific values
diff --git a/otf-helm/readme.md b/otf-helm/readme.md
new file mode 100644
index 0000000..c9db864
--- /dev/null
+++ b/otf-helm/readme.md
@@ -0,0 +1,5 @@
+Open Test Framework
+
+Use these Helm charts to deploy OTF.
+
+helm install otf
\ No newline at end of file
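
The readme's one-line install omits the chart path and any environment overrides. A fuller invocation might look like this (a sketch; the --name flag assumes Helm 2, and development.yaml is the values stub added by this commit):

    helm install ./otf --name otf --values otf/values/development.yaml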