adding the flask-sqlalchemy orm 65/13665/4
author    rajdeep11 <rajdeep.sin@samsung.com>
Fri, 18 Oct 2024 11:18:34 +0000 (16:48 +0530)
committer rajdeep11 <rajdeep.sin@samsung.com>
Sat, 19 Oct 2024 10:01:22 +0000 (15:31 +0530)
adding flask-migrate for migrating the database
added the models: FeatureGroup and TrainingJob

Change-Id: I9a127a11ac908e199e32dc88a8b07aa18c7ebb1f
Signed-off-by: rajdeep11 <rajdeep.sin@samsung.com>
requirements.txt
tox.ini
trainingmgr/models/__init__.py [new file with mode: 0644]
trainingmgr/models/featuregroup.py [new file with mode: 0644]
trainingmgr/models/trainingjob.py [new file with mode: 0644]
trainingmgr/trainingmgr_main.py
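
Before the individual diffs, a minimal sketch of how the pieces added here (the shared SQLAlchemy instance, the two models, and Flask-Migrate) are typically wired together; the connection URI below is a placeholder rather than the Training Manager's actual configuration:

    from flask import Flask
    from flask_migrate import Migrate
    from trainingmgr.models import db, TrainingJob, FeatureGroup

    app = Flask(__name__)
    # Placeholder URI; the real value is built from the Training Manager config
    # (see the trainingmgr_main.py hunk at the end of this change).
    app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://user:password@localhost:5432/training_manager_database'
    db.init_app(app)            # bind the shared SQLAlchemy instance to this app
    migrate = Migrate(app, db)  # registers the "flask db ..." migration commands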

diff --git a/requirements.txt b/requirements.txt
index 84f48ba..113da19 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -27,3 +27,6 @@ PyYAML
 kubernetes
 validators==0.20.0
 Werkzeug==2.2.2
+Flask-SQLAlchemy
+Flask-Migrate
+psycopg2-binary
\ No newline at end of file
diff --git a/tox.ini b/tox.ini
index 00fe01c..70bf1d4 100644
--- a/tox.ini
+++ b/tox.ini
@@ -43,6 +43,7 @@ deps=
   pg8000
   Werkzeug==2.2.2
   validators==0.20.0
+  Flask-Migrate
 
 setenv = cd  = {toxinidir}/tests
 commands =
diff --git a/trainingmgr/models/__init__.py b/trainingmgr/models/__init__.py
new file mode 100644
index 0000000..d6bf951
--- /dev/null
+++ b/trainingmgr/models/__init__.py
@@ -0,0 +1,8 @@
+from flask_sqlalchemy import SQLAlchemy
+
+db = SQLAlchemy()
+
+from trainingmgr.models.trainingjob import TrainingJob
+from trainingmgr.models.featuregroup import FeatureGroup
+
+__all__ = ['TrainingJob', 'FeatureGroup']
\ No newline at end of file
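
A hedged sketch of how the shared db instance and the re-exported models are intended to be consumed elsewhere in the package; the helper name and query below are illustrative only and are not part of this change:

    from trainingmgr.models import FeatureGroup

    def get_featuregroup_by_name(name):
        # Flask-SQLAlchemy attaches a .query property to every model; this must
        # run inside an application context. Returns None when no row matches.
        return FeatureGroup.query.filter_by(featuregroup_name=name).first()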
diff --git a/trainingmgr/models/featuregroup.py b/trainingmgr/models/featuregroup.py
new file mode 100644
index 0000000..b434280
--- /dev/null
+++ b/trainingmgr/models/featuregroup.py
@@ -0,0 +1,21 @@
+from . import db
+
+class FeatureGroup(db.Model):
+    __tablename__ = "featuregroup_info_table"
+    id = db.Column(db.Integer, primary_key=True)
+    featuregroup_name = db.Column(db.String(128), nullable=False)
+    feature_list = db.Column(db.String(1000), nullable=False)
+    datalake_source = db.Column(db.String(20000), nullable=False)
+    host = db.Column(db.String(128), nullable=False)
+    port = db.Column(db.String(128), nullable=False)
+    bucket = db.Column(db.String(1000), nullable=False)
+    token = db.Column(db.String(1000), nullable=False)
+    db_org = db.Column(db.String(128), nullable=False)
+    measurement = db.Column(db.String(1000), nullable=False)
+    enable_dme = db.Column(db.Boolean, nullable=False)
+    measured_obj_class = db.Column(db.String(20000), nullable=False)
+    dme_port = db.Column(db.String(128), nullable=False)
+    source_name = db.Column(db.String(20000), nullable=False)
+
+    def __repr__(self):
+        return f'<featuregroup {self.featuregroup_name}>'
\ No newline at end of file
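
As an assumed usage example, inserting a row through the new model instead of raw SQL (every value below is a placeholder; all columns are declared nullable=False, so each one has to be supplied):

    from trainingmgr.models import db, FeatureGroup

    fg = FeatureGroup(
        featuregroup_name='cell-metrics', feature_list='pdcp_bytes_dl,pdcp_bytes_ul',
        datalake_source='InfluxSource', host='127.0.0.1', port='8086',
        bucket='pm-bucket', token='dummy-token', db_org='primary',
        measurement='liveCell', enable_dme=False, measured_obj_class='',
        dme_port='', source_name='')
    db.session.add(fg)
    db.session.commit()   # writes the row into featuregroup_info_table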
diff --git a/trainingmgr/models/trainingjob.py b/trainingmgr/models/trainingjob.py
new file mode 100644
index 0000000..85ae701
--- /dev/null
+++ b/trainingmgr/models/trainingjob.py
@@ -0,0 +1,31 @@
+from . import db
+from datetime import datetime
+from sqlalchemy.sql import func
+
+class TrainingJob(db.Model):
+    __tablename__ = "trainingjob_info_table"
+    id = db.Column(db.Integer, primary_key=True)
+    trainingjob_name = db.Column(db.String(128), nullable=False)
+    description = db.Column(db.String(2000), nullable=False)
+    feature_group_name = db.Column(db.String(128), nullable=False)
+    pipeline_name = db.Column(db.String(128), nullable=False)
+    experiment_name = db.Column(db.String(128), nullable=False)
+    arguments = db.Column(db.String(2000), nullable=False)
+    query_filter = db.Column(db.String(2000), nullable=False)
+    creation_time = db.Column(db.DateTime(timezone=False), server_default=func.now(), nullable=False)
+    run_id = db.Column(db.String(1000), nullable=False)
+    steps_state = db.Column(db.String(1000), nullable=False)
+    updation_time = db.Column(db.DateTime(timezone=False), onupdate=func.now(), nullable=False)
+    version = db.Column(db.Integer, nullable=False)
+    enable_versioning = db.Column(db.Boolean, nullable=False)
+    pipeline_version = db.Column(db.String(128), nullable=False)
+    datalake_source = db.Column(db.String(2000), nullable=False)
+    model_url = db.Column(db.String(1000), nullable=False)
+    notification_url = db.Column(db.String(1000), nullable=False)
+    deletion_in_progress = db.Column(db.Boolean, nullable=False)
+    is_mme = db.Column(db.Boolean, nullable=False)
+    model_name = db.Column(db.String(128), nullable=False)
+    model_info = db.Column(db.String(1000), nullable=False)
+
+    def __repr__(self):
+        return f'<Trainingjob {self.trainingjob_name}>'
\ No newline at end of file
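
One caveat worth noting on the timestamp columns: creation_time gets a server-side default, but updation_time only has onupdate, which SQLAlchemy applies on UPDATE statements; because the column is also nullable=False, an initial value has to be provided on INSERT. A sketch with placeholder values:

    from sqlalchemy.sql import func
    from trainingmgr.models import db, TrainingJob

    tj = TrainingJob(
        trainingjob_name='qoe-training', description='example job',
        feature_group_name='cell-metrics', pipeline_name='qoe_pipeline',
        experiment_name='Default', arguments='{}', query_filter='',
        run_id='', steps_state='{}',
        updation_time=func.now(),   # explicit initial value; onupdate alone only fires on UPDATE
        version=1, enable_versioning=False, pipeline_version='1',
        datalake_source='InfluxSource', model_url='', notification_url='',
        deletion_in_progress=False, is_mme=False, model_name='', model_info='')
    db.session.add(tj)
    db.session.commit()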
diff --git a/trainingmgr/trainingmgr_main.py b/trainingmgr/trainingmgr_main.py
index 3a213c6..91ddc47 100644
--- a/trainingmgr/trainingmgr_main.py
+++ b/trainingmgr/trainingmgr_main.py
@@ -29,6 +29,7 @@ from threading import Lock
 import time
 from flask import Flask, request, send_file
 from flask_api import status
+from flask_migrate import Migrate
 import requests
 from flask_cors import CORS
 from werkzeug.utils import secure_filename
@@ -54,6 +55,7 @@ from trainingmgr.db.common_db_fun import get_data_extraction_in_progress_trainin
     update_model_download_url, add_update_trainingjob, add_featuregroup, edit_featuregroup, \
     get_field_of_given_version,get_all_jobs_latest_status_version, get_info_of_latest_version, \
     get_feature_groups_db, get_feature_group_by_name_db, delete_feature_group_by_name, delete_trainingjob_version, change_field_value_by_version
+from trainingmgr.models import db, TrainingJob, FeatureGroup
 
 APP = Flask(__name__)
 TRAININGMGR_CONFIG_OBJ = None
@@ -1745,6 +1747,12 @@ if __name__ == "__main__":
             raise TMException("Not all configuration loaded.")
         LOGGER = TRAININGMGR_CONFIG_OBJ.logger
         PS_DB_OBJ = PSDB(TRAININGMGR_CONFIG_OBJ)
+        APP.config['SQLALCHEMY_DATABASE_URI'] = f'postgresql+psycopg2://{TRAININGMGR_CONFIG_OBJ.ps_user}:{TRAININGMGR_CONFIG_OBJ.ps_password}@{TRAININGMGR_CONFIG_OBJ.ps_ip}:{TRAININGMGR_CONFIG_OBJ.ps_port}/training_manager_database'
+        db.init_app(APP)
+        # TODO: add "flask db upgrade" to the Dockerfile
+        migrate = Migrate(APP, db)
+        with APP.app_context():
+            db.create_all()
         LOCK = Lock()
         DATAEXTRACTION_JOBS_CACHE = get_data_extraction_in_progress_trainingjobs(PS_DB_OBJ)
         threading.Thread(target=async_feature_engineering_status, daemon=True).start()
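
On the TODO above: with Flask-Migrate registered, the usual flow is to generate a migrations/ directory once ("flask db init", then "flask db migrate") and apply it at deploy time with "flask db upgrade", after which the db.create_all() call becomes unnecessary. A sketch of the equivalent Python API call at startup, assuming a migrations/ directory is shipped with the image:

    from flask_migrate import upgrade

    with APP.app_context():
        # Equivalent to running "flask db upgrade"; applies any pending Alembic
        # revisions from the migrations/ directory before the service starts.
        upgrade()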