# NOTE(review): This chunk is patch/diff residue ("-" = removed line, "+" = added
# line, unprefixed = context), not runnable Python. Context lines appear with
# their original indentation stripped. Comments below annotate intent only;
# no code or diff tokens were altered.
self.logger = trainingmgr_main.LOGGER
#test_positive_1
# Fixture change: the "+" row extends the "-" row with four trailing values
# (False, False, '', ''), so the tuple grows from 20 to 23 elements. The last
# three land at indices 20/21/22, matching the new is_mme / model_name /
# model_info reads added further down — presumably new trainingjob DB columns;
# TODO confirm against the table schema.
- db_result = [('mynetwork', 'testing', '*', 'testing_pipeline', 'Default', '{"arguments": {"epochs": "1", "trainingjob_name": "mynetwork"}}', '', datetime.datetime(2023, 2, 9, 9, 2, 11, 13916), 'No data available', '{"DATA_EXTRACTION": "FINISHED", "DATA_EXTRACTION_AND_TRAINING": "IN_PROGRESS", "TRAINING": "NOT_STARTED", "TRAINING_AND_TRAINED_MODEL": "NOT_STARTED", "TRAINED_MODEL": "NOT_STARTED"}', datetime.datetime(2023, 2, 9, 9, 2, 11, 13916), 1, False, '2', '{"datalake_source": {"InfluxSource": {}}}', 'No data available.', '', 'liveCell', 'UEData', False)]
+ db_result = [('mynetwork', 'testing', '*', 'testing_pipeline', 'Default', '{"arguments": {"epochs": "1", "trainingjob_name": "mynetwork"}}', '', datetime.datetime(2023, 2, 9, 9, 2, 11, 13916), 'No data available', '{"DATA_EXTRACTION": "FINISHED", "DATA_EXTRACTION_AND_TRAINING": "IN_PROGRESS", "TRAINING": "NOT_STARTED", "TRAINING_AND_TRAINED_MODEL": "NOT_STARTED", "TRAINING_AND_TRAINED_MODEL": "NOT_STARTED", "TRAINED_MODEL": "NOT_STARTED"}', datetime.datetime(2023, 2, 9, 9, 2, 11, 13916), 1, False, '2', '{"datalake_source": {"InfluxSource": {}}}', 'No data available.', '', 'liveCell', 'UEData', False, False, '','')]
# Mock of the training-manager config object; only my_ip.return_value ('123')
# is configured here.
mocked_TRAININGMGR_CONFIG_OBJ=mock.Mock(name="TRAININGMGR_CONFIG_OBJ")
attrs_TRAININGMGR_CONFIG_OBJ = {'my_ip.return_value': '123'}
mocked_TRAININGMGR_CONFIG_OBJ.configure_mock(**attrs_TRAININGMGR_CONFIG_OBJ)
# Unpack fields from the DB row: column 14 is a JSON string holding
# {"datalake_source": {...}}; get_one_key presumably extracts its single key —
# TODO confirm get_one_key's contract. Columns 17/18 are measurement/bucket
# ('liveCell'/'UEData' in the fixture above).
datalake_source = get_one_key(json.loads(results[0][14])["datalake_source"])
_measurement = results[0][17]
bucket = results[0][18]
# New reads of the three columns appended by the fixture change above
# (indices 20-22: False, '', '' in the "+" row).
+ is_mme=results[0][20]
+ model_name=results[0][21]
+ model_info=results[0][22]
notification_url = ""
# NOTE(review): the body guarded by this "if" is not visible in this chunk —
# notification_url is seemingly assigned from obj inside it; verify the patch
# did not drop that assignment.
if "notification_url" in obj:
query_filter = obj['feature_filter']
try:
# Call-site change: the "+" version switches the trailing arguments to keyword
# form and adds is_mme/model_name/model_info. NOTE(review): the "-" version
# passed notification_url positionally; the "+" version no longer forwards it
# at all — confirm this omission is intentional and matches the new
# add_update_trainingjob signature.
- add_update_trainingjob(description, pipeline_name, experiment_name,
- featuregroup_name, arguments, query_filter, False,
- enable_versioning, pipeline_version,
- datalake_source, trainingjob_name, PS_DB_OBJ,
- notification_url, _measurement, bucket)
+ add_update_trainingjob(description, pipeline_name, experiment_name, featuregroup_name,
+ arguments, query_filter, False, enable_versioning,
+ pipeline_version, datalake_source, trainingjob_name,
+ PS_DB_OBJ, _measurement=_measurement,
+ bucket=bucket, is_mme=is_mme, model_name=model_name, model_info=model_info)
except Exception as err:
# On failure, record the job as not retrainable and log the error with the
# job name appended.
not_possible_to_retrain.append(trainingjob_name)
LOGGER.debug(str(err) + "(training job is " + trainingjob_name + ")")