Index: cloud_sync.py =================================================================== diff -u -r3736c29ba097740b4e99684625e78bfaaac6b1a1 -r34a18ebe349364c4e0462d876d4d191bf3df8939 --- cloud_sync.py (.../cloud_sync.py) (revision 3736c29ba097740b4e99684625e78bfaaac6b1a1) +++ cloud_sync.py (.../cloud_sync.py) (revision 34a18ebe349364c4e0462d876d4d191bf3df8939) @@ -1,3 +1,9 @@ +import werkzeug +werkzeug.cached_property = werkzeug.utils.cached_property + +from flask import Flask, Response, request +from flask_restplus import Api, Resource, fields, reqparse + from cloudsync.handlers.error import Error from cloudsync.utils.heartbeat import HeartBeatProvider from cloudsync.busses.file_input_bus import FileInputBus @@ -9,20 +15,12 @@ from cloudsync.utils.globals import * from cloudsync.utils.helpers import * from cloudsync.utils.logging import LoggingConfig -from flask_restplus import Api, Resource, fields, reqparse -from flask import Flask, Response, request import os import sys -import logging -from logging.handlers import TimedRotatingFileHandler -from cloudsync.handlers.logs_handler import CustomTimedRotatingFileHandlerHandler -import werkzeug -werkzeug.cached_property = werkzeug.utils.cached_property +VERSION = "0.4.11" -VERSION = "0.4.10" - arguments = sys.argv app = Flask(__name__) @@ -73,10 +71,10 @@ heartbeat_provider = HeartBeatProvider(logger=app.logger, network_request_handler=network_request_handler, output_channel=output_channel) - logconf.set_log_level(g_config[CONFIG_DEVICE][CONFIG_LOG_LEVEL]) logconf.set_network_provider(network_request_handler=network_request_handler) logconf.set_error_provider(error_handler=error_handler) logconf.set_configuration(g_config=g_config) + logconf.set_log_level(g_config[CONFIG_LOGS][CONFIG_LOGS_DEFAULT_LOG_LEVEL]) if g_config[CONFIG_DEVICE][CONFIG_DEVICE_MODE] == 'operation': heartbeat_provider.send_heartbeat = True @@ -214,11 +212,13 @@ # END - REGISTRATION ENDPOINTS +# START - TESTING ENDPOINTS +# END - TESTING ENDPOINTS def main(): if g_config[CONFIG_DEVICE][CONFIG_DEVICE_MODE] == 'registration': - app.run(debug=False, use_reloader=False, host="0.0.0.0", port=g_config[CONFIG_DEVICE][CONFIG_DEVICE_PORT]) + app.run(debug=False, use_reloader=False, host="0.0.0.0",port=g_config[CONFIG_DEVICE][CONFIG_DEVICE_PORT]) else: while True: sleep(1) Index: cloudsync/common/enums.py =================================================================== diff -u -r3736c29ba097740b4e99684625e78bfaaac6b1a1 -r34a18ebe349364c4e0462d876d4d191bf3df8939 --- cloudsync/common/enums.py (.../enums.py) (revision 3736c29ba097740b4e99684625e78bfaaac6b1a1) +++ cloudsync/common/enums.py (.../enums.py) (revision 34a18ebe349364c4e0462d876d4d191bf3df8939) @@ -23,6 +23,7 @@ except Exception: return None + @unique class HDOpModes(RootEnum): MODE_FAUL = 0 # Fault mode @@ -96,6 +97,7 @@ # OUTGOING ERROR CS2UI_ERROR = 2999 + @unique class NetworkRequestType(RootEnum): MFT2CS_REQ_SET_CREDENTIALS = 4 Index: cloudsync/config/log_upload_template.json =================================================================== diff -u --- cloudsync/config/log_upload_template.json (revision 0) +++ cloudsync/config/log_upload_template.json (revision 34a18ebe349364c4e0462d876d4d191bf3df8939) @@ -0,0 +1,33 @@ +{ + "start_session": { + "generatedAt": 0, + "dataType": "", + "serialNumber": "", + "macAddress": "", + "organizationId": "", + "reference": "", + "metadata": { + "dataType" : "", + "deviceLogType": "", + "deviceSubType": "", + "deviceFileName": "", + "startTimestamp": "", + "endTimestamp": "" + } + 
}, + "upload_chunk": { + "sessionId": "", + "chunkNo": 0, + "chunkType": "", + "data": "" + }, + "end_session": { + "sessionId": "", + "checksum": "", + "completedAt": 0 + }, + "general": { + "file_size": 0, + "file_path": "" + } +} \ No newline at end of file Index: cloudsync/config/treatment_report_template.json =================================================================== diff -u -r3736c29ba097740b4e99684625e78bfaaac6b1a1 -r34a18ebe349364c4e0462d876d4d191bf3df8939 --- cloudsync/config/treatment_report_template.json (.../treatment_report_template.json) (revision 3736c29ba097740b4e99684625e78bfaaac6b1a1) +++ cloudsync/config/treatment_report_template.json (.../treatment_report_template.json) (revision 34a18ebe349364c4e0462d876d4d191bf3df8939) @@ -2,6 +2,7 @@ "organizationId": "", "serialNumber": "", "checksum": "", + "reference": "", "data": { "patient": { "id": "" Index: cloudsync/handlers/cs_mft_dcs_request_handler.py =================================================================== diff -u -r3736c29ba097740b4e99684625e78bfaaac6b1a1 -r34a18ebe349364c4e0462d876d4d191bf3df8939 --- cloudsync/handlers/cs_mft_dcs_request_handler.py (.../cs_mft_dcs_request_handler.py) (revision 3736c29ba097740b4e99684625e78bfaaac6b1a1) +++ cloudsync/handlers/cs_mft_dcs_request_handler.py (.../cs_mft_dcs_request_handler.py) (revision 34a18ebe349364c4e0462d876d4d191bf3df8939) @@ -129,9 +129,8 @@ # OPERATION MODE elif req.request_type == NetworkRequestType.CS2DCS_REQ_SET_DEVICE_STATE: try: - device_state_json = req.payload + device_state_json = req.payload # empty {} device_state = req.g_config[CONFIG_DEVICE][CONFIG_DEVICE_STATE] - device_log_json = req.payload base_url = req.g_config[CONFIG_KEBORMED][CONFIG_KEBORMED_DCS_URL] identity_url = req.g_config[CONFIG_KEBORMED][CONFIG_KEBORMED_DEVICE_IDENTITY_URL] client_secret = req.g_config[CONFIG_KEBORMED][CONFIG_KEBORMED_IDP_CLIENT_SECRET] @@ -145,8 +144,8 @@ if access_token is not None: # Decide whether to inform DCS for the new log level - if helpers_should_update_dcs_log_level(): - device_state_json['logLevel'] = self.logconf.get_log_level()[1] + if helpers_should_update_dcs_log_level(req.g_config): + device_state_json['logLevel'] = self.logconf.get_log_level()['text_value'].lower() self.logconf.set_dcs_flag(0) # Reset the update_dcs_flag device_state_json['state'] = DeviceStates.mapped_int_value(device_state).value @@ -161,18 +160,21 @@ if response.status_code == 200: self.logger.debug("CS set Device state: {0}".format(response.json())) - if helpers_should_update_cs_log_level(response.json()): + if helpers_should_update_cs_log_level(response.json(), req.g_config): new_log_level = response.json()['logLevel'] - # Set the DCS flag - self.logconf.set_dcs_flag(1) - # Calculate the duration of the new log level - log_level_duration = helpers_calculate_log_level_duration(response.json()) - log_level_duration = log_level_duration if log_level_duration else req.g_config[CONFIG_DEVICE][CONFIG_LOG_LEVEL_DURATION] - self.logconf.set_log_duration(log_level_duration) - # Update the log level - self.logconf.set_log_level(new_log_level) - # Start the timer - self.logconf.start_countdown() + if new_log_level != "error": + # Set the DCS flag + self.logconf.set_dcs_flag(1) + # Calculate the duration of the new log level + log_level_duration = helpers_calculate_log_level_duration(response.json()) + log_level_duration = log_level_duration if log_level_duration else req.g_config[CONFIG_DEVICE][CONFIG_LOG_LEVEL_DURATION] + self.logconf.set_log_duration(log_level_duration) + # 
Update the log level
+                            self.logconf.set_log_level(new_log_level)
+                            # Start the timer
+                            self.logconf.start_countdown()
+                        else:
+                            self.logconf.revert_to_error()

                 self.logger.debug("DCS Request device state response code: {0} & full response: {1}".format(response.status_code, response.text))

@@ -324,17 +326,14 @@
                 self.error_handler.enqueue_error(error=error)
         elif req.request_type == NetworkRequestType.CS2DCS_REQ_SEND_DEVICE_LOG:
             try:
-                device_log_json = req.payload
-                device_log_filename = device_log_json['metadata']['deviceFileName']
+                device_log_data = req.payload
                 base_url = req.g_config[CONFIG_KEBORMED][CONFIG_KEBORMED_DCS_URL]
                 identity_url = req.g_config[CONFIG_KEBORMED][CONFIG_KEBORMED_DEVICE_IDENTITY_URL]
                 client_secret = req.g_config[CONFIG_KEBORMED][CONFIG_KEBORMED_IDP_CLIENT_SECRET]
                 token_verification_url = urllib.parse.urljoin(
                     base_url, DEVICE_TOKEN_VALIDATION)
                 validate_url = urllib.parse.urljoin(
                     base_url, "api/device/validate")
-                data_submission_url = urllib.parse.urljoin(
-                    base_url, "/api/device/log")

                 # Step #1 - get access token

@@ -368,21 +367,20 @@
                     return response
                 else:
                     organization_id = response.get("associatedOrganizationId", None)
-                    device_log_json['organizationId'] = organization_id
+                    device_log_data['organizationId'] = organization_id
+                    # Step #3 - upload the device log file in chunks

-                    # Step #3 - upload the device log file
+                    device_log_json = helpers_construct_device_log_json(device_log_data)

-                    device_log_json = json.dumps(device_log_json)
+                    device_log_filename = cmd_outgoing_upload_file_in_chunks(base_url=base_url,
+                                                                             access_token=access_token,
+                                                                             file_json=device_log_json,
+                                                                             error_handler=self.error_handler,
+                                                                             log_file_origin='device')

-                    response = cmd_outgoing_send_device_log(url=data_submission_url,
-                                                            access_token=access_token,
-                                                            device_log=device_log_json,
-                                                            error_handler=self.error_handler)
-
-                    if response is not None:
-                        self.logger.debug(
-                            "Device log file uploaded: {0}".format(response.json()))
+                    if device_log_filename is not None:
+                        self.logger.debug(f"Device log file uploaded: {device_log_filename}")

                     # Send response to the UI
                     message_body = str(
                         OutboundMessageIDs.CS2UI_DEVICE_LOG_UPLOADED.value) + ',1,' + device_log_filename
@@ -400,17 +398,14 @@
                 self.error_handler.enqueue_error(error=error)
         elif req.request_type == NetworkRequestType.CS2DCS_REQ_SEND_CS_LOG:
             try:
-                cs_log_json = req.payload
-                cs_log_filename = cs_log_json['metadata']['deviceFileName']
+                cs_log_data = req.payload
                 base_url = req.g_config[CONFIG_KEBORMED][CONFIG_KEBORMED_DCS_URL]
                 identity_url = req.g_config[CONFIG_KEBORMED][CONFIG_KEBORMED_DEVICE_IDENTITY_URL]
                 client_secret = req.g_config[CONFIG_KEBORMED][CONFIG_KEBORMED_IDP_CLIENT_SECRET]
                 token_verification_url = urllib.parse.urljoin(
                     base_url, DEVICE_TOKEN_VALIDATION)
                 validate_url = urllib.parse.urljoin(
                     base_url, "api/device/validate")
-                data_submission_url = urllib.parse.urljoin(
-                    base_url, "/api/device/log")

                 # Step #1 - get access token

@@ -444,29 +439,22 @@
                     return response
                 else:
                     organization_id = response.get("associatedOrganizationId", None)
-                    cs_log_json['organizationId'] = organization_id
+                    cs_log_data['organizationId'] = organization_id

-                    # Step #3 - upload the device log file
+                    # Step #3 - upload the cs log file

-                    cs_log_json = json.dumps(cs_log_json)
+                    cs_log_json = helpers_construct_cs_log_json(cs_log_data)

-                    response = cmd_outgoing_send_cs_log(url=data_submission_url,
-                                                        access_token=access_token,
-                                                        cs_log=cs_log_json,
-                                                        error_handler=self.error_handler)
+                    cs_log_filename = cmd_outgoing_upload_file_in_chunks(base_url=base_url,
+                                                                         access_token=access_token,
+                                                                         file_json=cs_log_json,
+                                                                         error_handler=self.error_handler,
+                                                                         log_file_origin='cs')

-                    # Step #4 - remove the uploaded file
+                    if cs_log_filename is not None:
+                        self.logger.debug(f"CS log file uploaded: {cs_log_filename}")

-                    if response.status_code == 200 and response.json().get('sessionId'):
-                        self.logger.debug("CS log file uploaded: {0}".format(response.json()))
-                        file_to_delete = os.path.join(CS_LOG_PATH, cs_log_filename)
-                        if os.path.exists(file_to_delete):
-                            os.remove(file_to_delete)
-                            self.logger.debug(f"CS log file deleted: {file_to_delete}")
-                    else:
-                        self.logger.error("CS log file upload error: {0}".format(response.json()))
-
                 else:
                     error = Error(
                         "{0},2,{1}, Missing access token".format(OutboundMessageIDs.CS2UI_ERROR.value,
@@ -478,7 +466,6 @@
                                                          ErrorIDs.CS_DEVICE_LOG_ERROR.value, e))
                 self.error_handler.enqueue_error(error=error)

-
     else:
         g_utils.logger.warning("Request type {0} not supported".format(req.request_type))
Index: cloudsync/handlers/logs_handler.py
===================================================================
diff -u -r3736c29ba097740b4e99684625e78bfaaac6b1a1 -r34a18ebe349364c4e0462d876d4d191bf3df8939
--- cloudsync/handlers/logs_handler.py	(.../logs_handler.py)	(revision 3736c29ba097740b4e99684625e78bfaaac6b1a1)
+++ cloudsync/handlers/logs_handler.py	(.../logs_handler.py)	(revision 34a18ebe349364c4e0462d876d4d191bf3df8939)
@@ -50,19 +50,20 @@

     def __upload_cs_log_file(cls, log_file_path):

-        cs_log_json = helpers_construct_cs_log_json(log_file_path)
+        cs_log_data = {
+            "path": log_file_path,
+            "serialNumber": cls.g_config[CONFIG_DEVICE][CONFIG_DEVICE_HD_SERIAL],
+            "checksum": helpers_sha256_checksum(log_file_path)
+        }

-        if cs_log_json:
-            cs_log_json['serialNumber'] = cls.g_config[CONFIG_DEVICE][CONFIG_DEVICE_HD_SERIAL]
+        g_utils.logger.debug(f"CS log data: {cs_log_data}")

-            g_utils.logger.debug("Device log {0}".format(cs_log_json))
-
         try:
             helpers_add_to_network_queue(network_request_handler=cls.network_request_handler,
                                          request_type=NetworkRequestType.CS2DCS_REQ_SEND_CS_LOG,
                                          url='',
-                                         payload=cs_log_json,
+                                         payload=cs_log_data,
                                          method='',
                                          g_config=cls.g_config,
                                          success_message='CS2DCS_REQ_SEND_CS_LOG request added to network queue')
Index: cloudsync/handlers/outgoing/handler_cs_to_dcs.py
===================================================================
diff -u -r3736c29ba097740b4e99684625e78bfaaac6b1a1 -r34a18ebe349364c4e0462d876d4d191bf3df8939
--- cloudsync/handlers/outgoing/handler_cs_to_dcs.py	(.../handler_cs_to_dcs.py)	(revision 3736c29ba097740b4e99684625e78bfaaac6b1a1)
+++ cloudsync/handlers/outgoing/handler_cs_to_dcs.py	(.../handler_cs_to_dcs.py)	(revision 34a18ebe349364c4e0462d876d4d191bf3df8939)
@@ -404,105 +404,158 @@
         error_handler.enqueue_error(error=error)
         return None

-
 @log_func
-def cmd_outgoing_send_device_log(url: str,
-                                 access_token: str,
-                                 device_log: str,
-                                 error_handler: ErrorHandler) -> requests.Response:
+def cmd_outgoing_upload_file_in_chunks(base_url: str,
+                                       access_token: str,
+                                       file_json: dict,
+                                       error_handler: ErrorHandler,
+                                       log_file_origin: str,
+                                       chunk_size: int = 50 * 1024 * 1024,
+                                       retries: int = 3) -> Union[str, None]:
     """
-    Sends a UI logfile to DCS
-    :param url: set device state URL
-    :param access_token: access token
-    :param device_log: Device log payload
-    :param error_handler: global error handler
-    :return: The response
+    Uploads a large file in chunks using sessions and retries.
+
+    Args:
+        base_url (str): The base URL of the API.
+        access_token (str): The access token used for Authorization.
+        file_json (dict): The populated/constructed `log_upload_template.json` file for a specific file.
+        error_handler (ErrorHandler): Current `ErrorHandler` instance.
+        log_file_origin (str): The origin of the log file to be uploaded. Accepted values: `device` or `cs`.
+        chunk_size (int, optional): The size of each chunk in bytes. Defaults to 50MB.
+        retries (int, optional): The number of times to retry failed uploads. Defaults to 3.
+
+    Returns:
+        str | None: The uploaded file name if succeeded, None otherwise.
     """
-    try:
+    origins = ("cs", "device")
+    if log_file_origin not in origins:
+        g_utils.logger.error("Wrong log file origin provided.")
+        return None

-        headers = {
-            'Authorization': BEARER_HOLDER.format(access_token),
-            'Content-Type': CONTENT_TYPE,
-            'User-Agent': USER_AGENT
-        }
+    ERROR_ID = ErrorIDs.CS_DEVICE_LOG_ERROR.value if log_file_origin == 'device' else ErrorIDs.CS_LOG_ERROR.value

-        payload = device_log
+    #
+    # Start upload session
+    #

-        resp = requests.post(url=url,
-                             headers=headers,
-                             data=payload)
-        return resp
+    start_session_url = os.path.join(base_url, "api/device/data/start-session")
+    start_session_payload = file_json['start_session']
+    start_session_payload = json.dumps(start_session_payload)
+    headers = {
+        'Authorization': BEARER_HOLDER.format(access_token),
+        'Content-Type': CONTENT_TYPE,
+        'User-Agent': USER_AGENT
+    }

-    except FileNotFoundError:
-        error = Error(FILE_NOT_FOUND.format(OutboundMessageIDs.CS2UI_ERROR.value,
-                                            ErrorIDs.CS_DEVICE_LOG_ERROR.value))
+    g_utils.logger.debug(f"File upload payload (start-session): {start_session_payload}")
+
+    try:
+        response = requests.post(
+            url=start_session_url,
+            headers=headers,
+            data=start_session_payload)
+
+        if response.status_code != 200:
+            raise Exception(f"Error while starting upload session: {response.status_code} - {response.text}")
+
+    except Exception as e:
+        error = Error(GENERAL_EXCEPTION_HOLDER.format(OutboundMessageIDs.CS2UI_ERROR.value, ERROR_ID, str(e)))
         error_handler.enqueue_error(error=error)
         return None
-    except requests.exceptions.Timeout:
-        error = Error(REGISTRATION_TIMEOUT_HOLDER.format(OutboundMessageIDs.CS2UI_ERROR.value,
-                                                         ErrorIDs.CS_DEVICE_LOG_ERROR.value))
+
+    session_id = response.json().get("sessionId")
+    if not session_id:
+        error = Error(GENERAL_EXCEPTION_HOLDER.format(OutboundMessageIDs.CS2UI_ERROR.value, ERROR_ID, "Missing session ID in response."))
         error_handler.enqueue_error(error=error)
         return None
-    except requests.exceptions.TooManyRedirects:
-        error = Error(TOO_MANY_REDIRECTS_HOLDER.format(OutboundMessageIDs.CS2UI_ERROR.value,
-                                                       ErrorIDs.CS_DEVICE_LOG_ERROR.value))
-        error_handler.enqueue_error(error=error)
-        return None
-    except Exception as e:
-        error = Error(GENERAL_EXCEPTION_HOLDER.format(OutboundMessageIDs.CS2UI_ERROR.value,
-                                                      ErrorIDs.CS_DEVICE_LOG_ERROR.value,
-                                                      str(e)))
-        error_handler.enqueue_error(error=error)
-        return None

-@log_func
-def cmd_outgoing_send_cs_log(url: str,
-                             access_token: str,
-                             cs_log: str,
-                             error_handler: ErrorHandler) -> requests.Response:
-    """
-    Sends a CS logfile to DCS
-    :param url: set device state URL
-    :param access_token: access token
-    :param cs_log: Device log payload
-    :param error_handler: global error handler
-    :return: The response
-    """
-    try:
+    #
+    # Send file in chunks
+    #

-
+    try:
+        target_file = file_json['general']["file_path"]
+        file_size = file_json['general']["file_size"]
+        upload_chunk_url = os.path.join(base_url, "api/device/data/chunk")
+        upload_chunk_payload = file_json['upload_chunk']
+        upload_chunk_payload['sessionId'] = session_id
+        upload_chunk_payload['chunkType'] = "device-data"
+        chunk_number = 1
         headers = {
             'Authorization': BEARER_HOLDER.format(access_token),
             'Content-Type': CONTENT_TYPE,
             'User-Agent': USER_AGENT
         }

-        payload = cs_log
+        with open(target_file, "rb") as f:
+            # Upload chunks with retry logic
+            for chunk_start in range(0, file_size, chunk_size):
+                chunk_end = min(chunk_start + chunk_size, file_size)
+                chunk_data = f.read(chunk_end - chunk_start)

-        resp = requests.post(url=url,
-                             headers=headers,
-                             data=payload)
-        return resp
+                # Retry logic with counter and backoff time
+                retry_count = 0
+                while retry_count < retries:
+                    try:

-    except FileNotFoundError:
-        error = Error(FILE_NOT_FOUND.format(OutboundMessageIDs.CS2UI_ERROR.value,
-                                            ErrorIDs.CS_DEVICE_LOG_ERROR.value))
+                        chunk_data_b64 = base64.b64encode(chunk_data).decode('utf8')
+                        upload_chunk_payload['chunkNo'] = chunk_number
+                        upload_chunk_payload['data'] = chunk_data_b64
+                        # Serialize into a separate variable so the payload dict
+                        # stays reusable for the next chunk and for retries
+                        chunk_payload_json = json.dumps(upload_chunk_payload)
+
+                        g_utils.logger.debug(f"File upload payload (upload-chunk) - chunk No {chunk_number}: {chunk_payload_json}")
+
+                        response = requests.post(upload_chunk_url,
+                                                 headers=headers,
+                                                 data=chunk_payload_json)
+
+                        if response.status_code == 200:
+                            chunk_number += 1
+                            g_utils.logger.info(f"Uploaded chunk {chunk_start // chunk_size + 1} of {(file_size // chunk_size) + 1}")
+                            break  # Successful upload, break retry loop
+
+                    except Exception as e:
+                        error = Error(GENERAL_EXCEPTION_HOLDER.format(OutboundMessageIDs.CS2UI_ERROR.value, ERROR_ID, str(e)))
+                        error_handler.enqueue_error(error=error)
+
+                    # A failed attempt (request exception or non-200) counts
+                    # against the retry budget, then back off before retrying
+                    retry_count += 1
+                    if retry_count < retries:
+                        g_utils.logger.info("Retrying chunk upload in 5 seconds...")
+                        sleep(5)  # Backoff time between retries
+                else:
+                    # Retry budget exhausted without a successful upload - abort the session
+                    raise Exception(f"Chunk {chunk_number} upload failed after {retries} attempts")
+
+    except Exception as e:
+        error = Error(GENERAL_EXCEPTION_HOLDER.format(OutboundMessageIDs.CS2UI_ERROR.value, ERROR_ID, str(e)))
         error_handler.enqueue_error(error=error)
         return None
-    except requests.exceptions.Timeout:
-        error = Error(REGISTRATION_TIMEOUT_HOLDER.format(OutboundMessageIDs.CS2UI_ERROR.value,
-                                                         ErrorIDs.CS_DEVICE_LOG_ERROR.value))
-        error_handler.enqueue_error(error=error)
-        return None
-    except requests.exceptions.TooManyRedirects:
-        error = Error(TOO_MANY_REDIRECTS_HOLDER.format(OutboundMessageIDs.CS2UI_ERROR.value,
-                                                       ErrorIDs.CS_DEVICE_LOG_ERROR.value))
-        error_handler.enqueue_error(error=error)
-        return None
+
+    #
+    # End upload session
+    #
+
+    end_session_url = os.path.join(base_url, "api/device/data/end-session")
+    end_session_payload = file_json['end_session']
+    end_session_payload['sessionId'] = session_id
+    end_session_payload['completedAt'] = int(datetime.now(timezone.utc).timestamp()*1000)
+    headers = {
+        'Authorization': BEARER_HOLDER.format(access_token),
+        'Content-Type': CONTENT_TYPE,
+        'User-Agent': USER_AGENT
+    }
+    try:
+        end_session_payload = json.dumps(end_session_payload)
+        g_utils.logger.debug(f"File upload payload (end-session): {end_session_payload}")
+        response = requests.post(end_session_url,
+                                 headers=headers,
+                                 data=end_session_payload)
+
+        if response.status_code != 200:
+            raise Exception(f"Error while ending upload session: {response.status_code} - {response.text}")
+
     except Exception as e:
-        error = Error(GENERAL_EXCEPTION_HOLDER.format(OutboundMessageIDs.CS2UI_ERROR.value,
-                                                      ErrorIDs.CS_DEVICE_LOG_ERROR.value,
-                                                      str(e)))
+        error = Error(GENERAL_EXCEPTION_HOLDER.format(OutboundMessageIDs.CS2UI_ERROR.value, ERROR_ID, str(e)))
+        error_handler.enqueue_error(error=error)
         return None
+
+    g_utils.logger.info(f"File 
{file_json['start_session']['metadata']['deviceFileName']} uploaded.") + return str(file_json['start_session']['metadata']['deviceFileName']) Index: cloudsync/handlers/ui_cs_request_handler.py =================================================================== diff -u -r3736c29ba097740b4e99684625e78bfaaac6b1a1 -r34a18ebe349364c4e0462d876d4d191bf3df8939 --- cloudsync/handlers/ui_cs_request_handler.py (.../ui_cs_request_handler.py) (revision 3736c29ba097740b4e99684625e78bfaaac6b1a1) +++ cloudsync/handlers/ui_cs_request_handler.py (.../ui_cs_request_handler.py) (revision 34a18ebe349364c4e0462d876d4d191bf3df8939) @@ -265,17 +265,18 @@ self.logger.debug('hd: {0},dg: {1},sw: {2}'.format(hd_serial_number, dg_serial_number, sw_version)) - device_log_json = helpers_construct_device_log_json(message.parameters[0]) + device_log_data = { + "path": message.parameters[0], + "serialNumber": message.g_config[CONFIG_DEVICE][CONFIG_DEVICE_HD_SERIAL], + "checksum": local_checksum + } - if device_log_json: - device_log_json['serialNumber'] = message.g_config[CONFIG_DEVICE][CONFIG_DEVICE_HD_SERIAL] + g_utils.logger.debug("Device log data {0}".format(device_log_data)) - g_utils.logger.debug("Device log {0}".format(device_log_json)) - helpers_add_to_network_queue(network_request_handler=self.network_request_handler, request_type=NetworkRequestType.CS2DCS_REQ_SEND_DEVICE_LOG, url='', - payload=device_log_json, + payload=device_log_data, method='', g_config=message.g_config, success_message='CS2DCS_REQ_SEND_DEVICE_LOG request added to network queue') Index: cloudsync/utils/globals.py =================================================================== diff -u -r3736c29ba097740b4e99684625e78bfaaac6b1a1 -r34a18ebe349364c4e0462d876d4d191bf3df8939 --- cloudsync/utils/globals.py (.../globals.py) (revision 3736c29ba097740b4e99684625e78bfaaac6b1a1) +++ cloudsync/utils/globals.py (.../globals.py) (revision 34a18ebe349364c4e0462d876d4d191bf3df8939) @@ -14,8 +14,6 @@ CONFIG_DEVICE_SW_VERSION = 'sw_version' CONFIG_DEVICE_MODE = 'mode' CONFIG_DEVICE_STATE = 'device_state' -CONFIG_LOG_LEVEL = 'log_level' -CONFIG_LOG_LEVEL_DURATION = 'log_level_duration' CONFIG_KEBORMED = "kebormed_paas" CONFIG_KEBORMED_MFT_URL = "url_mft" @@ -25,6 +23,15 @@ CONFIG_KEBORMED_REACHABILITY_URL = "url_reachability" CONFIG_KEBORMED_DIA_ORG_ID = "dia_org_id" +CONFIG_LOGS = "logs" +CONFIG_LOGS_DEFAULT_LOG_LEVEL = "default_log_level" +CONFIG_LOGS_DEFAULT_LOG_LEVEL_DURATION = "default_log_level_duration" +CONFIG_LOGS_CURRENT_LOG_LEVEL = "current_log_level" +CONFIG_LOGS_LOG_LEVEL_DURATION = "log_level_duration" +CONFIG_LOGS_START_TIMESTAMP = "log_level_start_timestamp" +CONFIG_LOGS_STOP_TIMESTAMP = "log_level_stop_timestamp" +CONFIG_LOGS_UPDATE_DCS_FLAG = "update_dcs_flag" + # DEFAULTS DEFAULT_REACHABILITY_URL = "https://google.com" @@ -61,7 +68,7 @@ # UI2CS VALUES UI2CS_FILE_CHANNELS_PATH = "/media/sd-card/cloudsync" # PATH for running off device -# UI2CS_FILE_CHANNELS_PATH = "data/busses" +UI2CS_FILE_CHANNELS_PATH = "data/busses" # TREATMENT REPORT SECTIONS @@ -123,15 +130,9 @@ # TREATMENT TEMPLATE PATH TREATMENT_REPORT_TEMPLATE_PATH = "cloudsync/config/treatment_report_template.json" -# DEVICE LOGS TEMPLATE PATH -DEVICE_LOG_TEMPLATE_PATH = "cloudsync/config/device_log_template.json" +# LOGS UPLOAD TEMPLATE PATH +LOG_UPLOAD_TEMPLATE_PATH = "cloudsync/config/log_upload_template.json" -# CS LOGS TEMPLATE PATH -CS_LOG_TEMPLATE_PATH = "cloudsync/config/device_log_template.json" - -# CS LOG STATE FILE PATH -CS_LOG_STATE_FILE_PATH = 
"cloudsync/config/log_state.json" - # USER_AGENT USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36" CONTENT_TYPE = "application/json" @@ -152,4 +153,3 @@ # TIME CONSTANTS S_MS_CONVERSION_FACTOR = 1000 - Index: cloudsync/utils/helpers.py =================================================================== diff -u -r3736c29ba097740b4e99684625e78bfaaac6b1a1 -r34a18ebe349364c4e0462d876d4d191bf3df8939 --- cloudsync/utils/helpers.py (.../helpers.py) (revision 3736c29ba097740b4e99684625e78bfaaac6b1a1) +++ cloudsync/utils/helpers.py (.../helpers.py) (revision 34a18ebe349364c4e0462d876d4d191bf3df8939) @@ -7,6 +7,7 @@ import socket import re import base64 +import uuid from datetime import * from time import time, sleep @@ -148,16 +149,16 @@ def helpers_device_state_to_cloud_state(hd_mode: HDOpModes, hd_sub_mode: HDOpSubModes) -> DeviceStates: """ - Inactive Not OK – N/A – HD f/w will not know active vs. inactive – UI or cloud will have to maintain assigned tenant, active/inactive and Ok/Not OK while inactive - Inactive OK – N/A - Active OK – N/A + Inactive Not OK - N/A - HD f/w will not know active vs. inactive - UI or cloud will have to maintain assigned tenant, active/inactive and Ok/Not OK while inactive + Inactive OK - N/A + Active OK - N/A - Active Ready – mode is 3 (standby) and sub-mode is 1 (wait for treatment) - Active In Treatment – mode between 4 and 7 (treatment params .. post treatment) - Active Not Ready – mode/sub-mode is anything other than ready, in-treatment, or not OK - Active Not OK – mode is 0 (fault) + Active Ready - mode is 3 (standby) and sub-mode is 1 (wait for treatment) + Active In Treatment - mode between 4 and 7 (treatment params .. post treatment) + Active Not Ready - mode/sub-mode is anything other than ready, in-treatment, or not OK + Active Not OK - mode is 0 (fault) - Decommissioned – N/A – HD f/w will not know if system is decommissioned + Decommissioned - N/A - HD f/w will not know if system is decommissioned """ if hd_mode == HDOpModes.MODE_STAN: return DeviceStates.ACTIVE_READY @@ -248,58 +249,11 @@ template = json.load(f) return template else: - g_utils.logger.error("Configuration file does not exist: {0}".format(path)) - return {} - - -def helpers_read_device_log_template(path: str) -> dict: - """ - Read the device log template - :param path: the path to the template - :return: the loaded template - """ - if os.path.exists(path): - with open(path, 'r') as f: - template = json.load(f) - return template - else: g_utils.logger.error( "Configuration file does not exist: {0}".format(path)) return {} -def helpers_read_cs_log_template(path: str) -> dict: - """ - Read the cs log template - :param path: the path to the template - :return: the loaded template - """ - if os.path.exists(path): - with open(path, 'r') as f: - template = json.load(f) - return template - else: - g_utils.logger.error( - "Configuration file does not exist: {0}".format(path)) - return {} - - -def helpers_read_log_state(path: str) -> dict: - """ - Read the cs log state file - :param path: the path to the file - :return: the contents of the log state file - """ - if os.path.exists(path): - with open(path, 'r') as f: - template = json.load(f) - return template - else: - g_utils.logger.error( - "Configuration file does not exist: {0}".format(path)) - return {} - - def log_func(func): """ Log the function and the parameters passed to it @@ -316,28 +270,6 @@ return _wrapper -def 
helpers_extract_device_log_metadata(log_file_name:str) -> Union[dict,None]: - local_date_pattern = r"^\d{8}(?=_)" - local_time_pattern = r"^(?:^.*?)_(\d{6})_" - serial_number_pattern = r"^[a-zA-Z0-9]+_[a-zA-Z0-9]+_([a-zA-Z0-9]+)(?=_)" - device_subtype_pattern = r"^[a-zA-Z0-9]+_[a-zA-Z0-9]+_[a-zA-Z0-9]+_([a-zA-Z0-9]+)(?=)" - log_type_pattern = r"[a-zA-Z0-9]+\.u\.(\w+)(?=)" - - local_date_match = re.search(local_date_pattern, log_file_name) - local_time_match = re.search(local_time_pattern, log_file_name) - serial_number_match = re.search(serial_number_pattern, log_file_name) - device_subtype_match = re.search(device_subtype_pattern, log_file_name) - log_type_match = re.search(log_type_pattern, log_file_name) - - return { - "local_date": local_date_match.group(0) if local_date_match else 'unknown', - "local_time": local_time_match.group(1) if local_time_match else 'unknown', - "serial_number": serial_number_match.group(1) if serial_number_match else 'unknown', - "device_sub_type": device_subtype_match.group(1) if device_subtype_match else 'unknown', - "device_log_type": log_type_match.group(1) if log_type_match else 'unknown' - } - - def helpers_file_to_byte_array(file_path: str) -> bytearray: try: with open(file_path, "rb") as f: @@ -352,88 +284,14 @@ return None -def helpers_construct_cs_log_json(path:str): - """ - Constructs the payload for cs log file uploading - :param path: the path to the log file - :returns: the json payload to be uploaded - """ - cs_log_data_template = helpers_read_cs_log_template(CS_LOG_TEMPLATE_PATH) +def helpers_get_file_size(file_path: str) -> int: + try: + return os.path.getsize(file_path) + except OSError as e: + g_utils.logger.error(f'Error getting file size: {e}') + return False - # Convert the file into byte array - logs_byte_array = helpers_file_to_byte_array(path) - # Get the filename - file_name = os.path.basename(path) - - # Completion and generation timestamp from CS (in miliseconds) - local_timestamp = int(datetime.now(timezone.utc).timestamp()*1000) - - # Start and End timestamps calculation - today = datetime.now(timezone.utc) - start_of_day = int(today.replace(hour=0, minute=0, second=0, microsecond=0).timestamp()*1000) - end_of_day = int(today.replace(hour=23, minute=59, second=59, microsecond=999999).timestamp()*1000) - - # Populate JSON object - cs_log_data_template['metadata']['dataType'] = 'cloud-sync-log-category' - cs_log_data_template['metadata']['deviceFileName'] = file_name - cs_log_data_template['metadata']['startTimestamp'] = start_of_day - cs_log_data_template['metadata']['endTimestamp'] = end_of_day - cs_log_data_template['completedAt'] = local_timestamp - cs_log_data_template['generatedAt'] = local_timestamp - cs_log_data_template['data'] = logs_byte_array - cs_log_data_template['checksum'] = helpers_sha256_checksum(logs_byte_array) - - return cs_log_data_template - - -def helpers_construct_device_log_json(path:str): - """ - Constructs the payload for device log file uploading - :param path: the path to the log file - :returns: the json payload to be uploaded - """ - device_log_data = helpers_read_device_log_template(DEVICE_LOG_TEMPLATE_PATH) - - # Convert the file into byte array - logs_byte_array = helpers_file_to_byte_array(path) - - # Extract metadata from the filename - file_name = os.path.basename(path) - extracted_metadata = helpers_extract_device_log_metadata(file_name) - - # Completion and generation timestamp from CS (in miliseconds) - local_timestamp = int(datetime.now(timezone.utc).timestamp()*1000) - - # Calculate the 
UTC timestamp based on the local date and time the UI provided (in milliseconds) (NEED BETTER SOLUTION HERE) - if extracted_metadata['local_date'] != 'unknown' or extracted_metadata['local_time'] != 'unknown': - datetime_obj = datetime.strptime(f"{extracted_metadata['local_date']}{extracted_metadata['local_time']}", "%Y%m%d%H%M%S") - # Convert to UTC timestamp - ui_utc_timestamp = int(datetime_obj.timestamp()*1000) - else: - ui_utc_timestamp = local_timestamp - - - # Populate JSON object - if extracted_metadata is not None: - device_log_data['metadata']['dataType'] = 'device-log-category' - device_log_data['metadata']['deviceLogType'] = extracted_metadata['device_log_type'] - device_log_data['metadata']['deviceSubType'] = extracted_metadata['device_sub_type'] - device_log_data['metadata']['deviceFileName'] = file_name - device_log_data['metadata']['startTimestamp'] = ui_utc_timestamp - device_log_data['metadata']['endTimestamp'] = ui_utc_timestamp - device_log_data['completedAt'] = local_timestamp - device_log_data['generatedAt'] = local_timestamp - device_log_data['data'] = logs_byte_array - device_log_data['checksum'] = helpers_sha256_checksum(logs_byte_array) - - return device_log_data - - else: - g_utils.logger.error('Device log file name does not match the pattern') - return None - - def helpers_read_treatment_log_file(path: str): treatment_data = helpers_read_treatment_report_template(TREATMENT_REPORT_TEMPLATE_PATH) @@ -507,6 +365,7 @@ # print('regular line') if line.startswith(TREATMENT_CODE): treatment_data['data']['treatment']['treatmentCode'] = line.split(',')[1] + treatment_data['reference'] = line.split(',')[1] elif line.startswith(PATIENT_ID): treatment_data['data']['patient']['id'] = line.split(',')[1] elif line.startswith(TREATMENT_DURATION): @@ -688,37 +547,188 @@ if os.path.exists(path_to_delete) and os.path.isdir(path_to_delete): shutil.rmtree(path_to_delete) else: - raise FileNotFoundError(f"{path_to_delete} not found or not a directory!") + raise FileNotFoundError( + f"{path_to_delete} not found or not a directory!") +# LOGGING SPECIFIC HELPERS -def helpers_should_update_dcs_log_level() -> bool: + +def helpers_read_device_log_template(path: str) -> dict: """ + Read the device log template + :param path: the path to the template + :return: the loaded template + """ + if os.path.exists(path): + with open(path, 'r') as f: + template = json.load(f) + return template + else: + g_utils.logger.error( + "Configuration file does not exist: {0}".format(path)) + return {} + + +def helpers_read_cs_log_template(path: str) -> dict: + """ + Read the cs log template + :param path: the path to the template + :return: the loaded template + """ + if os.path.exists(path): + with open(path, 'r') as f: + template = json.load(f) + return template + else: + g_utils.logger.error( + "Configuration file does not exist: {0}".format(path)) + return {} + + +def helpers_extract_device_log_metadata(log_file_name:str) -> Union[dict,None]: + local_date_pattern = r"^\d{8}(?=_)" + local_time_pattern = r"^(?:^.*?)_(\d{6})_" + serial_number_pattern = r"^[a-zA-Z0-9]+_[a-zA-Z0-9]+_([a-zA-Z0-9]+)(?=_)" + device_subtype_pattern = r"^[a-zA-Z0-9]+_[a-zA-Z0-9]+_[a-zA-Z0-9]+_([a-zA-Z0-9]+)(?=)" + log_type_pattern = r"[a-zA-Z0-9]+\.u\.(\w+)(?=)" + + local_date_match = re.search(local_date_pattern, log_file_name) + local_time_match = re.search(local_time_pattern, log_file_name) + serial_number_match = re.search(serial_number_pattern, log_file_name) + device_subtype_match = re.search(device_subtype_pattern, 
log_file_name)
+    log_type_match = re.search(log_type_pattern, log_file_name)
+
+    return {
+        "local_date": local_date_match.group(0) if local_date_match else 'unknown',
+        "local_time": local_time_match.group(1) if local_time_match else 'unknown',
+        "serial_number": serial_number_match.group(1) if serial_number_match else 'unknown',
+        "device_sub_type": device_subtype_match.group(1) if device_subtype_match else 'unknown',
+        "device_log_type": log_type_match.group(1) if log_type_match else 'unknown'
+    }
+
+
+def helpers_construct_cs_log_json(cs_log_data: dict):
+    """
+    Constructs the payload for cs log file uploading
+    :param cs_log_data: a dictionary with the info related to the current file
+    :returns: a json payload to be used for uploading using sessions.
+    """
+    cs_log_json = helpers_read_cs_log_template(LOG_UPLOAD_TEMPLATE_PATH)
+
+    # Convert the file into byte array
+    logs_byte_array = helpers_file_to_byte_array(cs_log_data['path'])
+    checksum = helpers_sha256_checksum(logs_byte_array)
+
+    # Get file size
+    file_size = helpers_get_file_size(cs_log_data['path'])
+
+    # Get the filename
+    file_name = os.path.basename(cs_log_data['path'])
+
+    # Completion and generation timestamp from CS (in milliseconds)
+    local_timestamp = int(datetime.now(timezone.utc).timestamp()*1000)
+
+    # Start and End timestamps calculation
+    today = datetime.now(timezone.utc)
+    start_of_day = int(today.replace(hour=0, minute=0, second=0, microsecond=0).timestamp()*1000)
+    end_of_day = int(today.replace(hour=23, minute=59, second=59, microsecond=999999).timestamp()*1000)
+
+    # Populate JSON object
+    cs_log_json['start_session']['reference'] = str(uuid.uuid4())
+    cs_log_json['start_session']['generatedAt'] = local_timestamp
+    cs_log_json['start_session']['dataType'] = 'device-data'
+    cs_log_json['start_session']['serialNumber'] = cs_log_data['serialNumber']
+    cs_log_json['start_session']['macAddress'] = 'device-data'
+    cs_log_json['start_session']['organizationId'] = cs_log_data['organizationId']
+    cs_log_json['start_session']['metadata']['dataType'] = 'cloud-sync-log-category'
+    cs_log_json['start_session']['metadata']['deviceFileName'] = file_name
+    cs_log_json['start_session']['metadata']['startTimestamp'] = start_of_day
+    cs_log_json['start_session']['metadata']['endTimestamp'] = end_of_day
+    cs_log_json['end_session']['checksum'] = checksum
+    cs_log_json['general']['file_size'] = file_size
+    cs_log_json['general']['file_path'] = cs_log_data['path']
+
+    return cs_log_json
+
+
+def helpers_construct_device_log_json(device_log_data: dict):
+    """
+    Constructs the payload for device log file uploading
+    :param device_log_data: a dictionary with the info related to the current file
+    :returns: a json payload to be used for uploading using sessions.
+    """
+    device_log_json = helpers_read_device_log_template(LOG_UPLOAD_TEMPLATE_PATH)
+
+    # Convert the file into byte array
+    logs_byte_array = helpers_file_to_byte_array(device_log_data['path'])
+    checksum = helpers_sha256_checksum(logs_byte_array)
+
+    # Get file size
+    file_size = helpers_get_file_size(device_log_data['path'])
+
+    # Extract metadata from the filename
+    file_name = os.path.basename(device_log_data['path'])
+    extracted_metadata = helpers_extract_device_log_metadata(file_name)
+
+    # Timestamp for GeneratedAt (in milliseconds)
+    local_timestamp = int(datetime.now(timezone.utc).timestamp()*1000)
+
+    # Calculate the UTC timestamp based on the local date and time the UI provided (in milliseconds) (NEED BETTER SOLUTION HERE)
+    # Both parts must be known, otherwise strptime would fail on the 'unknown' placeholder
+    if extracted_metadata['local_date'] != 'unknown' and extracted_metadata['local_time'] != 'unknown':
+        datetime_obj = datetime.strptime(f"{extracted_metadata['local_date']}{extracted_metadata['local_time']}", "%Y%m%d%H%M%S")
+        # Convert to UTC timestamp
+        ui_utc_timestamp = int(datetime_obj.timestamp()*1000)
+    else:
+        ui_utc_timestamp = local_timestamp
+
+    # Populate JSON object
+    if extracted_metadata is not None:
+        device_log_json['start_session']['reference'] = str(uuid.uuid4())
+        device_log_json['start_session']['generatedAt'] = local_timestamp
+        device_log_json['start_session']['dataType'] = 'device-data'
+        device_log_json['start_session']['serialNumber'] = device_log_data['serialNumber']
+        device_log_json['start_session']['macAddress'] = 'device-data'
+        device_log_json['start_session']['organizationId'] = device_log_data['organizationId']
+        device_log_json['start_session']['metadata']['dataType'] = 'device-log-category'
+        device_log_json['start_session']['metadata']['deviceLogType'] = extracted_metadata['device_log_type']
+        device_log_json['start_session']['metadata']['deviceSubType'] = extracted_metadata['device_sub_type']
+        device_log_json['start_session']['metadata']['deviceFileName'] = file_name
+        device_log_json['start_session']['metadata']['startTimestamp'] = ui_utc_timestamp
+        device_log_json['start_session']['metadata']['endTimestamp'] = ui_utc_timestamp
+        device_log_json['end_session']['checksum'] = checksum
+        device_log_json['general']['file_size'] = file_size
+        device_log_json['general']['file_path'] = device_log_data['path']
+
+        return device_log_json
+
+    else:
+        g_utils.logger.error('Device log file name does not match the pattern')
+        return None
+
+
+def helpers_should_update_dcs_log_level(g_config: dict) -> bool:
+    """
     Returns True if the state of the log level should be communicated to the DCS else False. It is controlled by
-    the update_dcs_flag in the log_state.json.
+    the update_dcs_flag in the config.json.
* 1 --> True * 0 --> False """ - log_state = helpers_read_log_state(CS_LOG_STATE_FILE_PATH) - if int(log_state['update_dcs_flag']) == 1: + update_dcs_flag = g_config[CONFIG_LOGS][CONFIG_LOGS_UPDATE_DCS_FLAG] + if int(update_dcs_flag) == 1: return True return False -def helpers_should_update_cs_log_level(response:dict) -> bool: - current_log_state = helpers_read_log_state(CS_LOG_STATE_FILE_PATH) - current_log_level:str = current_log_state['current_log_level'] - requested_log_level:str = response['logLevel'] - if requested_log_level.upper() != current_log_level.upper(): +def helpers_should_update_cs_log_level(response: dict, g_config: dict) -> bool: + current_log_level:str = g_config[CONFIG_LOGS][CONFIG_LOGS_CURRENT_LOG_LEVEL].upper() + requested_log_level:str = response['logLevel'].upper() + if requested_log_level != current_log_level: g_utils.logger.debug(f"DCS requested to change the log level from {current_log_level} to {requested_log_level}") return True return False -def helpers_write_log_state(file_path: str, config: dict) -> None: - with open(file_path, 'w') as f: - json.dump(config, f, indent=4) - def helpers_calculate_log_level_duration(response): """ Returns a tuple with duration_in_seconds, start_timestamp, end_timestamp Index: cloudsync/utils/logging.py =================================================================== diff -u -r3736c29ba097740b4e99684625e78bfaaac6b1a1 -r34a18ebe349364c4e0462d876d4d191bf3df8939 --- cloudsync/utils/logging.py (.../logging.py) (revision 3736c29ba097740b4e99684625e78bfaaac6b1a1) +++ cloudsync/utils/logging.py (.../logging.py) (revision 34a18ebe349364c4e0462d876d4d191bf3df8939) @@ -1,15 +1,13 @@ import logging import os -from logging.handlers import TimedRotatingFileHandler +import threading from cloudsync.handlers.logs_handler import CustomTimedRotatingFileHandlerHandler from cloudsync.utils.singleton import SingletonMeta -from cloudsync.handlers.error import Error from cloudsync.utils.globals import * from cloudsync.utils.helpers import * from cloudsync.common.enums import * - class LoggingConfig(metaclass=SingletonMeta): """ Encapsulates logging configuration and setup. 
@@ -24,13 +22,16 @@
         40: "ERROR",
         50: "CRITICAL"
     }
+    _countdown_thread = None
+    _stop_countdown_thread_event = None

     def __init__(self):
         self.flask_app = None
         self.log_level = self.DEFAULT_LOG_LEVEL
         self.log_level_name = None
         self.log_level_duration = None
         self.log_handler = None
+        self.g_config = None

     def initiate(self, app):
         self.flask_app = app
@@ -54,17 +55,17 @@
         # Set the log level to the custom log handler
         self.log_handler.setLevel(self.log_level)
         # Set the log level the the flask logger
-        self.flask_app.logger.setLevel(self.log_level)
+        # self.flask_app.logger.setLevel(self.log_level)
         # Set the log level ot the root logger
         root_logger = logging.getLogger()
         root_logger.setLevel(self.log_level)

         # Update the log state file
         update_obj = {
-            'attribute': 'current_log_level',
+            'attribute': CONFIG_LOGS_CURRENT_LOG_LEVEL,
             'value': self.log_level_name
         }
-        self.__update_log_state_file(update_object=update_obj)
+        self.__update_log_state(update_object=update_obj)

         g_utils.logger.info(f'Log level set at {self.log_level_name} ({self.log_level})')

@@ -73,7 +74,7 @@

     def set_log_handler(self):
         # Add the handler to the Flask app logger
-        self.flask_app.logger.addHandler(self.log_handler)
+        # self.flask_app.logger.addHandler(self.log_handler)
         # Add the handler to the root logger
         root_logger = logging.getLogger()
         root_logger.addHandler(self.log_handler)
@@ -83,48 +84,47 @@
             'attribute': 'update_dcs_flag',
             'value': flag_state
         }
-        self.__update_log_state_file(update_object=update_obj)
+        self.__update_log_state(update_object=update_obj)

     def set_log_duration(self, duration: Union[int, tuple]):
         if isinstance(duration, tuple):
             self.log_level_duration = int(duration[0])
             # Update the duration at log state file
             update_obj = {
-                'attribute': 'log_level_duration',
+                'attribute': CONFIG_LOGS_LOG_LEVEL_DURATION,
                 'value': duration[0]
             }
-            self.__update_log_state_file(update_object=update_obj)
+            self.__update_log_state(update_object=update_obj)

             # Update the start timestamp at log state file
             update_obj = {
-                'attribute': 'log_level_start_timestamp',
+                'attribute': CONFIG_LOGS_START_TIMESTAMP,
                 'value': duration[1]
             }
-            self.__update_log_state_file(update_object=update_obj)
+            self.__update_log_state(update_object=update_obj)

             # Update the stop timestamp at log state file
             update_obj = {
-                'attribute': 'log_level_stop_timestamp',
+                'attribute': CONFIG_LOGS_STOP_TIMESTAMP,
                 'value': duration[2]
             }
-            self.__update_log_state_file(update_object=update_obj)
+            self.__update_log_state(update_object=update_obj)
         else:
             self.log_level_duration = int(duration)
             # Update the duration at log state file
             update_obj = {
-                'attribute': 'log_level_duration',
+                'attribute': CONFIG_LOGS_LOG_LEVEL_DURATION,
                 'value': duration
             }
-            self.__update_log_state_file(update_object=update_obj)
+            self.__update_log_state(update_object=update_obj)

         g_utils.logger.debug(f"Log level duration set at {self.log_level_duration}")

-    def get_log_level(self) -> tuple:
+    def get_log_level(self) -> dict:
         """
-        Returns a tuple with both the numeric and text
-        value of the log level. The first element of the tuple
-        is the numeric value and the second one is the text.
+        Returns an object with both the `numeric_value` and `text_value`
+        of the log level.
""" - return self.log_level, self.log_level_name + return { "numeric_value": self.log_level, "text_value": self.log_level_name } def set_network_provider(self, network_request_handler) -> None: """ @@ -142,9 +142,11 @@ def set_configuration(self, g_config) -> None: """ - Passes the configuration tothe custom log handler so it can + Passes the configuration to the custom log handler so it can be able to send uploading log requests to the dcs for CS logs. + Also it stores it locally as indicator that the config file has been read. """ + self.g_config = g_config self.log_handler.set_configuration(g_config=g_config) def clear_prelogger(self) -> None: @@ -155,12 +157,29 @@ def start_countdown(self) -> None: if self.log_level and self.log_level_duration: + # Kill the previous running thread + if self._countdown_thread is not None and self._countdown_thread.is_alive(): + self._stop_countdown_thread_event.set() + try: - # Initiate the log level reset thread - Thread(target=self.__reset_log_level, args=(self.log_level_duration,)).start() + self._stop_countdown_thread_event = threading.Event() + new_thread = threading.Thread(target=self.__reset_log_level, args=(self.log_level_duration, self._stop_countdown_thread_event,)) + new_thread.start() + self._countdown_thread = new_thread except Exception as e: g_utils.logger.error(f"Failed starting the countdown thread: {e}") + def revert_to_error(self) -> None: + if self._countdown_thread is not None and self._countdown_thread.is_alive(): + self._stop_countdown_thread_event.set() + + self.set_log_level(self.DEFAULT_LOG_LEVEL) + self.set_dcs_flag(1) + self.set_log_duration((0,0,0)) # clear out the log state file + self._countdown_thread = None + self._stop_countdown_thread_event = None + g_utils.logger.info(f"Logger level reverted to: {self.DEFAULT_LOG_LEVEL}") + def __configure_logging(cls): """ * Sets up the file rotation Handler @@ -196,21 +215,36 @@ # Set the custom handler as global handler cls.set_log_handler() # Set the g_utils logger - g_utils.add_logger(cls.flask_app.logger) + # g_utils.add_logger(cls.flask_app.logger) + g_utils.add_logger(logging.getLogger()) # Set the log level globally cls.set_log_level(cls.log_level) - def __update_log_state_file(cls, update_object:dict): - log_state_file = helpers_read_log_state(CS_LOG_STATE_FILE_PATH) - log_state_file[update_object['attribute']] = update_object['value'] - helpers_write_log_state(CS_LOG_STATE_FILE_PATH, log_state_file) + def __update_log_state(cls, update_object:dict): - def __reset_log_level(cls, duration:int): + # Update the log state ONLY if the config file has been loaded. + if cls.g_config is not None: + cls.g_config[CONFIG_LOGS][update_object['attribute']] = update_object['value'] + + if cls.g_config[CONFIG_DEVICE][CONFIG_DEVICE_MODE] == 'operation': + helpers_write_config(OPERATION_CONFIG_PATH, OPERATION_CONFIG_FILE_PATH, cls.g_config) + else: + helpers_write_config(None, CONFIG_PATH, cls.g_config) + helpers_write_config(OPERATION_CONFIG_PATH, OPERATION_CONFIG_FILE_PATH, cls.g_config) + + def __reset_log_level(cls, duration:int, stop_event: threading.Event): """ Resets the logger level to the specified default level after a given time interval. 
""" - g_utils.logger.info(f"Logger level set to: {cls.log_level_name} for {duration} seconds") - sleep(duration) - cls.set_log_level(cls.DEFAULT_LOG_LEVEL) - cls.set_dcs_flag(1) - g_utils.logger.info(f"Logger level reverted to: {cls.DEFAULT_LOG_LEVEL}") + try: + g_utils.logger.info(f"Logger level set to: {cls.log_level_name} for {duration} seconds") + while duration > 0: # Loop instead of sleep for interruptibility + duration -= 1 + sleep(1) + if stop_event.is_set(): + return + if not threading.current_thread().is_alive(): + return + cls.revert_to_error() + except InterruptedError: + g_utils.logger.info("Countdown thread interrupted.") Index: cs.py =================================================================== diff -u -re2b5bba1ace2613e8cd3ca6d997756b1b61d77a4 -r34a18ebe349364c4e0462d876d4d191bf3df8939 --- cs.py (.../cs.py) (revision e2b5bba1ace2613e8cd3ca6d997756b1b61d77a4) +++ cs.py (.../cs.py) (revision 34a18ebe349364c4e0462d876d4d191bf3df8939) @@ -30,17 +30,29 @@ if cs_proc: print("CloudSync app already running") else: - print("Starting CloudSync app with logging level {0}".format(logging_level)) - time.sleep(DELAY) + print(f"Starting CloudSync app with logging level {logging_level}") Popen(['python3', 'cloud_sync.py', str(logging_level)]) + # Keep the main process running + while True: + # Check if the subprocess is still running or perform other periodic checks + time.sleep(60) # Adjust the sleep time as needed def stop(): cs_proc_pid = get_pid() if cs_proc_pid: print("Stopping CloudSync app...") time.sleep(DELAY) - os.kill(int(cs_proc_pid), signal.SIGKILL) + try: + os.kill(int(cs_proc_pid), signal.SIGKILL) + print(f"Successfully killed process {cs_proc_pid}") + except PermissionError: + print( + f"Permission denied when trying to kill process {cs_proc_pid}") + except ProcessLookupError: + print(f"No process with PID {cs_proc_pid}") + except Exception as e: + print(f"Error killing process {cs_proc_pid}: {str(e)}") else: print("CloudSync app is not running.")