Index: cloudsync/handlers/cs_mft_dcs_request_handler.py
===================================================================
diff -u -r3ecfe17da0e7fb115cc97ed14014d9c13aa87a8f -rf6c1d0be3c250785357d92318fa0662656f0f61d
--- cloudsync/handlers/cs_mft_dcs_request_handler.py	(.../cs_mft_dcs_request_handler.py)	(revision 3ecfe17da0e7fb115cc97ed14014d9c13aa87a8f)
+++ cloudsync/handlers/cs_mft_dcs_request_handler.py	(.../cs_mft_dcs_request_handler.py)	(revision f6c1d0be3c250785357d92318fa0662656f0f61d)
@@ -425,6 +425,14 @@
 
         # Step #3 - upload the device log file in chunks
         # Duplicate detection relies on 409 at start-session (immediate, no wasted chunks).
+        if not os.path.exists(device_log_data['path']):
+            self.logger.warning(f"Device log file no longer available: {device_log_data['path']}")
+            error = Error.general(OutboundMessageIDs.CS2UI_ERROR.value,
+                                  ErrorIDs.CS_DEVICE_LOG_ERROR.value,
+                                  f"Device log file no longer available: {device_log_data['path']}")
+            self.error_handler.enqueue_error(error=error)
+            return
+
         device_log_json = helpers_construct_device_log_json(device_log_data)
 
         upload_result = cmd_outgoing_upload_file_in_chunks(base_url=base_url,
@@ -516,6 +524,10 @@
 
         # Step #3 - upload the cs log file
 
+        if not os.path.exists(cs_log_data['path']):
+            self.logger.warning(f"CS log file no longer available: {cs_log_data['path']}")
+            return
+
         cs_log_json = helpers_construct_cs_log_json(cs_log_data)
 
         cs_log_filename = cmd_outgoing_upload_file_in_chunks(base_url=base_url,
@@ -532,8 +544,18 @@
 
         if isinstance(cs_log_filename, dict) and not cs_log_filename.get("accepted"):
             self.logger.info(f"CS log file rejected as duplicate: {cs_log_filename.get('filename')}")
+            try:
+                os.remove(cs_log_data['path'])
+                self.logger.info(f"Duplicate CS log file cleaned up: {cs_log_data['path']}")
+            except OSError:
+                pass  # already removed by log retention
         elif isinstance(cs_log_filename, str):
             self.logger.debug(f"CS log file uploaded: {cs_log_filename}")
+            try:
+                os.remove(cs_log_data['path'])
+                self.logger.info(f"Uploaded CS log file cleaned up: {cs_log_data['path']}")
+            except OSError:
+                pass
 
         else:
             error = Error.general(OutboundMessageIDs.CS2UI_ERROR.value,
Index: cloudsync/handlers/logs_handler.py
===================================================================
diff -u -ra923c3f28c864ca51557deeabebcaa576147ae4c -rf6c1d0be3c250785357d92318fa0662656f0f61d
--- cloudsync/handlers/logs_handler.py	(.../logs_handler.py)	(revision a923c3f28c864ca51557deeabebcaa576147ae4c)
+++ cloudsync/handlers/logs_handler.py	(.../logs_handler.py)	(revision f6c1d0be3c250785357d92318fa0662656f0f61d)
@@ -3,14 +3,17 @@
 
 from cloudsync.utils.globals import *
 from logging.handlers import TimedRotatingFileHandler
-from time import time
+from time import time, sleep
+from threading import Thread
 
 
 class CustomTimedRotatingFileHandlerHandler(TimedRotatingFileHandler):
     """
     Handler for logging to a file, rotating the log file and uploading it to cloud at certain timed intervals.
     It extends the official TimedRotatingFileHandler to add the upload functionality.
     """
+    _upload_in_progress = False
+
     def __init__(self, filename, prelogger: Logger, *args, **kwargs):
         super().__init__(filename, *args, **kwargs)
         self.network_request_handler = None
@@ -34,7 +37,13 @@
     def __select_files_to_upload(cls):
         """
         Checks for existing rotated files in the directory (excluding the newly created one) and uploads them.
+        Spawns a background thread to upload files sequentially, waiting for each to complete
+        before starting the next. This prevents queue saturation when multiple backlogged
+        files exist (e.g. after extended offline periods).
         """
+        if cls._upload_in_progress:
+            return  # previous batch still draining
+
         # We use the cls.baseFilename because according to code documentation:
         # the filename passed in, could be a path object (see Issue #27493),
         # but cls.baseFilename will be a string
@@ -46,9 +55,29 @@
                 existing_files.append(os.path.join(os.path.dirname(cls.baseFilename), filename))
 
         if len(existing_files) > 0:
-            for existing_file in existing_files:
-                cls.__upload_cs_log_file(existing_file)
+            existing_files.sort(key=lambda x: os.path.getmtime(x))  # oldest first
+            cls._upload_in_progress = True
+            Thread(target=cls.__upload_files_sequentially,
+                   args=(existing_files,), daemon=True).start()
+    def __upload_files_sequentially(cls, files):
+        """
+        Uploads CS log files one at a time, waiting for the network queue to drain
+        between each upload. This ensures all backlogged files are uploaded without
+        saturating the single-slot network queue.
+        """
+        try:
+            for f in files:
+                if not os.path.exists(f):
+                    continue  # deleted by log retention or previous upload
+                cls.__upload_cs_log_file(f)
+                # Wait for network queue to drain before next file
+                while len(cls.network_request_handler.queue) > 0:
+                    sleep(1)
+                sleep(2)  # yield to bus-initiated requests
+        finally:
+            cls._upload_in_progress = False
+
 
     def __upload_cs_log_file(cls, log_file_path):
         cs_log_data = {