From 25dcd27c6955b06dcf8a7357f232d9a39aa568b3 Mon Sep 17 00:00:00 2001
From: vvzvlad
Date: Sat, 12 Apr 2025 00:02:37 +0300
Subject: [PATCH] Refactor check_logs
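
Move the node's sync state out of the local JSON file
(/root/node/sync_state_2.json) and into Grist. check_logs() no longer
reads or writes a state file: the caller passes in the last known sync
count and status, and the function returns the updated values together
with a display string, roughly:

    result = check_logs(logger, initial_sync_count, previous_status_type)
    # -> {"status_message": "OK: 123k (5)",
    #     "current_status_type": "OK",
    #     "current_sync_count": 5}

The main loop reads Syncs/Health from the node's Grist row before the
check and writes the returned values back, so the Grist row becomes the
single source of truth for node state.
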
+ """ + current_sync_count = initial_sync_count # Initialize with the value from Grist + try: logs = subprocess.run(['docker', 'logs', '--since', '10m', 'infernet-node'], capture_output=True, text=True, check=True) log_content = clean_ansi(logs.stdout) @@ -210,8 +192,8 @@ def check_logs(logger): match = head_sub_pattern.search(line) if match: - last_head_sub_id = match.group(1) - # No continue here, allows checking_info from same timeframe to override + last_head_sub_id = match.group(1) + # No continue here, allows checking_info from same timeframe to override current_status_type = "Idle" status_message = "Idle" @@ -220,29 +202,30 @@ def check_logs(logger): formatted_id = format_number(last_checking_info["last_sub_id"]) if last_checking_info["num_subs_to_sync"] > 0: current_status_type = "Sync" - status_message = f"Sync: {formatted_id} ({state['sync_count']})" + status_message = f"Sync: {formatted_id} ({current_sync_count})" # Use current_sync_count logger.info(f"Node is syncing. Last sub ID: {last_checking_info['last_sub_id']}, Num subs to sync: {last_checking_info['num_subs_to_sync']}") else: current_status_type = "OK" - # Increment count only on transition from Sync to OK based on the *last* status type + # Increment count only on transition from Sync to OK if previous_status == "Sync": - state["sync_count"] += 1 - logger.info(f"Sync completed. Sync count incremented to {state['sync_count']}.") - status_message = f"OK: {formatted_id} ({state['sync_count']})" + current_sync_count += 1 # Increment local count + logger.info(f"Sync completed. Sync count incremented to {current_sync_count}.") + status_message = f"OK: {formatted_id} ({current_sync_count})" # Use current_sync_count logger.info(f"Node is OK. Last sub ID: {last_checking_info['last_sub_id']}") elif last_ignored_id: # Fallback to "Ignored" logs if "Checking" is missing formatted_id = format_number(last_ignored_id) current_status_type = "Sync" # Assume sync if we only see ignored creations recently - status_message = f"Sync: {formatted_id} ({state['sync_count']})" + status_message = f"Sync: {formatted_id} ({current_sync_count})" # Use current_sync_count logger.info(f"Node possibly syncing (based on ignored logs). Last ignored ID: {last_ignored_id}") elif last_head_sub_id: # Fallback to "head sub id" if others are missing formatted_id = format_number(last_head_sub_id) current_status_type = "OK" # Assume OK if this is the latest relevant info - status_message = f"OK: {formatted_id} ({state['sync_count']})" + # Don't increment sync count here, only on Sync -> OK transition based on "Checking" logs + status_message = f"OK: {formatted_id} ({current_sync_count})" # Use current_sync_count logger.info(f"Node status based on head sub id. 
Head sub ID: {last_head_sub_id}") else: @@ -250,21 +233,31 @@ def check_logs(logger): status_message = "Idle" current_status_type = "Idle" - # Update state and write back if changed - state["last_status"] = current_status_type - if state["sync_count"] != original_sync_count or state["last_status"] != previous_status: - write_state(state, logger) - - return {"status": status_message} + # Return the results instead of writing to a file + return { + "status_message": status_message, + "current_status_type": current_status_type, + "current_sync_count": current_sync_count + } except subprocess.CalledProcessError as e: + error_msg = f"Error: Docker logs failed ({e.returncode})" logger.error(f"Error running docker logs command: {e.stderr or e.stdout or e}") - # Don't update state file on command error, keep previous state - return {"status": f"Error: Docker logs failed ({e.returncode})"} + # Return error status and original sync count + return { + "status_message": error_msg, + "current_status_type": "Error", + "current_sync_count": initial_sync_count # Return original count on error + } except Exception as e: + error_msg = "Error: Log processing failed" logger.error(f"Unexpected error processing logs: {e}", exc_info=True) - # Don't update state file on unexpected error - return {"status": f"Error: Log processing failed"} + # Return error status and original sync count + return { + "status_message": error_msg, + "current_status_type": "Error", + "current_sync_count": initial_sync_count # Return original count on error + } if __name__ == "__main__": colorama.init(autoreset=True) @@ -291,20 +284,56 @@ if __name__ == "__main__": current_vm = grist.find_record(name=GRIST_ROW_NAME, table=NODES_TABLE)[0] def grist_callback(msg): grist.update(current_vm.id, msg, NODES_TABLE) + # Get initial state from Grist + initial_sync_count = int(current_vm.Syncs or 0) + # Determine previous status type based on Health string (simplified) + previous_health_status = current_vm.Health or "Idle" + previous_status_type = "Idle" # Default + if previous_health_status.startswith("Sync"): + previous_status_type = "Sync" + elif previous_health_status.startswith("OK"): + previous_status_type = "OK" + elif previous_health_status.startswith("Error"): + previous_status_type = "Error" # Consider error state + + logger.info(f"Initial state from Grist - Syncs: {initial_sync_count}, Health: {previous_health_status} (interpreted as: {previous_status_type})") + for attempt in range(3): - try: - result = check_logs(logger) - grist_callback({ "Health": result["status"] }) - logger.info(f"Status: {result['status']} ()") - if result["status"] == "Idle": + try: + # Pass initial state to check_logs + result = check_logs(logger, initial_sync_count, previous_status_type) + + grist_updates = {"Health": result["status_message"]} + + # Update Syncs count in Grist only if it changed + if result["current_sync_count"] != initial_sync_count: + grist_updates["Syncs"] = result["current_sync_count"] + logger.info(f"Sync count changed from {initial_sync_count} to {result['current_sync_count']}") + + # Send updates to Grist + grist_callback(grist_updates) + logger.info(f"Status update sent: {grist_updates}") + + # Reboot logic (remains mostly the same, reads Reboots from current_vm) + if result["current_status_type"] == "Idle": # Check type, not message uptime_seconds = os.popen("cat /proc/uptime | cut -d'.' 
-f1").read() uptime_seconds = int(uptime_seconds) if uptime_seconds > 60*60*1: - grist_callback({ "Health": "Rebooting" }) + reboot_count = int(current_vm.Reboots or 0) + reboot_count += 1 + # Include reboot count in the final Grist update before rebooting + grist_updates = { "Health": "Rebooting", "Reboots": reboot_count } + grist_callback(grist_updates) + logger.info(f"Idle detected for >1 hour (uptime: {uptime_seconds}s). Rebooting. Reboot count: {reboot_count}") os.system("reboot") - except Exception as e: - logger.error(f"Error on attempt {attempt+1}/3: {e}") + break # Exit loop on success + except Exception as e: + logger.error(f"Error in main loop, attempt {attempt+1}/3: {e}", exc_info=True) if attempt == 2: - grist_callback({ "Health": f"Error: {e}" }) - if attempt < 2: - time.sleep(5) \ No newline at end of file + # Log final error to Grist on last attempt + try: + grist_updates = { "Health": f"Error: Main loop failed - {e}" } + grist_callback(grist_updates) + except Exception as grist_e: + logger.error(f"Failed to log final error to Grist: {grist_e}") + time.sleep(5) # Wait before retrying