Refactor check_logs
parent dcd3a62e3d
commit 25dcd27c69

checker.py (149 lines changed)
@@ -41,8 +41,6 @@ import colorama
 import logging
 import socket
 
-STATE_FILE_PATH = '/root/node/sync_state_2.json'
-
 def self_update(logger):
     logger.info("Checking for updates..")
     script_path = os.path.abspath(__file__)
@@ -132,34 +130,6 @@ class GRIST:
                 return record.Value
         raise ValueError(f"Setting {key} not found")
 
-def read_state(logger):
-    """Reads the last known state (status and sync count) from the state file."""
-    default_state = {"last_status": "Idle", "sync_count": 0}
-    if not os.path.exists(STATE_FILE_PATH):
-        logger.info(f"State file {STATE_FILE_PATH} not found, starting with default state.")
-        return default_state
-    try:
-        with open(STATE_FILE_PATH, 'r', encoding='utf-8') as f:
-            state = json.load(f)
-        # Validate state structure
-        if "last_status" not in state or "sync_count" not in state:
-            logger.warning(f"Invalid state file format in {STATE_FILE_PATH}, using default state.")
-            return default_state
-        # Ensure sync_count is an integer
-        state["sync_count"] = int(state["sync_count"])
-        return state
-    except (json.JSONDecodeError, ValueError, IOError) as e:
-        logger.error(f"Error reading or parsing state file {STATE_FILE_PATH}: {e}. Using default state.")
-        return default_state
-
-def write_state(state, logger):
-    """Writes the current state to the state file."""
-    try:
-        with open(STATE_FILE_PATH, 'w', encoding='utf-8') as f:
-            json.dump(state, f, indent=4)
-    except IOError as e:
-        logger.error(f"Error writing state file {STATE_FILE_PATH}: {e}")
-
 def clean_ansi(text):
     ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
     return ansi_escape.sub('', text)
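
The clean_ansi helper shown as context above is untouched by this commit; for reference, a minimal standalone sketch of what its regex strips (the example string is illustrative):

    import re

    def clean_ansi(text):
        # ESC + one byte in @-Z / \-_, or a CSI sequence: ESC [ parameter bytes, intermediate bytes, final byte
        ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
        return ansi_escape.sub('', text)

    print(clean_ansi("\x1b[32mINFO\x1b[0m: node is syncing"))  # -> "INFO: node is syncing"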
@@ -173,10 +143,22 @@ def format_number(number_str):
     except (ValueError, TypeError):
         return "NaN" # Or some other indicator of invalid input
 
-def check_logs(logger):
-    state = read_state(logger)
-    original_sync_count = state["sync_count"]
-    previous_status = state["last_status"]
+def check_logs(logger, initial_sync_count, previous_status):
+    """
+    Checks docker logs for node status (Syncing, OK, Idle) and updates sync count.
+
+    Args:
+        logger: The logger instance.
+        initial_sync_count: The sync count read from Grist at the start.
+        previous_status: The last known status read from Grist ('Sync', 'OK', 'Idle', or others).
+
+    Returns:
+        A dictionary containing:
+        - status_message: A string describing the current status (e.g., "Sync: 123k (5)").
+        - current_status_type: The type of the current status ('Sync', 'OK', 'Idle', 'Error').
+        - current_sync_count: The updated sync count.
+    """
+    current_sync_count = initial_sync_count  # Initialize with the value from Grist
 
     try:
         logs = subprocess.run(['docker', 'logs', '--since', '10m', 'infernet-node'], capture_output=True, text=True, check=True)
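
With the new signature, check_logs no longer reads or writes a state file; its result depends only on its arguments and the docker logs. A sketch of how a caller consumes the returned dict (the values shown are illustrative):

    import logging

    logger = logging.getLogger("checker")
    result = check_logs(logger, initial_sync_count=5, previous_status="Sync")
    # e.g. after a Sync -> OK transition:
    #   {"status_message": "OK: 123k (6)", "current_status_type": "OK", "current_sync_count": 6}
    print(result["status_message"], result["current_status_type"], result["current_sync_count"])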
@@ -220,29 +202,30 @@ def check_logs(logger):
             formatted_id = format_number(last_checking_info["last_sub_id"])
             if last_checking_info["num_subs_to_sync"] > 0:
                 current_status_type = "Sync"
-                status_message = f"Sync: {formatted_id} ({state['sync_count']})"
+                status_message = f"Sync: {formatted_id} ({current_sync_count})"  # Use current_sync_count
                 logger.info(f"Node is syncing. Last sub ID: {last_checking_info['last_sub_id']}, Num subs to sync: {last_checking_info['num_subs_to_sync']}")
             else:
                 current_status_type = "OK"
-                # Increment count only on transition from Sync to OK based on the *last* status type
+                # Increment count only on transition from Sync to OK
                 if previous_status == "Sync":
-                    state["sync_count"] += 1
-                    logger.info(f"Sync completed. Sync count incremented to {state['sync_count']}.")
-                status_message = f"OK: {formatted_id} ({state['sync_count']})"
+                    current_sync_count += 1  # Increment local count
+                    logger.info(f"Sync completed. Sync count incremented to {current_sync_count}.")
+                status_message = f"OK: {formatted_id} ({current_sync_count})"  # Use current_sync_count
                 logger.info(f"Node is OK. Last sub ID: {last_checking_info['last_sub_id']}")
 
         elif last_ignored_id:
             # Fallback to "Ignored" logs if "Checking" is missing
             formatted_id = format_number(last_ignored_id)
             current_status_type = "Sync"  # Assume sync if we only see ignored creations recently
-            status_message = f"Sync: {formatted_id} ({state['sync_count']})"
+            status_message = f"Sync: {formatted_id} ({current_sync_count})"  # Use current_sync_count
             logger.info(f"Node possibly syncing (based on ignored logs). Last ignored ID: {last_ignored_id}")
 
         elif last_head_sub_id:
             # Fallback to "head sub id" if others are missing
             formatted_id = format_number(last_head_sub_id)
             current_status_type = "OK"  # Assume OK if this is the latest relevant info
-            status_message = f"OK: {formatted_id} ({state['sync_count']})"
+            # Don't increment sync count here, only on Sync -> OK transition based on "Checking" logs
+            status_message = f"OK: {formatted_id} ({current_sync_count})"  # Use current_sync_count
             logger.info(f"Node status based on head sub id. Head sub ID: {last_head_sub_id}")
 
         else:
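
The counting rule this hunk implements: the counter advances only on a Sync -> OK edge, never while OK persists. Distilled into a standalone helper (an illustration, not code from the commit):

    def next_sync_count(previous_status, current_status, count):
        # Increment only on the Sync -> OK transition
        if previous_status == "Sync" and current_status == "OK":
            return count + 1
        return count

    assert next_sync_count("Sync", "OK", 5) == 6  # transition completed: increment
    assert next_sync_count("OK", "OK", 6) == 6    # steady state: unchanged
    assert next_sync_count("Idle", "OK", 6) == 6  # no preceding Sync: unchanged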
@@ -250,21 +233,31 @@ def check_logs(logger):
             status_message = "Idle"
             current_status_type = "Idle"
 
-        # Update state and write back if changed
-        state["last_status"] = current_status_type
-        if state["sync_count"] != original_sync_count or state["last_status"] != previous_status:
-            write_state(state, logger)
-
-        return {"status": status_message}
+        # Return the results instead of writing to a file
+        return {
+            "status_message": status_message,
+            "current_status_type": current_status_type,
+            "current_sync_count": current_sync_count
+        }
 
     except subprocess.CalledProcessError as e:
+        error_msg = f"Error: Docker logs failed ({e.returncode})"
         logger.error(f"Error running docker logs command: {e.stderr or e.stdout or e}")
-        # Don't update state file on command error, keep previous state
-        return {"status": f"Error: Docker logs failed ({e.returncode})"}
+        # Return error status and original sync count
+        return {
+            "status_message": error_msg,
+            "current_status_type": "Error",
+            "current_sync_count": initial_sync_count  # Return original count on error
+        }
     except Exception as e:
+        error_msg = "Error: Log processing failed"
         logger.error(f"Unexpected error processing logs: {e}", exc_info=True)
-        # Don't update state file on unexpected error
-        return {"status": f"Error: Log processing failed"}
+        # Return error status and original sync count
+        return {
+            "status_message": error_msg,
+            "current_status_type": "Error",
+            "current_sync_count": initial_sync_count  # Return original count on error
+        }
 
 if __name__ == "__main__":
     colorama.init(autoreset=True)
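
All three return sites (success, CalledProcessError, generic exception) now share one shape, and the error paths echo the input count back unchanged. One way to pin that contract down, if the project wanted typing (not part of this commit):

    from typing import TypedDict

    class CheckResult(TypedDict):
        status_message: str       # e.g. "Sync: 123k (5)" or "Error: Docker logs failed (1)"
        current_status_type: str  # "Sync", "OK", "Idle", or "Error"
        current_sync_count: int   # equals initial_sync_count on any error path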
@@ -291,20 +284,56 @@ if __name__ == "__main__":
     current_vm = grist.find_record(name=GRIST_ROW_NAME, table=NODES_TABLE)[0]
     def grist_callback(msg): grist.update(current_vm.id, msg, NODES_TABLE)
 
+    # Get initial state from Grist
+    initial_sync_count = int(current_vm.Syncs or 0)
+    # Determine previous status type based on Health string (simplified)
+    previous_health_status = current_vm.Health or "Idle"
+    previous_status_type = "Idle"  # Default
+    if previous_health_status.startswith("Sync"):
+        previous_status_type = "Sync"
+    elif previous_health_status.startswith("OK"):
+        previous_status_type = "OK"
+    elif previous_health_status.startswith("Error"):
+        previous_status_type = "Error"  # Consider error state
+
+    logger.info(f"Initial state from Grist - Syncs: {initial_sync_count}, Health: {previous_health_status} (interpreted as: {previous_status_type})")
+
     for attempt in range(3):
         try:
-            result = check_logs(logger)
-            grist_callback({ "Health": result["status"] })
-            logger.info(f"Status: {result['status']} ()")
-            if result["status"] == "Idle":
+            # Pass initial state to check_logs
+            result = check_logs(logger, initial_sync_count, previous_status_type)
+
+            grist_updates = {"Health": result["status_message"]}
+
+            # Update Syncs count in Grist only if it changed
+            if result["current_sync_count"] != initial_sync_count:
+                grist_updates["Syncs"] = result["current_sync_count"]
+                logger.info(f"Sync count changed from {initial_sync_count} to {result['current_sync_count']}")
+
+            # Send updates to Grist
+            grist_callback(grist_updates)
+            logger.info(f"Status update sent: {grist_updates}")
+
+            # Reboot logic (remains mostly the same, reads Reboots from current_vm)
+            if result["current_status_type"] == "Idle":  # Check type, not message
                 uptime_seconds = os.popen("cat /proc/uptime | cut -d'.' -f1").read()
                 uptime_seconds = int(uptime_seconds)
                 if uptime_seconds > 60*60*1:
-                    grist_callback({ "Health": "Rebooting" })
+                    reboot_count = int(current_vm.Reboots or 0)
+                    reboot_count += 1
+                    # Include reboot count in the final Grist update before rebooting
+                    grist_updates = { "Health": "Rebooting", "Reboots": reboot_count }
+                    grist_callback(grist_updates)
+                    logger.info(f"Idle detected for >1 hour (uptime: {uptime_seconds}s). Rebooting. Reboot count: {reboot_count}")
                     os.system("reboot")
+            break  # Exit loop on success
         except Exception as e:
-            logger.error(f"Error on attempt {attempt+1}/3: {e}")
+            logger.error(f"Error in main loop, attempt {attempt+1}/3: {e}", exc_info=True)
             if attempt == 2:
-                grist_callback({ "Health": f"Error: {e}" })
-            if attempt < 2:
-                time.sleep(5)
+                # Log final error to Grist on last attempt
+                try:
+                    grist_updates = { "Health": f"Error: Main loop failed - {e}" }
+                    grist_callback(grist_updates)
+                except Exception as grist_e:
+                    logger.error(f"Failed to log final error to Grist: {grist_e}")
+            time.sleep(5)  # Wait before retrying
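
The prefix checks that recover previous_status_type from the Health string could equally live in a small helper; a sketch equivalent to the inline version above (a hypothetical refactor, not in this commit):

    def status_type_from_health(health):
        # Health strings look like "Sync: 123k (5)", "OK: 123k (5)", "Error: ...", or "Idle"
        health = health or "Idle"
        for prefix in ("Sync", "OK", "Error"):
            if health.startswith(prefix):
                return prefix
        return "Idle"

    assert status_type_from_health("Sync: 123k (5)") == "Sync"
    assert status_type_from_health(None) == "Idle"         # missing Health defaults to Idle
    assert status_type_from_health("Rebooting") == "Idle"  # unknown prefixes fall back to Idle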